| language (string, 1 class) | repo (string, 346 classes) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
| python | tensorflow__tensorflow | tensorflow/python/client/session.py | {"start": 15800, "end": 16770} |
class ____(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
if isinstance(fetches, collections.defaultdict):
self._type_ctor = functools.partial(collections.defaultdict,
fetches.default_factory)
else:
self._type_ctor = self._fetch_type
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
def _generator():
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
yield k, m.build_results([values[j] for j in vi])
return self._type_ctor(_generator())
| _DictFetchMapper |
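For readers unfamiliar with the fetch-mapper pattern in the snippet above, here is a minimal, self-contained sketch of the same idea: deduplicate the fetches requested by several mappers, evaluate each unique fetch once, and rebuild a dict of per-key results from stored value indices. The helper name `uniquify` is illustrative only and is not part of TensorFlow.

```python
# Minimal sketch of the "uniquify fetches, then rebuild results" pattern
# used by _DictFetchMapper above. Names here are illustrative only.

def uniquify(fetch_lists):
    """Deduplicate fetches across mappers; return the unique fetches plus,
    for each mapper, the indices of its fetches in the unique list."""
    unique, seen, indices = [], {}, []
    for fetches in fetch_lists:
        idxs = []
        for f in fetches:
            if f not in seen:
                seen[f] = len(unique)
                unique.append(f)
            idxs.append(seen[f])
        indices.append(idxs)
    return unique, indices


if __name__ == "__main__":
    # Two "mappers" that both want fetch "a"; it is evaluated only once.
    requested = {"x": ["a", "b"], "y": ["a", "c"]}
    unique, indices = uniquify(requested.values())
    values = [f"value({f})" for f in unique]  # pretend these came from a single run()
    results = {
        key: [values[i] for i in idxs]
        for key, idxs in zip(requested.keys(), indices)
    }
    print(results)  # {'x': ['value(a)', 'value(b)'], 'y': ['value(a)', 'value(c)']}
```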
| python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF012.py | {"start": 2430, "end": 2477} |
class ____(SQLModel):
id: int
name: str
| L |
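The snippet above is taken from Ruff's RUF012 test fixture (mutable class defaults). As background, here is a hedged illustration of the kind of code that rule concerns: a mutable class attribute that RUF012 asks to be annotated with `typing.ClassVar`. This example is mine and is not part of the fixture.

```python
from typing import ClassVar


class BadConfig:
    # RUF012 flags a mutable class attribute without an immutable annotation.
    defaults = {"retries": 3}


class GoodConfig:
    # Annotating with ClassVar documents the shared, class-level intent.
    defaults: ClassVar[dict[str, int]] = {"retries": 3}
```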
| python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace_item_attributes.py | {"start": 34631, "end": 36721} |
class ____(
OrganizationTraceItemAttributeValuesEndpointBaseTest, OurLogTestCase
):
item_type = SupportedTraceItemType.LOGS
feature_flags = {"organizations:ourlogs-enabled": True}
def test_no_feature(self) -> None:
response = self.do_request(features={}, key="test.attribute")
assert response.status_code == 404, response.content
def test_invalid_item_type(self) -> None:
response = self.do_request(query={"itemType": "invalid"})
assert response.status_code == 400, response.content
assert response.data == {
"itemType": [
ErrorDetail(string='"invalid" is not a valid choice.', code="invalid_choice")
],
}
def test_no_projects(self) -> None:
response = self.do_request()
assert response.status_code == 200, response.content
assert response.data == []
def test_attribute_values(self) -> None:
logs = [
self.create_ourlog(
extra_data={"body": "log message 1"},
organization=self.organization,
project=self.project,
attributes={
"test1": {"string_value": "value1"},
"test2": {"string_value": "value2"},
},
),
self.create_ourlog(
extra_data={"body": "log message 2"},
organization=self.organization,
project=self.project,
attributes={
"test1": {"string_value": "value2"},
"test2": {"string_value": "value3"},
},
),
]
self.store_ourlogs(logs)
response = self.do_request(key="test1")
assert response.status_code == 200, response.content
assert len(response.data) == 2
values = {item["value"] for item in response.data}
assert "value1" in values
assert "value2" in values
assert all(item["key"] == "test1" for item in response.data)
| OrganizationTraceItemAttributeValuesEndpointLogsTest |
| python | Textualize__textual | docs/examples/styles/border.py | {"start": 64, "end": 384} |
class ____(App):
CSS_PATH = "border.tcss"
def compose(self):
yield Label("My border is solid red", id="label1")
yield Label("My border is dashed green", id="label2")
yield Label("My border is tall blue", id="label3")
if __name__ == "__main__":
app = BorderApp()
app.run()
| BorderApp |
| python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {"start": 8881, "end": 11822} |
class ____(Expr):
"""Tree-reduction component of `ApplyConcatApply`
This class is used within `ApplyConcatApply._lower`.
See Also
--------
ApplyConcatApply
"""
_parameters = [
"frame",
"kind",
"_meta",
"combine",
"aggregate",
"combine_kwargs",
"aggregate_kwargs",
"split_every",
]
_defaults = {"split_every": 8}
@functools.cached_property
def _name(self):
if funcname(self.combine) in ("combine", "aggregate"):
name = funcname(self.combine.__self__).lower() + "-tree"
else:
name = funcname(self.combine)
return f"{name}-{self.deterministic_token}"
def __dask_postcompute__(self):
return toolz.first, ()
@functools.cached_property
def split_every(self):
out = self.operand("split_every")
if out is None:
return 8
if out is False or isinstance(out, int) and out >= 2:
return out
raise ValueError("split_every must be greater than 1 or False")
def _layer(self):
# apply combine to batches of intermediate results
j = 1
d = {}
keys = self.frame.__dask_keys__()
split_every = self.split_every
while split_every is not False and len(keys) > split_every:
new_keys = []
for i, batch in enumerate(
toolz.partition_all(split_every or len(keys), keys)
):
batch = list(batch)
if self.combine_kwargs:
d[self._name, j, i] = (
apply,
self.combine,
[batch],
self.combine_kwargs,
)
else:
d[self._name, j, i] = (self.combine, batch)
new_keys.append((self._name, j, i))
j += 1
keys = new_keys
# apply aggregate to the final result
d[self._name, 0] = (apply, self.aggregate, [keys], self.aggregate_kwargs)
return d
@property
def _meta(self):
return self.operand("_meta")
def _divisions(self):
return (None, None)
def __str__(self):
chunked = str(self.frame)
split_every = getattr(self, "split_every", 0)
return f"{type(self).__name__}({chunked}, kind={funcname(self.kind)}, split_every={split_every})"
def _tree_repr_lines(self, indent=0, recursive=True):
header = f"{funcname(self.kind)}({funcname(type(self))}):"
lines = []
if recursive:
for dep in self.dependencies():
lines.extend(dep._tree_repr_lines(2))
split_every = getattr(self, "split_every", 0)
header += f" split_every={split_every}"
lines = [header] + lines
lines = [" " * indent + line for line in lines]
return lines
| TreeReduce |
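The `_layer` method above batches partition keys with `toolz.partition_all` and applies `combine` level by level until at most `split_every` keys remain, then applies `aggregate` once. A minimal, framework-free sketch of that tree reduction (my own illustration, not Dask code):

```python
from toolz import partition_all


def tree_reduce(parts, combine, aggregate, split_every=8):
    """Repeatedly combine batches of at most `split_every` results,
    then aggregate the final batch; mirrors TreeReduce._layer above."""
    while split_every is not False and len(parts) > split_every:
        parts = [combine(list(batch)) for batch in partition_all(split_every, parts)]
    return aggregate(parts)


if __name__ == "__main__":
    parts = list(range(20))  # 20 "partition results"
    total = tree_reduce(parts, combine=sum, aggregate=sum, split_every=4)
    print(total, sum(range(20)))  # 190 190
```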
| python | celery__celery | t/unit/concurrency/test_prefork.py | {"start": 3597, "end": 3887} |
class ____:
@patch('celery.concurrency.prefork.signals')
def test_process_destructor(self, signals):
mp.process_destructor(13, -3)
signals.worker_process_shutdown.send.assert_called_with(
sender=None, pid=13, exitcode=-3,
)
| test_process_destructor |
| python | celery__celery | celery/bin/amqp.py | {"start": 450, "end": 10023} |
class ____:
def __init__(self, cli_context):
self.cli_context = cli_context
self.connection = self.cli_context.app.connection()
self.channel = None
self.reconnect()
@property
def app(self):
return self.cli_context.app
def respond(self, retval):
if isinstance(retval, str):
self.cli_context.echo(retval)
else:
self.cli_context.echo(pprint.pformat(retval))
def echo_error(self, exception):
self.cli_context.error(f'{self.cli_context.ERROR}: {exception}')
def echo_ok(self):
self.cli_context.echo(self.cli_context.OK)
def reconnect(self):
if self.connection:
self.connection.close()
else:
self.connection = self.cli_context.app.connection()
self.cli_context.echo(f'-> connecting to {self.connection.as_uri()}.')
try:
self.connection.connect()
except (ConnectionRefusedError, ConnectionResetError) as e:
self.echo_error(e)
else:
self.cli_context.secho('-> connected.', fg='green', bold=True)
self.channel = self.connection.default_channel
@click.group(invoke_without_command=True)
@click.pass_context
@handle_preload_options
def amqp(ctx):
"""AMQP Administration Shell.
Also works for non-AMQP transports (but not ones that
store declarations in memory).
"""
if not isinstance(ctx.obj, AMQPContext):
ctx.obj = AMQPContext(ctx.obj)
@amqp.command(name='exchange.declare')
@click.argument('exchange',
type=str)
@click.argument('type',
type=str)
@click.argument('passive',
type=bool,
default=False)
@click.argument('durable',
type=bool,
default=False)
@click.argument('auto_delete',
type=bool,
default=False)
@click.pass_obj
def exchange_declare(amqp_context, exchange, type, passive, durable,
auto_delete):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
amqp_context.channel.exchange_declare(exchange=exchange,
type=type,
passive=passive,
durable=durable,
auto_delete=auto_delete)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.echo_ok()
@amqp.command(name='exchange.delete')
@click.argument('exchange',
type=str)
@click.argument('if_unused',
type=bool)
@click.pass_obj
def exchange_delete(amqp_context, exchange, if_unused):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
amqp_context.channel.exchange_delete(exchange=exchange,
if_unused=if_unused)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.echo_ok()
@amqp.command(name='queue.bind')
@click.argument('queue',
type=str)
@click.argument('exchange',
type=str)
@click.argument('routing_key',
type=str)
@click.pass_obj
def queue_bind(amqp_context, queue, exchange, routing_key):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
amqp_context.channel.queue_bind(queue=queue,
exchange=exchange,
routing_key=routing_key)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.echo_ok()
@amqp.command(name='queue.declare')
@click.argument('queue',
type=str)
@click.argument('passive',
type=bool,
default=False)
@click.argument('durable',
type=bool,
default=False)
@click.argument('auto_delete',
type=bool,
default=False)
@click.pass_obj
def queue_declare(amqp_context, queue, passive, durable, auto_delete):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
retval = amqp_context.channel.queue_declare(queue=queue,
passive=passive,
durable=durable,
auto_delete=auto_delete)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.cli_context.secho(
'queue:{} messages:{} consumers:{}'.format(*retval),
fg='cyan', bold=True)
amqp_context.echo_ok()
@amqp.command(name='queue.delete')
@click.argument('queue',
type=str)
@click.argument('if_unused',
type=bool,
default=False)
@click.argument('if_empty',
type=bool,
default=False)
@click.pass_obj
def queue_delete(amqp_context, queue, if_unused, if_empty):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
retval = amqp_context.channel.queue_delete(queue=queue,
if_unused=if_unused,
if_empty=if_empty)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.cli_context.secho(
f'{retval} messages deleted.',
fg='cyan', bold=True)
amqp_context.echo_ok()
@amqp.command(name='queue.purge')
@click.argument('queue',
type=str)
@click.pass_obj
def queue_purge(amqp_context, queue):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
retval = amqp_context.channel.queue_purge(queue=queue)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.cli_context.secho(
f'{retval} messages deleted.',
fg='cyan', bold=True)
amqp_context.echo_ok()
@amqp.command(name='basic.get')
@click.argument('queue',
type=str)
@click.argument('no_ack',
type=bool,
default=False)
@click.pass_obj
def basic_get(amqp_context, queue, no_ack):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
message = amqp_context.channel.basic_get(queue, no_ack=no_ack)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.respond(dump_message(message))
amqp_context.echo_ok()
@amqp.command(name='basic.publish')
@click.argument('msg',
type=str)
@click.argument('exchange',
type=str)
@click.argument('routing_key',
type=str)
@click.argument('mandatory',
type=bool,
default=False)
@click.argument('immediate',
type=bool,
default=False)
@click.pass_obj
def basic_publish(amqp_context, msg, exchange, routing_key, mandatory,
immediate):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
# XXX Hack to fix Issue #2013
if isinstance(amqp_context.connection.connection, Connection):
msg = Message(msg)
try:
amqp_context.channel.basic_publish(msg,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.echo_ok()
@amqp.command(name='basic.ack')
@click.argument('delivery_tag',
type=int)
@click.pass_obj
def basic_ack(amqp_context, delivery_tag):
if amqp_context.channel is None:
amqp_context.echo_error('Not connected to broker. Please retry...')
amqp_context.reconnect()
else:
try:
amqp_context.channel.basic_ack(delivery_tag)
except Exception as e:
amqp_context.echo_error(e)
amqp_context.reconnect()
else:
amqp_context.echo_ok()
register_repl(amqp)
| AMQPContext |
| python | django__django | tests/auth_tests/test_management.py | {"start": 9905, "end": 11147} |
class ____(TestCase):
databases = {"default", "other"}
@mock.patch.object(changepassword.Command, "_get_pass", return_value="not qwerty")
def test_that_changepassword_command_with_database_option_uses_given_db(
self, mock_get_pass
):
"""
changepassword --database should operate on the specified DB.
"""
user = User.objects.db_manager("other").create_user(
username="joe", password="qwerty"
)
self.assertTrue(user.check_password("qwerty"))
out = StringIO()
call_command("changepassword", username="joe", database="other", stdout=out)
command_output = out.getvalue().strip()
self.assertEqual(
command_output,
"Changing password for user 'joe'\n"
"Password changed successfully for user 'joe'",
)
self.assertTrue(
User.objects.using("other").get(username="joe").check_password("not qwerty")
)
@override_settings(
SILENCED_SYSTEM_CHECKS=["fields.W342"], # ForeignKey(unique=True)
AUTH_PASSWORD_VALIDATORS=[
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"}
],
)
| MultiDBChangepasswordManagementCommandTestCase |
| python | getsentry__sentry | tests/sentry/models/test_pullrequest.py | {"start": 609, "end": 2894} |
class ____(TestCase):
def test_resolve_in_commit(self) -> None:
group = self.create_group()
repo = Repository.objects.create(name="example", organization_id=group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
# It makes reference to the first group
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
)
groups = commit.find_referenced_groups()
assert len(groups) == 1
assert group in groups
# These are created in resolved_in_commit
assert GroupHistory.objects.filter(
group=group,
status=GroupHistoryStatus.SET_RESOLVED_IN_COMMIT,
).exists()
assert GroupLink.objects.filter(
group=group,
linked_type=GroupLink.LinkedType.commit,
linked_id=commit.id,
).exists()
group.refresh_from_db()
assert group.status == GroupStatus.RESOLVED
def test_resolve_in_pull_request(self) -> None:
group = self.create_group()
repo = Repository.objects.create(name="example", organization_id=group.organization.id)
pr = PullRequest.objects.create(
key="1",
repository_id=repo.id,
organization_id=group.organization.id,
title="very cool PR to fix the thing",
# It makes reference to the second group
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
)
groups = pr.find_referenced_groups()
assert len(groups) == 1
assert group in groups
# These are created in resolved_in_pull_request
assert GroupHistory.objects.filter(
group=group,
status=GroupHistoryStatus.SET_RESOLVED_IN_PULL_REQUEST,
).exists()
assert GroupLink.objects.filter(
group=group,
linked_type=GroupLink.LinkedType.pull_request,
linked_id=pr.id,
).exists()
# XXX: Oddly,resolved_in_pull_request doesn't update the group status
group.refresh_from_db()
assert group.status == GroupStatus.UNRESOLVED
| FindReferencedGroupsTest |
| python | huggingface__transformers | src/transformers/models/blt/modeling_blt.py | {"start": 28596, "end": 32400} |
class ____(BltPreTrainedModel):
config: BltGlobalTransformerConfig
_can_record_outputs = {
"global_attentions": OutputRecorder(BltSelfAttention, index=1, layer_name="global_transformer"),
}
def __init__(self, config: BltGlobalTransformerConfig):
super().__init__(config)
self.config = config
self.layers = nn.ModuleList()
for layer_idx in range(config.num_hidden_layers):
self.layers.append(BltTransformerLayer(config, layer_idx))
self.rotary_emb = BltRotaryEmbedding(config=config)
# Create token embedding projection (use nn.Identity() when no projection needed)
if getattr(config, "encoder_cross_output_size", None) is not None:
self.token_embedding_projection = nn.Linear(
config.encoder_cross_output_size, config.hidden_size, bias=False
)
else:
self.token_embedding_projection = nn.Identity()
self.post_init()
def forward(
self,
input_embeds: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
):
batch_size, seq_len, _ = input_embeds.shape
hidden_states = self.token_embedding_projection(input_embeds)
hidden_states = F.dropout(hidden_states, p=self.config.dropout, training=self.training)
if position_ids is None:
position_ids = (
torch.arange(input_embeds.shape[1], device=input_embeds.device).unsqueeze(0).expand(batch_size, -1)
)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for i, layer in enumerate(self.layers):
hidden_states = layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return hidden_states
def process_patch_lengths(patch_lengths: torch.Tensor, max_patch_length: Optional[int]) -> torch.Tensor:
"""
Splits patch lengths into smaller segments if they exceed `max_patch_length`.
Pads the result to uniform length across the batch.
Args:
patch_lengths (torch.Tensor): [batch_size, num_patches] tensor of patch lengths.
max_patch_length (int, optional): Maximum allowed length per patch.
Returns:
torch.Tensor: [batch_size, max_len] tensor of split and padded patch lengths.
"""
if max_patch_length is None:
return patch_lengths
batch_size = patch_lengths.size(0)
processed = []
for seq in patch_lengths:
splits = []
for length in seq[seq > 0]:
length = length.item()
full_chunks, remainder = divmod(length, max_patch_length)
splits.extend([max_patch_length] * full_chunks)
if remainder:
splits.append(remainder)
processed.append(splits)
# Find max length to pad to
max_len = max(len(splits) for splits in processed)
padded = torch.zeros((batch_size, max_len), dtype=patch_lengths.dtype, device=patch_lengths.device)
for i, splits in enumerate(processed):
if splits:
padded[i, : len(splits)] = torch.tensor(splits, dtype=patch_lengths.dtype, device=patch_lengths.device)
# Trim zero columns
if (padded != 0).any(dim=0).sum() < padded.shape[1]:
last_nonzero = (padded != 0).any(dim=0).nonzero().max().item() + 1
padded = padded[:, :last_nonzero]
return padded
| BltGlobalTransformer |
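To make the `process_patch_lengths` docstring above concrete, here is a small pure-Python rehearsal of its splitting-and-padding rule (illustrative only; the real function operates on `torch.Tensor`s and also trims trailing all-zero columns):

```python
def split_and_pad(patch_lengths, max_patch_length):
    """Pure-Python rehearsal of the splitting rule in process_patch_lengths:
    lengths longer than max_patch_length are chopped into full chunks plus a
    remainder, then every row is right-padded with zeros to a common width."""
    if max_patch_length is None:
        return patch_lengths
    processed = []
    for row in patch_lengths:
        splits = []
        for length in row:
            if length <= 0:
                continue
            full_chunks, remainder = divmod(length, max_patch_length)
            splits.extend([max_patch_length] * full_chunks)
            if remainder:
                splits.append(remainder)
        processed.append(splits)
    width = max(len(s) for s in processed)
    return [s + [0] * (width - len(s)) for s in processed]


# A length of 5 with max_patch_length=4 becomes [4, 1]; shorter rows are zero-padded.
print(split_and_pad([[5, 3], [2, 0]], 4))  # [[4, 1, 3], [2, 0, 0]]
```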
| python | plotly__plotly.py | plotly/graph_objs/splom/hoverlabel/_font.py | {"start": 233, "end": 17133} |
class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom.hoverlabel"
_path_str = "splom.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.splom.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
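The generated `Font` class above is usually reached through the graph-objects hierarchy rather than constructed directly. A short usage sketch with standard plotly calls and toy data of my own; the nested `hoverlabel.font` dict is what the class above validates:

```python
import plotly.graph_objects as go

# Hover-label font for a splom trace; the inner dict is validated by the
# splom.hoverlabel.font Font class shown above.
fig = go.Figure(
    go.Splom(
        dimensions=[
            dict(label="a", values=[1, 2, 3]),
            dict(label="b", values=[4, 5, 6]),
        ],
        hoverlabel=dict(font=dict(family="Courier New", size=14, color="white")),
    )
)
fig.show()
```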
| python | getsentry__sentry | src/sentry/api/serializers/models/organization.py | {"start": 18478, "end": 18627} |
class ____(TypedDict):
user: UserSerializerResponse | UserSerializerResponseSelf | None
@register(OrganizationOnboardingTask)
| _OnboardingTasksAttrs |
| python | pandas-dev__pandas | pandas/tests/dtypes/test_inference.py | {"start": 13049, "end": 34634} |
class ____:
@pytest.mark.parametrize(
"arr",
[
np.array(list("abc"), dtype="S1"),
np.array(list("abc"), dtype="S1").astype(object),
[b"a", np.nan, b"c"],
],
)
def test_infer_dtype_bytes(self, arr):
result = lib.infer_dtype(arr, skipna=True)
assert result == "bytes"
@pytest.mark.parametrize(
"value, expected",
[
(float("inf"), True),
(np.inf, True),
(-np.inf, False),
(1, False),
("a", False),
],
)
def test_isposinf_scalar(self, value, expected):
# GH 11352
result = libmissing.isposinf_scalar(value)
assert result is expected
@pytest.mark.parametrize(
"value, expected",
[
(float("-inf"), True),
(-np.inf, True),
(np.inf, False),
(1, False),
("a", False),
],
)
def test_isneginf_scalar(self, value, expected):
result = libmissing.isneginf_scalar(value)
assert result is expected
@pytest.mark.parametrize(
"convert_to_masked_nullable, exp",
[
(
True,
BooleanArray(
np.array([True, False], dtype="bool"), np.array([False, True])
),
),
(False, np.array([True, np.nan], dtype="object")),
],
)
def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp):
# GH 40687
arr = np.array([True, np.nan], dtype=object)
result = libops.maybe_convert_bool(
arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
)
if convert_to_masked_nullable:
tm.assert_extension_array_equal(BooleanArray(*result), exp)
else:
result = result[0]
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
@pytest.mark.parametrize("coerce_numeric", [True, False])
@pytest.mark.parametrize(
"infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
)
@pytest.mark.parametrize("prefix", ["", "-", "+"])
def test_maybe_convert_numeric_infinities(
self, coerce_numeric, infinity, prefix, convert_to_masked_nullable
):
# see gh-13274
result, _ = lib.maybe_convert_numeric(
np.array([prefix + infinity], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=coerce_numeric,
convert_to_masked_nullable=convert_to_masked_nullable,
)
expected = np.array([np.inf if prefix in ["", "+"] else -np.inf])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable):
msg = "Unable to parse string"
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(["foo_inf"], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=False,
convert_to_masked_nullable=convert_to_masked_nullable,
)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_maybe_convert_numeric_post_floatify_nan(
self, coerce, convert_to_masked_nullable
):
# see gh-13314
data = np.array(["1.200", "-999.000", "4.500"], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(
data,
nan_values,
coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable:
expected = FloatingArray(expected, np.isnan(expected))
tm.assert_extension_array_equal(expected, FloatingArray(*out))
else:
out = out[0]
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(["inf", "inf", "inf"], dtype="O")
result, _ = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(["-inf", "-inf", "-inf"], dtype="O")
result, _ = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(["42E", "2E", "99e", "6e"], dtype="O")
result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)
result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
@pytest.mark.parametrize(
"arr",
[
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object),
],
)
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_uint64_nan_values(
self, coerce, convert_to_masked_nullable
):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = np.array([np.nan, 2**63 + 1], dtype=float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(
arr,
na_values,
coerce_numeric=coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable and coerce:
expected = IntegerArray(
np.array([0, 2**63 + 1], dtype="u8"),
np.array([True, False], dtype="bool"),
)
result = IntegerArray(*result)
else:
result = result[0] # discard mask
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"case",
[
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object),
],
)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_int64_uint64(
self, case, coerce, convert_to_masked_nullable
):
expected = case.astype(float) if coerce else case.copy()
result, _ = lib.maybe_convert_numeric(
case,
set(),
coerce_numeric=coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_string_uint64(self, convert_to_masked_nullable):
# GH32394
result = lib.maybe_convert_numeric(
np.array(["uint64"], dtype=object),
set(),
coerce_numeric=True,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable:
result = FloatingArray(*result)
else:
result = result[0]
assert np.isnan(result)
@pytest.mark.parametrize("value", [-(2**63) - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
@pytest.mark.parametrize(
"value, expected_value",
[
(-(1 << 65), -(1 << 65)),
(1 << 65, 1 << 65),
(str(1 << 65), 1 << 65),
(f"-{1 << 65}", -(1 << 65)),
],
)
@pytest.mark.parametrize("coerce_numeric", [False, True])
def test_convert_numeric_overflow(self, value, expected_value, coerce_numeric):
arr = np.array([value], dtype=object)
expected = np.array([expected_value], dtype=float if coerce_numeric else object)
result, _ = lib.maybe_convert_numeric(
arr,
set(),
coerce_numeric=coerce_numeric,
)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("val", [None, np.nan, float("nan")])
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_maybe_convert_objects_nat_inference(self, val, dtype):
dtype = np.dtype(dtype)
vals = np.array([pd.NaT, val], dtype=object)
result = lib.maybe_convert_objects(
vals,
convert_non_numeric=True,
dtype_if_all_nat=dtype,
)
assert result.dtype == dtype
assert np.isnat(result).all()
result = lib.maybe_convert_objects(
vals[::-1],
convert_non_numeric=True,
dtype_if_all_nat=dtype,
)
assert result.dtype == dtype
assert np.isnat(result).all()
@pytest.mark.parametrize(
"value, expected_dtype",
[
# see gh-4471
([2**63], np.uint64),
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
([np.uint64(2**63)], np.uint64),
([2, -1], np.int64),
([2**63, -1], object),
# GH#47294
([np.uint8(1)], np.uint8),
([np.uint16(1)], np.uint16),
([np.uint32(1)], np.uint32),
([np.uint64(1)], np.uint64),
([np.uint8(2), np.uint16(1)], np.uint16),
([np.uint32(2), np.uint16(1)], np.uint32),
([np.uint32(2), -1], object),
([np.uint32(2), 1], np.uint64),
([np.uint32(2), np.int32(1)], object),
],
)
def test_maybe_convert_objects_uint(self, value, expected_dtype):
arr = np.array(value, dtype=object)
exp = np.array(value, dtype=expected_dtype)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_maybe_convert_objects_datetime(self):
# GH27438
arr = np.array(
[np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object
)
exp = arr.copy()
out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
tm.assert_numpy_array_equal(out, exp)
arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object)
exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]")
out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
tm.assert_numpy_array_equal(out, exp)
# with convert_non_numeric=True, the nan is a valid NA value for td64
arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object)
exp = exp[::-1]
out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
tm.assert_numpy_array_equal(out, exp)
def test_maybe_convert_objects_dtype_if_all_nat(self):
arr = np.array([pd.NaT, pd.NaT], dtype=object)
out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
# no dtype_if_all_nat passed -> we dont guess
tm.assert_numpy_array_equal(out, arr)
out = lib.maybe_convert_objects(
arr,
convert_non_numeric=True,
dtype_if_all_nat=np.dtype("timedelta64[ns]"),
)
exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]")
tm.assert_numpy_array_equal(out, exp)
out = lib.maybe_convert_objects(
arr,
convert_non_numeric=True,
dtype_if_all_nat=np.dtype("datetime64[ns]"),
)
exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]")
tm.assert_numpy_array_equal(out, exp)
def test_maybe_convert_objects_dtype_if_all_nat_invalid(self):
# we accept datetime64[ns], timedelta64[ns], and EADtype
arr = np.array([pd.NaT, pd.NaT], dtype=object)
with pytest.raises(ValueError, match="int64"):
lib.maybe_convert_objects(
arr,
convert_non_numeric=True,
dtype_if_all_nat=np.dtype("int64"),
)
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
def test_maybe_convert_objects_datetime_overflow_safe(self, dtype):
stamp = datetime(2363, 10, 4) # Enterprise-D launch date
if dtype == "timedelta64[ns]":
stamp = stamp - datetime(1970, 1, 1)
arr = np.array([stamp], dtype=object)
out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
# no OutOfBoundsDatetime/OutOfBoundsTimedeltas
if dtype == "datetime64[ns]":
expected = np.array(["2363-10-04"], dtype="M8[us]")
else:
expected = arr
tm.assert_numpy_array_equal(out, expected)
def test_maybe_convert_objects_mixed_datetimes(self):
ts = Timestamp("now")
vals = [ts, ts.to_pydatetime(), ts.to_datetime64(), pd.NaT, np.nan, None]
for data in itertools.permutations(vals):
data = np.array(list(data), dtype=object)
expected = DatetimeIndex(data)._data._ndarray
result = lib.maybe_convert_objects(data, convert_non_numeric=True)
tm.assert_numpy_array_equal(result, expected)
def test_maybe_convert_objects_timedelta64_nat(self):
obj = np.timedelta64("NaT", "ns")
arr = np.array([obj], dtype=object)
assert arr[0] is obj
result = lib.maybe_convert_objects(arr, convert_non_numeric=True)
expected = np.array([obj], dtype="m8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"exp",
[
IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])),
IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])),
],
)
def test_maybe_convert_objects_nullable_integer(self, exp):
# GH27335
arr = np.array([2, np.nan], dtype=object)
result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
tm.assert_extension_array_equal(result, exp)
@pytest.mark.parametrize(
"dtype, val", [("int64", 1), ("uint64", np.iinfo(np.int64).max + 1)]
)
def test_maybe_convert_objects_nullable_none(self, dtype, val):
# GH#50043
arr = np.array([val, None, 3], dtype="object")
result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
expected = IntegerArray(
np.array([val, 0, 3], dtype=dtype), np.array([False, True, False])
)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"convert_to_masked_nullable, exp",
[
(True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))),
(False, np.array([2, np.nan], dtype="float64")),
],
)
def test_maybe_convert_numeric_nullable_integer(
self, convert_to_masked_nullable, exp
):
# GH 40687
arr = np.array([2, np.nan], dtype=object)
result = lib.maybe_convert_numeric(
arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
)
if convert_to_masked_nullable:
result = IntegerArray(*result)
tm.assert_extension_array_equal(result, exp)
else:
result = result[0]
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize(
"convert_to_masked_nullable, exp",
[
(
True,
FloatingArray(
np.array([2.0, 0.0], dtype="float64"), np.array([False, True])
),
),
(False, np.array([2.0, np.nan], dtype="float64")),
],
)
def test_maybe_convert_numeric_floating_array(
self, convert_to_masked_nullable, exp
):
# GH 40687
arr = np.array([2.0, np.nan], dtype=object)
result = lib.maybe_convert_numeric(
arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
)
if convert_to_masked_nullable:
tm.assert_extension_array_equal(FloatingArray(*result), exp)
else:
result = result[0]
tm.assert_numpy_array_equal(result, exp)
def test_maybe_convert_objects_bool_nan(self):
# GH32146
ind = Index([True, False, np.nan], dtype=object)
exp = np.array([True, False, np.nan], dtype=object)
out = lib.maybe_convert_objects(ind.values, safe=1)
tm.assert_numpy_array_equal(out, exp)
def test_maybe_convert_objects_nullable_boolean(self):
# GH50047
arr = np.array([True, False], dtype=object)
exp = BooleanArray._from_sequence([True, False], dtype="boolean")
out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
tm.assert_extension_array_equal(out, exp)
arr = np.array([True, False, pd.NaT], dtype=object)
exp = np.array([True, False, pd.NaT], dtype=object)
out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
tm.assert_numpy_array_equal(out, exp)
@pytest.mark.parametrize("val", [None, np.nan])
def test_maybe_convert_objects_nullable_boolean_na(self, val):
# GH50047
arr = np.array([True, False, val], dtype=object)
exp = BooleanArray(
np.array([True, False, False]), np.array([False, False, True])
)
out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
tm.assert_extension_array_equal(out, exp)
@pytest.mark.parametrize(
"data0",
[
True,
1,
1.0,
1.0 + 1.0j,
np.int8(1),
np.int16(1),
np.int32(1),
np.int64(1),
np.float16(1),
np.float32(1),
np.float64(1),
np.complex64(1),
np.complex128(1),
],
)
@pytest.mark.parametrize(
"data1",
[
True,
1,
1.0,
1.0 + 1.0j,
np.int8(1),
np.int16(1),
np.int32(1),
np.int64(1),
np.float16(1),
np.float32(1),
np.float64(1),
np.complex64(1),
np.complex128(1),
],
)
def test_maybe_convert_objects_itemsize(self, data0, data1):
# GH 40908
data = [data0, data1]
arr = np.array(data, dtype="object")
common_kind = np.result_type(type(data0), type(data1)).kind
kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind
kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind
if kind0 != "python" and kind1 != "python":
kind = common_kind
itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize)
elif is_bool(data0) or is_bool(data1):
kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object"
itemsize = ""
elif is_complex(data0) or is_complex(data1):
kind = common_kind
itemsize = 16
else:
kind = common_kind
itemsize = 8
expected = np.array(data, dtype=f"{kind}{itemsize}")
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(result, expected)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
arr = np.array([datetime(2015, 1, 1, tzinfo=timezone.utc), 1], dtype=object)
result = lib.maybe_convert_objects(arr, convert_non_numeric=True)
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize(
"idx",
[
pd.IntervalIndex.from_breaks(range(5), closed="both"),
pd.period_range("2016-01-01", periods=3, freq="D"),
],
)
def test_maybe_convert_objects_ea(self, idx):
result = lib.maybe_convert_objects(
np.array(idx, dtype=object),
convert_non_numeric=True,
)
tm.assert_extension_array_equal(result, idx._data)
| TestInference |
| python | pytorch__pytorch | torch/profiler/_utils.py | {"start": 1273, "end": 2766} |
class ____:
def __init__(self, event) -> None:
self.event = event
def __hash__(self):
return hash(self.event.id)
def __eq__(self, other):
return self.event.id == other.event.id
def __repr__(self) -> str:
return f"{self.event.name}"
def intervals_overlap(self, intervals: list[Interval]):
overlap_time = 0
intervals = sorted(intervals, key=lambda x: x.start)
if intervals:
overlap_start = max(self.event.start_time_ns, intervals[0].start)
overlap_end = min(self.event.end_time_ns, intervals[0].end)
if overlap_start < overlap_end:
overlap_time += overlap_end - overlap_start
i, j = 0, 1
while j < len(intervals):
prev_interval = intervals[i]
curr_interval = intervals[j]
j += 1
if prev_interval.end > curr_interval.start:
# Completely subsumed by previous interval
if prev_interval.end > curr_interval.end:
j += 1
continue
else:
curr_interval.start = prev_interval.end
i = j
overlap_start = max(self.event.start_time_ns, curr_interval.start)
overlap_end = min(self.event.end_time_ns, curr_interval.end)
if overlap_start < overlap_end:
overlap_time += overlap_end - overlap_start
return overlap_time
| EventKey |
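The `intervals_overlap` method above clips a sorted list of intervals against the event's own time window and sums the clipped lengths, taking care not to double-count intervals that overlap each other. A self-contained sketch of the same idea on plain tuples (illustrative, not the profiler code):

```python
def window_overlap(window, intervals):
    """Total time within `window` covered by `intervals`.
    Intervals are merged first so double-counting cannot occur."""
    start, end = window
    merged = []
    for s, e in sorted(intervals):
        if merged and s <= merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], e)
        else:
            merged.append([s, e])
    return sum(max(0, min(end, e) - max(start, s)) for s, e in merged)


# Event runs from t=10 to t=30; intervals cover [12, 18] (after merging) and [25, 33].
print(window_overlap((10, 30), [(12, 15), (14, 18), (25, 33)]))  # 6 + 5 = 11
```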
| python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-achievable-transfer-requests.py | {"start": 739, "end": 1387} |
class ____(object):
def maximumRequests(self, n, requests):
"""
:type n: int
:type requests: List[List[int]]
:rtype: int
"""
def evaluate(n, requests, mask):
change = [0]*n
base, count = 1, 0
for i in xrange(len(requests)):
if base & mask:
change[requests[i][0]] -= 1
change[requests[i][1]] += 1
count += 1
base <<= 1
return count if all(c == 0 for c in change) else 0
return max(evaluate(n, requests, i) for i in xrange(1 << len(requests)))
| Solution2 |
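The solution above enumerates every subset of requests with a bitmask and keeps a subset only if every building's net change is zero. A Python 3 rendering of the same brute force, with the standard example from LeetCode 1601 (the original uses Python 2's `xrange`):

```python
def maximum_requests(n, requests):
    """Brute-force bitmask enumeration, as in Solution2 above (Python 3)."""
    best = 0
    for mask in range(1 << len(requests)):
        change = [0] * n
        count = 0
        for i, (src, dst) in enumerate(requests):
            if mask & (1 << i):
                change[src] -= 1
                change[dst] += 1
                count += 1
        if all(c == 0 for c in change):
            best = max(best, count)
    return best


# LeetCode 1601 example: five of the six requests can be granted.
print(maximum_requests(5, [[0, 1], [1, 0], [0, 1], [1, 2], [2, 0], [3, 4]]))  # 5
```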
| python | jazzband__prettytable | tests/test_html.py | {"start": 127, "end": 869} |
class ____:
def test_html_and_back(self, city_data: PrettyTable) -> None:
html_string = city_data.get_html_string()
new_table = from_html(html_string)[0]
assert new_table.get_string() == city_data.get_string()
def test_html_one_and_back(self, city_data: PrettyTable) -> None:
html_string = city_data.get_html_string()
new_table = from_html_one(html_string)
assert new_table.get_string() == city_data.get_string()
def test_html_one_fail_on_many(self, city_data: PrettyTable) -> None:
html_string = city_data.get_html_string()
html_string += city_data.get_html_string()
with pytest.raises(ValueError):
from_html_one(html_string)
| TestHtmlConstructor |
| python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position14.py | {"start": 315, "end": 911} |
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position14.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, 5, None, {"hidden": 1})
worksheet.insert_image("E9", self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
| python | gevent__gevent | src/greentest/3.10/test_smtpd.py | {"start": 5907, "end": 6583} |
class ____(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
@unittest.skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled")
def test_socket_uses_IPv6(self):
server = smtpd.SMTPServer((socket_helper.HOSTv6, 0), (socket_helper.HOSTv4, 0))
self.assertEqual(server.socket.family, socket.AF_INET6)
def test_socket_uses_IPv4(self):
server = smtpd.SMTPServer((socket_helper.HOSTv4, 0), (socket_helper.HOSTv6, 0))
self.assertEqual(server.socket.family, socket.AF_INET)
| TestFamilyDetection |
| python | pypa__warehouse | tests/unit/email/test_init.py | {"start": 106027, "end": 109288} |
class ____:
@pytest.fixture
def _team(self, pyramid_user):
self.user = UserFactory.create()
EmailFactory.create(user=self.user, verified=True)
self.submitter = pyramid_user
self.organization_name = "exampleorganization"
self.team_name = "Example Team"
@pytest.mark.usefixtures("_team")
@pytest.mark.parametrize(
("email_template_name", "send_team_member_email"),
[
("added-as-team-member", email.send_added_as_team_member_email),
("removed-as-team-member", email.send_removed_as_team_member_email),
("team-member-added", email.send_team_member_added_email),
("team-member-removed", email.send_team_member_removed_email),
],
)
def test_send_team_member_email(
self,
db_request,
make_email_renderers,
send_email,
email_template_name,
send_team_member_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
email_template_name
)
if email_template_name.endswith("-as-team-member"):
recipient = self.user
result = send_team_member_email(
db_request,
self.user,
submitter=self.submitter,
organization_name=self.organization_name,
team_name=self.team_name,
)
else:
recipient = self.submitter
result = send_team_member_email(
db_request,
self.submitter,
user=self.user,
submitter=self.submitter,
organization_name=self.organization_name,
team_name=self.team_name,
)
assert result == {
"username": self.user.username,
"submitter": self.submitter.username,
"organization_name": self.organization_name,
"team_name": self.team_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{recipient.name} <{recipient.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": recipient.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": recipient.email,
"subject": subject_renderer.string_response,
"redact_ip": recipient != self.submitter,
},
},
)
]
| TestTeamMemberEmails |
| python | encode__django-rest-framework | tests/test_prefetch_related.py | {"start": 217, "end": 362} |
class ____(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'groups')
| UserSerializer |
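The serializer above pairs naturally with `prefetch_related`, so the many-to-many `groups` field does not issue one query per user. A hedged usage sketch using standard Django/DRF calls; it assumes an already-configured Django project and the `UserSerializer` shown above:

```python
# Hedged usage sketch: serialize users with their groups prefetched so the
# ManyToMany 'groups' field does not trigger one query per user.
# Assumes Django settings are configured and UserSerializer is importable.
from django.contrib.auth.models import User

queryset = User.objects.all().prefetch_related("groups")
serializer = UserSerializer(queryset, many=True)
data = serializer.data  # list of {'id', 'username', 'email', 'groups'} dicts
```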
| python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {"start": 2652, "end": 2910} |
class ____(forms.Form):
attribute = forms.ChoiceField(choices=[(a, a) for a in ATTR_CHOICES.keys()])
match = forms.ChoiceField(choices=list(MATCH_CHOICES.items()))
value = forms.CharField(widget=forms.TextInput(), required=False)
| EventAttributeForm |
| python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {"start": 16031, "end": 53067} |
class ____(AnyMarkConfig):
"""
AreaConfig schema wrapper.
Parameters
----------
align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle : dict, float, :class:`ExprRef`
The rotation angle of the text, in degrees.
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole : str, dict, :class:`ExprRef`
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription : str, dict, :class:`ExprRef`
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect : bool, dict, :class:`ExprRef`
Whether to keep aspect ratio of image marks.
baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity']
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
cornerRadius : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
cornerRadiusTopLeft : dict, float, :class:`ExprRef`
        The radius in pixels of rounded rectangles' top left corner.
**Default value:** ``0``
cornerRadiusTopRight : dict, float, :class:`ExprRef`
        The radius in pixels of rounded rectangles' top right corner.
**Default value:** ``0``
cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description : str, dict, :class:`ExprRef`
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir : dict, :class:`ExprRef`, Literal['ltr', 'rtl'], :class:`TextDirection`
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
dx : dict, float, :class:`ExprRef`
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy : dict, float, :class:`ExprRef`
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis : str, dict, :class:`ExprRef`
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
endAngle : dict, float, :class:`ExprRef`
The end angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity : dict, float, :class:`ExprRef`
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled : bool
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font : str, dict, :class:`ExprRef`
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize : dict, float, :class:`ExprRef`
The font size, in pixels.
**Default value:** ``11``
fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style (e.g., ``"italic"``).
fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height : dict, float, :class:`ExprRef`
Height of the marks.
href : str, dict, :class:`URI`, :class:`ExprRef`
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius : dict, float, :class:`ExprRef`
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit : dict, float, :class:`ExprRef`
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
line : bool, dict, :class:`OverlayMarkDef`
A flag for overlaying line on top of area marks, or an object defining the
properties of the overlayed lines.
* If this value is an empty object (``{}``) or ``true``, lines with default
properties will be used.
* If this value is ``false``, no lines would be automatically added to area marks.
**Default value:** ``false``.
lineBreak : str, dict, :class:`ExprRef`
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight : dict, float, :class:`ExprRef`
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
opacity : dict, float, :class:`ExprRef`
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order : bool, None
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius : dict, float, :class:`ExprRef`
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle : dict, float, :class:`ExprRef`
The angular padding applied to sides of the arc, in radians.
point : bool, dict, Literal['transparent'], :class:`OverlayMarkDef`
A flag for overlaying points on top of line or area marks, or an object defining the
properties of the overlayed points.
* If this property is ``"transparent"``, transparent points will be used (for
enhancing tooltips and selections).
* If this property is an empty object (``{}``) or ``true``, filled points with
default properties will be used.
* If this property is ``false``, no points would be automatically added to line or
area marks.
**Default value:** ``false``.
radius : dict, float, :class:`ExprRef`
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2 : dict, float, :class:`ExprRef`
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
shape : str, dict, :class:`ExprRef`, :class:`SymbolShape`
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size : dict, float, :class:`ExprRef`
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth : bool, dict, :class:`ExprRef`
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
startAngle : dict, float, :class:`ExprRef`
The start angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square']
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset : dict, float, :class:`ExprRef`
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel']
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit : dict, float, :class:`ExprRef`
The miter limit at which to bevel a line join.
strokeOffset : dict, float, :class:`ExprRef`
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity : dict, float, :class:`ExprRef`
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth : dict, float, :class:`ExprRef`
The stroke width, in pixels.
tension : dict, float, :class:`ExprRef`
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
Placeholder text if the ``text`` channel is not specified
theta : dict, float, :class:`ExprRef`
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : dict, float, :class:`ExprRef`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
time : dict, float, :class:`ExprRef`
timeUnitBandPosition : float
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize : float
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url : str, dict, :class:`URI`, :class:`ExprRef`
The URL of the image file for image marks.
width : dict, float, :class:`ExprRef`
Width of the marks.
x : dict, float, :class:`ExprRef`, Literal['width']
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2 : dict, float, :class:`ExprRef`, Literal['width']
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
y : dict, float, :class:`ExprRef`, Literal['height']
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2 : dict, float, :class:`ExprRef`, Literal['height']
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
"""
_schema = {"$ref": "#/definitions/AreaConfig"}
def __init__(
self,
align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
angle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined,
ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined,
aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined,
blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusBottomLeft: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusBottomRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined,
dx: Optional[float | Parameter | SchemaBase | Map] = Undefined,
dy: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined,
endAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fill: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
filled: Optional[bool] = Undefined,
font: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined,
height: Optional[float | Parameter | SchemaBase | Map] = Undefined,
href: Optional[str | Parameter | SchemaBase | Map] = Undefined,
innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
limit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
line: Optional[bool | SchemaBase | Map] = Undefined,
lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined,
lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
order: Optional[bool | None] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
padAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
point: Optional[bool | SchemaBase | Literal["transparent"] | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
shape: Optional[str | Parameter | SchemaBase | Map] = Undefined,
size: Optional[float | Parameter | SchemaBase | Map] = Undefined,
smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
startAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
stroke: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined,
strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
tension: Optional[float | Parameter | SchemaBase | Map] = Undefined,
text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
theta: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
time: Optional[float | Parameter | SchemaBase | Map] = Undefined,
timeUnitBandPosition: Optional[float] = Undefined,
timeUnitBandSize: Optional[float] = Undefined,
tooltip: Optional[
str | bool | float | Parameter | SchemaBase | Map | None
] = Undefined,
url: Optional[str | Parameter | SchemaBase | Map] = Undefined,
width: Optional[float | Parameter | SchemaBase | Map] = Undefined,
x: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
y: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
**kwds,
):
super().__init__(
align=align,
angle=angle,
aria=aria,
ariaRole=ariaRole,
ariaRoleDescription=ariaRoleDescription,
aspect=aspect,
baseline=baseline,
blend=blend,
color=color,
cornerRadius=cornerRadius,
cornerRadiusBottomLeft=cornerRadiusBottomLeft,
cornerRadiusBottomRight=cornerRadiusBottomRight,
cornerRadiusTopLeft=cornerRadiusTopLeft,
cornerRadiusTopRight=cornerRadiusTopRight,
cursor=cursor,
description=description,
dir=dir,
dx=dx,
dy=dy,
ellipsis=ellipsis,
endAngle=endAngle,
fill=fill,
fillOpacity=fillOpacity,
filled=filled,
font=font,
fontSize=fontSize,
fontStyle=fontStyle,
fontWeight=fontWeight,
height=height,
href=href,
innerRadius=innerRadius,
interpolate=interpolate,
invalid=invalid,
limit=limit,
line=line,
lineBreak=lineBreak,
lineHeight=lineHeight,
opacity=opacity,
order=order,
orient=orient,
outerRadius=outerRadius,
padAngle=padAngle,
point=point,
radius=radius,
radius2=radius2,
shape=shape,
size=size,
smooth=smooth,
startAngle=startAngle,
stroke=stroke,
strokeCap=strokeCap,
strokeDash=strokeDash,
strokeDashOffset=strokeDashOffset,
strokeJoin=strokeJoin,
strokeMiterLimit=strokeMiterLimit,
strokeOffset=strokeOffset,
strokeOpacity=strokeOpacity,
strokeWidth=strokeWidth,
tension=tension,
text=text,
theta=theta,
theta2=theta2,
time=time,
timeUnitBandPosition=timeUnitBandPosition,
timeUnitBandSize=timeUnitBandSize,
tooltip=tooltip,
url=url,
width=width,
x=x,
x2=x2,
y=y,
y2=y2,
**kwds,
)
|
AreaConfig
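A brief, hedged usage sketch (not taken from the source above; the DataFrame and column names are illustrative): the AreaConfig fields surface in user code either per mark via mark_area(...) or globally via configure_area(...).
import altair as alt
import pandas as pd
df = pd.DataFrame({"x": list(range(10)), "y": [v * v for v in range(10)]})
# Per-mark settings mirror AreaConfig fields such as opacity, line and interpolate.
chart = (
    alt.Chart(df)
    .mark_area(opacity=0.5, line=True, interpolate="monotone")
    .encode(x="x", y="y")
)
# The same fields can be applied to every area mark through the top-level config.
chart = chart.configure_area(opacity=0.5)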
|
python
|
streamlit__streamlit
|
lib/setup.py
|
{
"start": 4944,
"end": 8176
}
|
class ____(install):
"""Custom command to verify that the git tag matches our version."""
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("TAG")
if tag != VERSION:
info = f"Git tag: {tag} does not match the version of this app: {VERSION}"
sys.exit(info)
readme_path = THIS_DIRECTORY / ".." / "README.md"
if readme_path.exists():
long_description = readme_path.read_text(encoding="utf-8")
else:
# In some build environments (specifically in conda), we may not have the README file
# readily available. In these cases, just let long_description be the empty string.
# Note that long_description isn't used at all in these build environments, so it
# being missing isn't problematic.
long_description = ""
setup(
name="streamlit",
version=VERSION,
description="A faster way to build and share data apps",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://streamlit.io",
project_urls={
"Source Code": "https://github.com/streamlit/streamlit",
"Bug Tracker": "https://github.com/streamlit/streamlit/issues",
"Release notes": "https://docs.streamlit.io/develop/quick-reference/changelog",
"Documentation": "https://docs.streamlit.io/",
"Community": "https://discuss.streamlit.io/",
"Twitter": "https://twitter.com/streamlit",
},
author="Snowflake Inc",
author_email="hello@streamlit.io",
license="Apache License 2.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Topic :: Database :: Front-Ends",
"Topic :: Office/Business :: Financial :: Spreadsheet",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Widget Sets",
],
python_requires=">=3.10",
# PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html
package_data={"streamlit": ["py.typed", "hello/**/*.py"]},
packages=find_packages(exclude=["tests", "tests.*"]),
# Requirements
install_requires=INSTALL_REQUIRES,
extras_require=EXTRA_REQUIRES,
zip_safe=False, # install source files not egg
include_package_data=True, # copy html and friends
entry_points={"console_scripts": ["streamlit = streamlit.web.cli:main"]},
# For Windows so that streamlit * commands work ie.
# - streamlit version
# - streamlit hello
scripts=["bin/streamlit.cmd"],
cmdclass={
"verify": VerifyVersionCommand,
},
)
|
VerifyVersionCommand
|
python
|
ray-project__ray
|
rllib/connectors/learner/compute_returns_to_go.py
|
{
"start": 267,
"end": 2337
}
|
class ____(ConnectorV2):
"""Learner ConnectorV2 piece computing discounted returns to go till end of episode.
This ConnectorV2:
- Operates on a list of Episode objects (single- or multi-agent).
- Should be used only in the Learner pipeline as a preparation for an upcoming loss
computation that requires the discounted returns to go (until the end of the
episode).
- For each agent, for each episode and at each timestep, sums up the rewards
(discounted) until the end of the episode and assigns the results to a new
column: RETURNS_TO_GO in the batch.
"""
def __init__(
self,
input_observation_space=None,
input_action_space=None,
*,
gamma,
):
"""Initializes a ComputeReturnsToGo instance.
Args:
gamma: The discount factor gamma.
"""
super().__init__(input_observation_space, input_action_space)
self.gamma = gamma
def __call__(
self,
*,
rl_module: MultiRLModule,
episodes: List[EpisodeType],
batch: Dict[str, Any],
**kwargs,
):
for sa_episode in self.single_agent_episode_iterator(
episodes, agents_that_stepped_only=False
):
# Reverse the rewards sequence.
rewards_reversed = sa_episode.get_rewards()[::-1]
# Use lfilter to compute the discounted cumulative sums.
discounted_cumsum_reversed = scipy.signal.lfilter(
[1], [1, -self.gamma], rewards_reversed
)
# Reverse the result to get the correct order.
discounted_returns = discounted_cumsum_reversed[::-1]
# Add the results to the batch under a new column: RETURNS_TO_GO.
self.add_n_batch_items(
batch=batch,
column=Columns.RETURNS_TO_GO,
items_to_add=discounted_returns,
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
return batch
|
ComputeReturnsToGo
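The reversed scipy.signal.lfilter call above implements the recursion R_t = r_t + gamma * R_{t+1}. A small, hedged sanity check (illustrative values, not from the source):
import numpy as np
import scipy.signal
gamma = 0.9
rewards = np.array([1.0, 0.0, 2.0])
# lfilter([1], [1, -gamma], x) computes y[n] = x[n] + gamma * y[n - 1];
# running it over the reversed rewards and reversing back gives the returns-to-go.
rtg = scipy.signal.lfilter([1], [1, -gamma], rewards[::-1])[::-1]
expected = np.array([
    1.0 + gamma * (0.0 + gamma * 2.0),  # R_0
    0.0 + gamma * 2.0,                  # R_1
    2.0,                                # R_2
])
assert np.allclose(rtg, expected)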
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol35.py
|
{
"start": 452,
"end": 549
}
|
class ____:
y: A
y: P1 = A(3)
# This should generate an error.
x: P2 = B(A(3))
z: P1 = A(3)
|
B
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/generators.py
|
{
"start": 671,
"end": 1310
}
|
class ____(BaseHashGenerator):
def hash(self):
length = oauth2_settings.CLIENT_SECRET_GENERATOR_LENGTH
chars = UNICODE_ASCII_CHARACTER_SET
return oauthlib_generate_client_id(length=length, chars=chars)
def generate_client_id():
"""
Generate a suitable client id
"""
client_id_generator = oauth2_settings.CLIENT_ID_GENERATOR_CLASS()
return client_id_generator.hash()
def generate_client_secret():
"""
Generate a suitable client secret
"""
client_secret_generator = oauth2_settings.CLIENT_SECRET_GENERATOR_CLASS()
return client_secret_generator.hash()
|
ClientSecretGenerator
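The generator classes are looked up through oauth2_settings, so they can be swapped in Django settings. A hedged sketch (the custom module path and class are hypothetical, not part of the library):
# myapp/generators.py -- hypothetical custom generator built on the same base class.
import secrets
from oauth2_provider.generators import BaseHashGenerator
class LongerSecretGenerator(BaseHashGenerator):
    def hash(self):
        # Return a URL-safe secret instead of the default character-set string.
        return secrets.token_urlsafe(48)
# settings.py -- point django-oauth-toolkit at the custom generator.
OAUTH2_PROVIDER = {
    "CLIENT_SECRET_GENERATOR_CLASS": "myapp.generators.LongerSecretGenerator",
}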
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0074_backport_indexes.py
|
{
"start": 149,
"end": 785
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0073_remove_protected_privacy_level"),
]
operations = [
migrations.AlterField(
model_name="project",
name="modified_date",
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name="Modified date"),
),
migrations.AlterField(
model_name="project",
name="pub_date",
field=models.DateTimeField(
auto_now_add=True, db_index=True, verbose_name="Publication date"
),
),
]
|
Migration
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 462599,
"end": 463350
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of AddComment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "comment_edge", "subject", "timeline_edge")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
comment_edge = sgqlc.types.Field("IssueCommentEdge", graphql_name="commentEdge")
"""The edge from the subject's comment connection."""
subject = sgqlc.types.Field(Node, graphql_name="subject")
"""The subject"""
timeline_edge = sgqlc.types.Field("IssueTimelineItemEdge", graphql_name="timelineEdge")
"""The edge from the subject's timeline connection."""
|
AddCommentPayload
|
python
|
spack__spack
|
lib/spack/spack/util/s3.py
|
{
"start": 5843,
"end": 6099
}
|
class ____(urllib.request.BaseHandler):
def s3_open(self, req):
orig_url = req.get_full_url()
url, headers, stream = _s3_open(orig_url, method=req.get_method())
return urllib.response.addinfourl(stream, headers, url)
|
UrllibS3Handler
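urllib routes each request to a handler method named <scheme>_open, so registering this handler adds s3:// support to a standard opener. A hedged usage sketch (the bucket URL is illustrative):
import urllib.request
# build_opener() registers the handler; s3:// requests are then dispatched to s3_open().
opener = urllib.request.build_opener(UrllibS3Handler())
with opener.open("s3://my-bucket/path/to/object") as response:
    data = response.read()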
|
python
|
huggingface__transformers
|
src/transformers/models/ernie/modular_ernie.py
|
{
"start": 22063,
"end": 25546
}
|
class ____(BertForNextSentencePrediction):
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
task_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], NextSentencePredictorOutput]:
r"""
task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]`.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
>>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
" `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
outputs = self.ernie(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
task_type_ids=task_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
ErnieForNextSentencePrediction
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classVar4.py
|
{
"start": 369,
"end": 841
}
|
class ____(SomeProtocol):
y = 0
z = 0
def func1() -> None:
# Previously (prior to pyright 1.1.315), this generated an error
# because x was not explicitly declared as a ClassVar. This was changed
# to match mypy, which treats this as a normal class variable -- one that
# can be accessed as both a class an instance variable.
x: int = Class.x
# Same as above.
y: int = Class.y
z: int = Class.z
Class.meth1
Class.meth2
|
Class
|
python
|
getsentry__sentry
|
src/sentry_plugins/opsgenie/client.py
|
{
"start": 46,
"end": 796
}
|
class ____(ApiClient):
monitoring_tool = "sentry"
plugin_name = "opsgenie"
allow_redirects = False
def __init__(self, api_key, alert_url, recipients=None):
self.api_key = api_key
self.alert_url = alert_url
self.recipients = recipients
super().__init__()
def build_url(self, _path):
return self.alert_url
def request(self, data):
headers = {"Authorization": "GenieKey " + self.api_key}
return self._request(path="", method="post", data=data, headers=headers)
def trigger_incident(self, payload):
if self.recipients:
payload = payload.copy()
payload["recipients"] = self.recipients
return self.request(payload)
|
OpsGenieApiClient
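A hedged usage sketch of the client above (API key, alert URL and payload fields are placeholders, not values from the source):
client = OpsGenieApiClient(
    api_key="xxxx-xxxx",
    alert_url="https://api.opsgenie.com/v2/alerts",
    recipients=["ops-team"],
)
# trigger_incident() copies the payload before adding the configured recipients.
client.trigger_incident({"message": "Error rate spike", "source": "sentry"})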
|
python
|
modin-project__modin
|
modin/utils.py
|
{
"start": 2445,
"end": 31164
}
|
class ____(Protocol): # noqa: PR01
"""Structural type for objects with a ``_to_numpy`` method (note the leading underscore)."""
def _to_numpy(self) -> Any: # noqa: GL08
pass
MIN_RAY_VERSION = version.parse("2.10.0")
MIN_DASK_VERSION = version.parse("2.22.0")
MIN_UNIDIST_VERSION = version.parse("0.2.1")
PANDAS_API_URL_TEMPLATE = f"https://pandas.pydata.org/pandas-docs/version/{pandas.__version__}/reference/api/{{}}.html"
# The '__reduced__' name is used internally by the query compiler as a column name to
# represent pandas Series objects that are not explicitly assigned a name, so as to
# distinguish between an N-element series and 1xN dataframe.
MODIN_UNNAMED_SERIES_LABEL = "__reduced__"
def _make_api_url(token: str) -> str:
"""
Generate the link to pandas documentation.
Parameters
----------
token : str
Part of URL to use for generation.
Returns
-------
str
URL to pandas doc.
Notes
-----
This function is extracted for better testability.
"""
return PANDAS_API_URL_TEMPLATE.format(token)
def _get_indent(doc: str) -> int:
"""
Compute indentation in docstring.
Parameters
----------
doc : str
The docstring to compute indentation for.
Returns
-------
int
Minimal indent (excluding empty lines).
"""
indents = _get_indents(doc)
return min(indents) if indents else 0
def _get_indents(source: Union[list, str]) -> list:
"""
Compute indentation for each line of the source string.
Parameters
----------
source : str or list of str
String to compute indents for. Passed list considered
as a list of lines of the source string.
Returns
-------
list of ints
List containing computed indents for each line.
"""
indents = []
if not isinstance(source, list):
source = source.splitlines()
for line in source:
if not line.strip():
continue
for pos, ch in enumerate(line):
if ch != " ":
break
indents.append(pos)
return indents
def format_string(template: str, **kwargs: str) -> str:
"""
Insert passed values at the corresponding placeholders of the specified template.
In contrast with the regular ``str.format()`` this function computes proper
indents for the placeholder values.
Parameters
----------
template : str
Template to substitute values in.
**kwargs : dict
Dictionary that maps placeholder names with values.
Returns
-------
str
        Formatted string.
"""
    # We want to change indentation only for those values whose placeholders are located
    # at the start of a line; in that case the placeholder sets an indentation
    # that the filled-in value has to obey.
# RegExp determining placeholders located at the beginning of the line.
regex = r"^( *)\{(\w+)\}"
for line in template.splitlines():
if line.strip() == "":
continue
match = re.search(regex, line)
if match is None:
continue
nspaces = len(match.group(1))
key = match.group(2)
value = kwargs.get(key)
if not value:
continue
value = dedent(value)
        # Since the placeholder is located at the beginning of a new line,
        # it already has '\n' before it, so to avoid double new lines
        # we want to discard the first leading '\n' of the value;
        # any other leading '\n' characters are considered intentional.
if value[0] == "\n":
value = value[1:]
# `.splitlines()` doesn't preserve last empty line,
# so we have to restore it further
value_lines = value.splitlines()
# We're not indenting the first line of the value, since it's already indented
# properly because of the placeholder indentation.
indented_lines = [
indent(line, " " * nspaces) if line != "\n" else line
for line in value_lines[1:]
]
# If necessary, restoring the last line dropped by `.splitlines()`
if value[-1] == "\n":
indented_lines += [" " * nspaces]
indented_value = "\n".join([value_lines[0], *indented_lines])
kwargs[key] = indented_value
return template.format(**kwargs)
def align_indents(source: str, target: str) -> str:
"""
Align indents of two strings.
Parameters
----------
source : str
Source string to align indents with.
target : str
Target string to align indents.
Returns
-------
str
Target string with indents aligned with the source.
"""
source_indent = _get_indent(source)
target = dedent(target)
return indent(target, " " * source_indent)
def append_to_docstring(message: str) -> Callable[[Fn], Fn]:
"""
Create a decorator which appends passed message to the function's docstring.
Parameters
----------
message : str
Message to append.
Returns
-------
callable
"""
def decorator(func: Fn) -> Fn:
to_append = align_indents(func.__doc__ or "", message)
return Appender(to_append)(func)
return decorator
def _replace_doc(
source_obj: object,
target_obj: object,
overwrite: bool,
apilink: Optional[Union[str, List[str]]],
parent_cls: Optional[Fn] = None,
attr_name: Optional[str] = None,
) -> None:
"""
Replace docstring in `target_obj`, possibly taking from `source_obj` and augmenting.
Can append the link to pandas API online documentation.
Parameters
----------
source_obj : object
Any object from which to take docstring from.
target_obj : object
The object which docstring to replace.
overwrite : bool
Forces replacing the docstring with the one from `source_obj` even
if `target_obj` has its own non-empty docstring.
apilink : str | List[str], optional
If non-empty, insert the link(s) to pandas API documentation.
Should be the prefix part in the URL template, e.g. "pandas.DataFrame".
parent_cls : class, optional
If `target_obj` is an attribute of a class, `parent_cls` should be that class.
This is used for generating the API URL as well as for handling special cases
like `target_obj` being a property or a cached_property.
attr_name : str, optional
Gives the name to `target_obj` if it's an attribute of `parent_cls`.
Needed to handle some special cases and in most cases could be determined automatically.
"""
if isinstance(target_obj, (staticmethod, classmethod)):
# we cannot replace docs on decorated objects, we must replace them
# on original functions instead
target_obj = target_obj.__func__
source_doc = source_obj.__doc__ or ""
target_doc = target_obj.__doc__ or ""
overwrite = overwrite or not target_doc
doc = source_doc if overwrite else target_doc
if doc == "":
# Empty docstrings do not need to be inherited
return
if parent_cls and not attr_name:
if isinstance(target_obj, property):
attr_name = target_obj.fget.__name__ # type: ignore[union-attr]
elif isinstance(target_obj, functools.cached_property):
attr_name = target_obj.func.__name__
elif isinstance(target_obj, (staticmethod, classmethod)):
attr_name = target_obj.__func__.__name__
else:
attr_name = target_obj.__name__ # type: ignore[attr-defined]
if (
source_doc.strip()
and apilink
and "pandas API documentation for " not in target_doc
and (not (attr_name or "").startswith("_"))
):
apilink_l = [apilink] if not isinstance(apilink, list) and apilink else apilink
links = []
for link in apilink_l:
if attr_name:
token = f"{link}.{attr_name}"
else:
token = link
url = _make_api_url(token)
links.append(f"`{token} <{url}>`_")
indent_line = " " * _get_indent(doc)
notes_section = f"\n{indent_line}Notes\n{indent_line}-----\n"
url_line = f"{indent_line}See pandas API documentation for {', '.join(links)} for more.\n"
notes_section_with_url = notes_section + url_line
if notes_section in doc:
doc = doc.replace(notes_section, notes_section_with_url)
else:
doc += notes_section_with_url
if parent_cls and isinstance(target_obj, property):
if overwrite:
target_obj.fget.__doc_inherited__ = True # type: ignore[union-attr]
assert attr_name is not None
setattr(
parent_cls,
attr_name,
property(target_obj.fget, target_obj.fset, target_obj.fdel, doc),
)
elif parent_cls and isinstance(target_obj, functools.cached_property):
if overwrite:
target_obj.func.__doc_inherited__ = True # type: ignore[attr-defined]
assert attr_name is not None
target_obj.func.__doc__ = doc
setattr(
parent_cls,
attr_name,
functools.cached_property(target_obj.func),
)
# otherwise: `TypeError: Cannot use cached_property instance without calling __set_name__ on it.`
getattr(parent_cls, attr_name).__set_name__(parent_cls, attr_name)
else:
if overwrite:
target_obj.__doc_inherited__ = True # type: ignore[attr-defined]
target_obj.__doc__ = doc
# This is a map from objects whose docstrings we are overriding to functions that
# take a DocModule string and override the docstring according to the
# DocModule. When we update DocModule, we can use this map to update all
# inherited docstrings.
_docstring_inheritance_calls: list[Callable[[str], None]] = []
# This is a set of (class, attribute_name) pairs whose docstrings we have
# already replaced since we last updated DocModule. Note that we don't store
# the attributes themselves since we replace property attributes instead of
# modifying them in place:
# https://github.com/modin-project/modin/blob/e9dbcc127913db77473a83936e8b6bb94ef84f0d/modin/utils.py#L353
_attributes_with_docstrings_replaced: set[tuple[type, str]] = set()
def _documentable_obj(obj: object) -> bool:
"""
Check whether we can replace the docstring of `obj`.
Parameters
----------
obj : object
Object whose docstring we want to replace.
Returns
-------
bool
Whether we can replace the docstring.
"""
return bool(
callable(obj)
and not inspect.isclass(obj)
or (isinstance(obj, property) and obj.fget)
or (isinstance(obj, functools.cached_property))
or (isinstance(obj, (staticmethod, classmethod)) and obj.__func__)
)
def _update_inherited_docstrings(doc_module: DocModule) -> None:
"""
Update all inherited docstrings.
Parameters
----------
doc_module : DocModule
The current DocModule.
"""
_attributes_with_docstrings_replaced.clear()
_doc_module = doc_module.get()
for doc_inheritance_call in _docstring_inheritance_calls:
doc_inheritance_call(doc_module=_doc_module) # type: ignore[call-arg]
def _inherit_docstrings_in_place(
cls_or_func: Fn,
doc_module: str,
parent: object,
excluded: List[object],
overwrite_existing: bool = False,
apilink: Optional[Union[str, List[str]]] = None,
) -> None:
"""
Replace `cls_or_func` docstrings with `parent` docstrings in place.
Parameters
----------
cls_or_func : Fn
The class or function whose docstrings we need to update.
doc_module : str
The docs module.
parent : object
Parent object from which the decorated object inherits __doc__.
excluded : list, default: []
List of parent objects from which the class does not
inherit docstrings.
overwrite_existing : bool, default: False
Allow overwriting docstrings that already exist in
the decorated class.
apilink : str | List[str], optional
If non-empty, insert the link(s) to pandas API documentation.
Should be the prefix part in the URL template, e.g. "pandas.DataFrame".
"""
# Import the docs module and get the class (e.g. `DataFrame`).
imported_doc_module = importlib.import_module(doc_module)
# Set the default parent so we can use it in case some docs are missing from
# parent module.
default_parent = parent
# Try to get the parent object from the doc module, and if it isn't there,
# get it from parent instead. We only do this if we are overriding pandas
# documentation. We don't touch other docs.
if doc_module != DocModule.default and "pandas" in str(
getattr(parent, "__module__", "")
):
parent_name = (
# DocModule should use the class BasePandasDataset to override the
# docstrings of BasePandasDataset, even if BasePandasDataset
# normally inherits docstrings from a different `parent`.
"BasePandasDataset"
if getattr(cls_or_func, "__name__", "") == "BasePandasDataset"
# For other classes, override docstrings with the class that has the
# same name as the `parent` class, e.g. DataFrame inherits
# docstrings from doc_module.DataFrame.
else getattr(parent, "__name__", "")
)
parent = getattr(imported_doc_module, parent_name, parent)
if parent != default_parent:
# Reset API link in case the docs are overridden.
apilink = None
overwrite_existing = True
if parent not in excluded:
_replace_doc(parent, cls_or_func, overwrite_existing, apilink)
if not isinstance(cls_or_func, types.FunctionType):
seen = set()
for base in cls_or_func.__mro__: # type: ignore[attr-defined]
if base is object:
continue
for attr, obj in base.__dict__.items():
# only replace docstrings once to prevent https://github.com/modin-project/modin/issues/7113
if attr in seen or (base, attr) in _attributes_with_docstrings_replaced:
continue
seen.add(attr)
if hasattr(obj, "_wrapped_superclass_method"):
# If this method originally comes from a superclass, we get
# docstrings directly from the wrapped superclass method
# rather than inheriting docstrings from the usual parent.
# For example, for BasePandasDataset and Series, the behavior is:
# - If Series inherits a method from BasePandasDataset, then
# it gets the docstring from that method in BasePandasDataset.
# - If Series overrides a method or defines its own method
# that's not present in BasePandasDataset, it follows the usual
# inheritance hierarchy of `parent` and `default_parent`.
parent_obj = obj._wrapped_superclass_method
else:
# Try to get the attribute from the docs class first, then
# from the default parent (pandas), and if it's not in either,
# set `parent_obj` to `None`.
parent_obj = getattr(
parent, attr, getattr(default_parent, attr, None)
)
if (
parent_obj in excluded
or not _documentable_obj(parent_obj)
or not _documentable_obj(obj)
):
continue
_replace_doc(
parent_obj,
obj,
overwrite_existing,
apilink,
parent_cls=base,
attr_name=attr,
)
_attributes_with_docstrings_replaced.add((base, attr))
def _inherit_docstrings(
parent: object,
excluded: List[object] = [],
overwrite_existing: bool = False,
apilink: Optional[Union[str, List[str]]] = None,
) -> Callable[[Fn], Fn]:
"""
Create a decorator which overwrites decorated object docstring(s).
    It takes the `parent` __doc__ attribute. If the target is a class, it also
    overwrites the __doc__ of methods and properties defined in the target or its
    ancestors with the __doc__ of matching methods and properties from `parent`.
Parameters
----------
parent : object
Parent object from which the decorated object inherits __doc__.
excluded : list, default: []
List of parent objects from which the class does not
inherit docstrings.
overwrite_existing : bool, default: False
Allow overwriting docstrings that already exist in
the decorated class.
apilink : str | List[str], optional
If non-empty, insert the link(s) to pandas API documentation.
Should be the prefix part in the URL template, e.g. "pandas.DataFrame".
Returns
-------
callable
Decorator which replaces the decorated object's documentation with `parent` documentation.
Notes
-----
    Keep in mind that the function will override docstrings even for attributes which
    are not defined in the target class (but are defined in the ancestor class),
which means that ancestor class attribute docstrings could also change.
"""
def decorator(cls_or_func: Fn) -> Fn:
inherit_docstring_in_place = functools.partial(
_inherit_docstrings_in_place,
cls_or_func=cls_or_func,
parent=parent,
excluded=excluded,
overwrite_existing=overwrite_existing,
apilink=apilink,
)
inherit_docstring_in_place(doc_module=DocModule.get())
_docstring_inheritance_calls.append(inherit_docstring_in_place)
return cls_or_func
return decorator
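# Editor's note: a minimal, hedged usage sketch of `_inherit_docstrings`; the
# classes below are hypothetical and not part of this module. Roughly, attributes
# of the decorated class that lack a docstring pick up the docstring of the
# matching attribute on `parent`:
#
#     class _PandasLike:
#         def head(self, n=5):
#             """Return the first `n` rows."""
#
#     @_inherit_docstrings(parent=_PandasLike)
#     class _ModinLike:
#         def head(self, n=5):
#             ...
#
#     # _ModinLike.head.__doc__ is now "Return the first `n` rows."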
DocModule.subscribe(_update_inherited_docstrings)
def expanduser_path_arg(argname: str) -> Callable[[Fn], Fn]:
"""
Decorate a function replacing its path argument with "user-expanded" value.
Parameters
----------
argname : str
        Name of the argument containing a path to be expanded.
Returns
-------
callable
Decorator which performs the replacement.
"""
def decorator(func: Fn) -> Fn:
signature = inspect.signature(func)
assert (
getattr(signature.parameters.get(argname), "name", None) == argname
), f"Function {func} does not take '{argname}' as argument"
@functools.wraps(func)
def wrapped(*args: tuple, **kw: dict) -> Any:
params = signature.bind(*args, **kw)
if patharg := params.arguments.get(argname, None):
if isinstance(patharg, str) and patharg.startswith("~"):
params.arguments[argname] = os.path.expanduser(patharg)
elif isinstance(patharg, Path):
params.arguments[argname] = patharg.expanduser()
return func(*params.args, **params.kwargs)
return func(*args, **kw)
return wrapped # type: ignore[return-value]
return decorator
def func_from_deprecated_location(
func_name: str, module: str, deprecation_message: str
) -> Callable:
"""
Create a function that decorates a function ``module.func_name`` with a ``FutureWarning``.
Parameters
----------
func_name : str
Function name to decorate.
module : str
Module where the function is located.
deprecation_message : str
Message to print in a future warning.
Returns
-------
callable
"""
def deprecated_func(*args: tuple[Any], **kwargs: dict[Any, Any]) -> Any:
"""Call deprecated function."""
func = getattr(importlib.import_module(module), func_name)
# using 'FutureWarning' as 'DeprecationWarnings' are filtered out by default
warnings.warn(deprecation_message, FutureWarning)
return func(*args, **kwargs)
return deprecated_func
def hashable(obj: bool) -> bool:
"""
Return whether the `obj` is hashable.
Parameters
----------
obj : object
The object to check.
Returns
-------
bool
"""
# Happy path: if there's no __hash__ method, the object definitely isn't hashable
if not hasattr(obj, "__hash__"):
return False
# Otherwise, we may still need to check for type errors, as in the case of `hash(([],))`.
# (e.g. an unhashable object inside a tuple)
try:
hash(obj)
except TypeError:
return False
return True
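# Editor's note: an illustrative doctest-style sketch of `hashable` (not part of
# the original module); it relies only on the builtins used above:
#
#     >>> hashable("abc")
#     True
#     >>> hashable(([],))  # tuple defines __hash__, but hashing its list element raises TypeError
#     False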
def try_cast_to_pandas(obj: Any, squeeze: bool = False) -> Any:
"""
    Convert `obj` and all nested objects from Modin to pandas if possible.
    If no conversion is possible, return `obj`.
Parameters
----------
obj : object
Object to convert from Modin to pandas.
squeeze : bool, default: False
Squeeze the converted object(s) before returning them.
Returns
-------
object
Converted object.
"""
if isinstance(obj, SupportsPublicToPandas) or hasattr(obj, "modin"):
result = obj.modin.to_pandas() if hasattr(obj, "modin") else obj.to_pandas()
if squeeze:
result = result.squeeze(axis=1)
        # QueryCompiler/low-level ModinFrame case; it doesn't have logic about conversion to Series
if (
isinstance(getattr(result, "name", None), str)
and result.name == MODIN_UNNAMED_SERIES_LABEL
):
result.name = None
return result
if isinstance(obj, (list, tuple)):
return type(obj)([try_cast_to_pandas(o, squeeze=squeeze) for o in obj])
if isinstance(obj, dict):
return {k: try_cast_to_pandas(v, squeeze=squeeze) for k, v in obj.items()}
if callable(obj):
module_hierarchy = getattr(obj, "__module__", "").split(".")
fn_name = getattr(obj, "__name__", None)
if fn_name and module_hierarchy[0] == "modin":
return (
getattr(pandas.DataFrame, fn_name, obj)
if module_hierarchy[-1] == "dataframe"
else getattr(pandas.Series, fn_name, obj)
)
return obj
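# Editor's note: a hedged sketch of `try_cast_to_pandas` on plain containers (not
# part of the original module). Nested lists, tuples and dicts are recursed into,
# and non-Modin leaves are returned unchanged:
#
#     >>> try_cast_to_pandas({"a": [1, 2], "b": (3,)})
#     {'a': [1, 2], 'b': (3,)}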
def execute(*objs: Iterable[Any]) -> None:
"""
Trigger the lazy computations for each obj in `objs`, if any, and wait for them to complete.
Parameters
----------
*objs : Iterable[Any]
A collection of objects to trigger lazy computations.
"""
for obj in objs:
if not hasattr(obj, "_query_compiler"):
continue
query_compiler = obj._query_compiler
query_compiler.execute()
def wrap_into_list(*args: Any, skipna: bool = True) -> List[Any]:
"""
Wrap a sequence of passed values in a flattened list.
    If some value is itself a list, the function appends its values
    to the result one by one instead of inserting the whole list object.
Parameters
----------
*args : tuple
Objects to wrap into a list.
skipna : bool, default: True
Whether or not to skip nan or None values.
Returns
-------
list
Passed values wrapped in a list.
"""
def isnan(o: Any) -> bool:
return o is None or (isinstance(o, float) and np.isnan(o))
res = []
for o in args:
if skipna and isnan(o):
continue
if isinstance(o, list):
res.extend(o)
else:
res.append(o)
return res
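# Editor's note: an illustrative sketch of `wrap_into_list` flattening and
# None-skipping (not part of the original module):
#
#     >>> wrap_into_list(1, [2, 3], None)
#     [1, 2, 3]
#     >>> wrap_into_list(1, None, skipna=False)
#     [1, None]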
def wrap_udf_function(func: Callable) -> Callable:
"""
Create a decorator that makes `func` return pandas objects instead of Modin.
Parameters
----------
func : callable
Function to wrap.
Returns
-------
callable
"""
def wrapper(*args: Any, **kwargs: Any) -> Any:
result = func(*args, **kwargs)
        # if the user accidentally returns a Modin DataFrame or Series,
        # cast it back to pandas for proper processing
return try_cast_to_pandas(result)
wrapper.__name__ = func.__name__
return wrapper
def get_current_execution() -> str:
"""
Return current execution name as a string.
Returns
-------
str
Returns <StorageFormat>On<Engine>-like string.
"""
return f"{StorageFormat.get()}On{Engine.get()}"
def instancer(_class: Callable[[], T]) -> T:
"""
Create a dummy instance each time this is imported.
This serves the purpose of allowing us to use all of pandas plotting methods
without aliasing and writing each of them ourselves.
Parameters
----------
_class : object
Returns
-------
object
Instance of `_class`.
"""
return _class()
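# Editor's note: a hedged usage sketch for `instancer`; the class below is
# hypothetical and only shows that the decorated name is rebound to an instance:
#
#     @instancer
#     class _plotting:
#         def line(self):
#             return "line plot"
#
#     _plotting.line()  # callable without instantiation, since `_plotting` is already an instance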
def import_optional_dependency(name: str, message: str) -> types.ModuleType:
"""
    Import an optional dependency.
Parameters
----------
name : str
The module name.
message : str
Additional text to include in the ImportError message.
Returns
-------
module : ModuleType
The imported module.
"""
try:
return importlib.import_module(name)
except ImportError:
raise ImportError(
f"Missing optional dependency '{name}'. {message} "
+ f"Use pip or conda to install {name}."
) from None
def _get_modin_deps_info() -> Mapping[str, Optional[JSONSerializable]]:
"""
Return Modin-specific dependencies information as a JSON serializable dictionary.
Returns
-------
Mapping[str, Optional[pandas.JSONSerializable]]
The dictionary of Modin dependencies and their versions.
"""
import modin # delayed import so modin.__init__ is fully initialized
result = {"modin": modin.__version__}
for pkg_name, pkg_version in [
("ray", MIN_RAY_VERSION),
("dask", MIN_DASK_VERSION),
("distributed", MIN_DASK_VERSION),
]:
try:
pkg = importlib.import_module(pkg_name)
except ImportError:
result[pkg_name] = None
else:
result[pkg_name] = pkg.__version__ + (
f" (outdated; >={pkg_version} required)"
if version.parse(pkg.__version__) < pkg_version
else ""
)
return result
def show_versions(as_json: Union[str, bool] = False) -> None:
"""
Provide useful information, important for bug reports.
    It comprises info about the host operating system, the pandas version,
    and the versions of other related installed packages.
Parameters
----------
as_json : str or bool, default: False
* If False, outputs info in a human readable form to the console.
* If str, it will be considered as a path to a file.
Info will be written to that file in JSON format.
* If True, outputs info in JSON format to the console.
Notes
-----
This is mostly a copy of pandas.show_versions() but adds separate listing
of Modin-specific dependencies.
"""
sys_info = _get_sys_info()
sys_info["commit"] = get_versions()["full-revisionid"]
modin_deps = _get_modin_deps_info()
deps = _get_dependency_info()
if as_json:
j = {
"system": sys_info,
"modin dependencies": modin_deps,
"dependencies": deps,
}
if as_json is True:
sys.stdout.writelines(json.dumps(j, indent=2))
else:
assert isinstance(as_json, str) # needed for mypy
with codecs.open(as_json, "wb", encoding="utf8") as f:
json.dump(j, f, indent=2)
else:
assert isinstance(sys_info["LOCALE"], dict) # needed for mypy
language_code = sys_info["LOCALE"]["language-code"]
encoding = sys_info["LOCALE"]["encoding"]
sys_info["LOCALE"] = f"{language_code}.{encoding}"
maxlen = max(max(len(x) for x in d) for d in (deps, modin_deps))
print("\nINSTALLED VERSIONS\n------------------") # noqa: T201
for k, v in sys_info.items():
print(f"{k:<{maxlen}}: {v}") # noqa: T201
for name, d in (("Modin", modin_deps), ("pandas", deps)):
print(f"\n{name} dependencies\n{'-' * (len(name) + 13)}") # noqa: T201
for k, v in d.items():
print(f"{k:<{maxlen}}: {v}") # noqa: T201
|
SupportsPrivateToNumPy
|
python
|
pytorch__pytorch
|
torch/distributions/generalized_pareto.py
|
{
"start": 259,
"end": 5881
}
|
class ____(Distribution):
r"""
Creates a Generalized Pareto distribution parameterized by :attr:`loc`, :attr:`scale`, and :attr:`concentration`.
The Generalized Pareto distribution is a family of continuous probability distributions on the real line.
Special cases include Exponential (when :attr:`loc` = 0, :attr:`concentration` = 0), Pareto (when :attr:`concentration` > 0,
:attr:`loc` = :attr:`scale` / :attr:`concentration`), and Uniform (when :attr:`concentration` = -1).
This distribution is often used to model the tails of other distributions. This implementation is based on the
implementation in TensorFlow Probability.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = GeneralizedPareto(torch.tensor([0.1]), torch.tensor([2.0]), torch.tensor([0.4]))
>>> m.sample() # sample from a Generalized Pareto distribution with loc=0.1, scale=2.0, and concentration=0.4
tensor([ 1.5623])
Args:
loc (float or Tensor): Location parameter of the distribution
scale (float or Tensor): Scale parameter of the distribution
concentration (float or Tensor): Concentration parameter of the distribution
"""
# pyrefly: ignore [bad-override]
arg_constraints = {
"loc": constraints.real,
"scale": constraints.positive,
"concentration": constraints.real,
}
has_rsample = True
def __init__(self, loc, scale, concentration, validate_args=None):
self.loc, self.scale, self.concentration = broadcast_all(
loc, scale, concentration
)
if (
isinstance(loc, Number)
and isinstance(scale, Number)
and isinstance(concentration, Number)
):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super().__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(GeneralizedPareto, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
new.concentration = self.concentration.expand(batch_shape)
super(GeneralizedPareto, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device)
return self.icdf(u)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
z = self._z(value)
eq_zero = torch.isclose(self.concentration, torch.tensor(0.0))
safe_conc = torch.where(
eq_zero, torch.ones_like(self.concentration), self.concentration
)
y = 1 / safe_conc + torch.ones_like(z)
where_nonzero = torch.where(y == 0, y, y * torch.log1p(safe_conc * z))
log_scale = (
math.log(self.scale) if isinstance(self.scale, Real) else self.scale.log()
)
return -log_scale - torch.where(eq_zero, z, where_nonzero)
def log_survival_function(self, value):
if self._validate_args:
self._validate_sample(value)
z = self._z(value)
eq_zero = torch.isclose(self.concentration, torch.tensor(0.0))
safe_conc = torch.where(
eq_zero, torch.ones_like(self.concentration), self.concentration
)
where_nonzero = -torch.log1p(safe_conc * z) / safe_conc
return torch.where(eq_zero, -z, where_nonzero)
def log_cdf(self, value):
return torch.log1p(-torch.exp(self.log_survival_function(value)))
def cdf(self, value):
return torch.exp(self.log_cdf(value))
def icdf(self, value):
loc = self.loc
scale = self.scale
concentration = self.concentration
eq_zero = torch.isclose(concentration, torch.zeros_like(concentration))
safe_conc = torch.where(eq_zero, torch.ones_like(concentration), concentration)
logu = torch.log1p(-value)
where_nonzero = loc + scale / safe_conc * torch.expm1(-safe_conc * logu)
where_zero = loc - scale * logu
return torch.where(eq_zero, where_zero, where_nonzero)
def _z(self, x):
return (x - self.loc) / self.scale
@property
def mean(self):
concentration = self.concentration
valid = concentration < 1
safe_conc = torch.where(valid, concentration, 0.5)
result = self.loc + self.scale / (1 - safe_conc)
return torch.where(valid, result, nan)
@property
def variance(self):
concentration = self.concentration
valid = concentration < 0.5
safe_conc = torch.where(valid, concentration, 0.25)
# pyrefly: ignore [unsupported-operation]
result = self.scale**2 / ((1 - safe_conc) ** 2 * (1 - 2 * safe_conc))
return torch.where(valid, result, nan)
def entropy(self):
ans = torch.log(self.scale) + self.concentration + 1
return torch.broadcast_to(ans, self._batch_shape)
@property
def mode(self):
return self.loc
@constraints.dependent_property(is_discrete=False, event_dim=0)
# pyrefly: ignore [bad-override]
def support(self):
lower = self.loc
upper = torch.where(
self.concentration < 0, lower - self.scale / self.concentration, inf
)
return constraints.interval(lower, upper)
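# Editor's note: an illustrative sanity check, not part of the class, assuming
# torch is importable. It exercises the inverse-CDF sampling path used by
# `rsample` by verifying that cdf(icdf(u)) recovers u:
#
#     d = GeneralizedPareto(torch.tensor(0.0), torch.tensor(2.0), torch.tensor(0.4))
#     u = torch.tensor([0.1, 0.5, 0.9])
#     assert torch.allclose(d.cdf(d.icdf(u)), u, atol=1e-6)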
|
GeneralizedPareto
|
python
|
lepture__authlib
|
authlib/oauth1/rfc5849/errors.py
|
{
"start": 1850,
"end": 1948
}
|
class ____(OAuth1Error):
error = "invalid_signature"
status_code = 401
|
InvalidSignatureError
|
python
|
pytorch__pytorch
|
torch/_dynamo/symbolic_convert.py
|
{
"start": 177750,
"end": 190135
}
|
class ____(InstructionTranslatorBase):
@staticmethod
def current_tx() -> InstructionTranslator:
return tls.current_tx
@contextlib.contextmanager
def set_current_tx(self) -> Any:
prior = getattr(tls, "current_tx", None)
tls.current_tx = self
try:
yield
finally:
tls.current_tx = prior
def __init__(
self,
instructions: list[Instruction],
f_code: types.CodeType,
f_locals: dict[str, Any],
f_globals: dict[str, Any],
f_builtins: dict[str, Any],
closure: Optional[tuple[Any, ...]],
torch_function_mode_stack: Any,
code_options: dict[str, Any],
compiler_fn: Any,
one_graph: bool,
export: bool,
export_constraints: Any,
frame_state: Any,
speculation_log: SpeculationLog,
exn_vt_stack: ExceptionStack,
distributed_state: Optional[DistributedState],
package: Optional[CompilePackage],
) -> None:
_step_logger()(
logging.INFO,
f"torchdynamo start tracing {f_code.co_name} {code_options['co_filename']}:{code_options['co_firstlineno']}",
)
super().__init__(
output=OutputGraph(
code_options,
compiler_fn,
self,
export,
export_constraints,
frame_state,
local_scope=f_locals,
global_scope=f_globals,
f_code=f_code,
torch_function_mode_stack=torch_function_mode_stack,
one_graph=one_graph,
package=package,
),
instructions=instructions,
f_locals=f_locals,
f_globals=f_globals,
f_builtins=f_builtins,
closure=closure,
code_options=code_options,
symbolic_locals={}, # set below
# A global var is inserted only after a STORE_GLOBAL happens to it
symbolic_globals={},
symbolic_torch_function_state=None, # type: ignore[arg-type] # set below
symbolic_stream_state=None, # type: ignore[arg-type] # set below
f_code=f_code,
export=export,
inline_depth=0,
speculation_log=speculation_log,
exn_vt_stack=exn_vt_stack,
distributed_state=distributed_state,
package=package,
)
self._throw_if_in_functorch()
# as soon as we create the tracing context we should keep it active, so any calls
# into dynamo apis can rely on finding it
with tracing(self.output.tracing_context), self.set_current_tx():
self.one_graph: bool = one_graph
self.export = export
if self.export:
assert self.one_graph, (
"Export without one graph - something has gone wrong."
)
self.symbolic_locals = {}
# Populate `symbolic_locals` with non-cell variables.
cell_and_freevars: set[str] = set(self.cell_and_freevars())
dynamism = code_context.get_context(f_code).get("dynamism", None)
for name, value in f_locals.items():
if name not in cell_and_freevars:
local_dynamism = None
if dynamism:
local_dynamism = frozenset(dynamism.get(name, {}).items())
var = LazyVariableTracker.create(
value,
LocalSource(
name,
is_input=True,
dynamism=local_dynamism,
),
)
self.symbolic_locals[name] = var
# Populate `symbolic_locals` with cells created by this frame,
# effectively implementing the `MAKE_CELL` instructions.
side_effects = self.output.side_effects
for name in self.cellvars():
if name in f_locals:
# This models cells that are also function inputs.
value = f_locals[name]
# NOTE: root frame inputs that are captured by a nested
# function become special cell objects -- they exist in
# `f_locals` as contents of the cells, rather than the cells
# objects themselves.
#
# In Dynamo, we choose to represent such input cell objects
# as newly created (rather than pre-existing) cell objects,
# because
#
# 1. The reason for representing a pre-existing cell object
# is to emit guard or codegen mutations. However, local
# cells should never be used for guards. Moreover, at this
# point these input cell objects should've never been
# accessed by anyone else, since Dynamo intercepts the frame
# right after its evaluation starts, i.e., right after these
# cell objects are created. So they should have no external
# reference, meaning no mutation needs to be propagated.
#
# 2. This conveniently allows codegen to prune away
# mutations to these cells, unless they escape the frame.
contents_source = LocalSource(
name, is_input=True, is_derefed_cell_contents=True
)
contents_var: VariableTracker = LazyVariableTracker.create(
value, contents_source
)
cell_var = side_effects.track_cell_new()
side_effects.store_cell(cell_var, contents_var)
else:
cell_var = side_effects.track_cell_new()
cell_var.local_name = name # type: ignore[attr-defined]
self.symbolic_locals[name] = cell_var
# Populate `symbolic_locals` with cells captured by this frame,
# effectively implementing the `COPY_FREE_VARS` instruction.
assert closure is not None
for name, cell in zip(self.freevars(), closure):
cell_source = LocalCellSource(name)
contents_source = LocalSource(name, is_derefed_cell_contents=True)
try:
contents_var = LazyVariableTracker.create(
cell.cell_contents, contents_source
)
except ValueError:
# Cell has not yet been assigned
contents_var = variables.DeletedVariable()
cell_var = side_effects.track_cell_existing(
cell_source, cell, contents_var
)
cell_var.local_name = name # type: ignore[attr-defined]
self.symbolic_locals[name] = cell_var
self.symbolic_torch_function_state = SymbolicTorchFunctionState(
torch_function_mode_stack
)
self.symbolic_stream_state = SymbolicStreamState()
if export:
# export gets confused if we never realize unused inputs
# in export mode just eagerly realize everything
self.symbolic_locals = variables.LazyVariableTracker.realize_all(
self.symbolic_locals
)
def _throw_if_in_functorch(self) -> None:
# Fallback to eager in case of a graph break inside vmap
eager = torch._dynamo.lookup_backend("eager")
compiler_fn = inspect.getattr_static(
self.output.compiler_fn, "compiler_fn", self.output.compiler_fn
)
ci = torch._C._functorch.peek_interpreter_stack()
forbidden_keys = (
torch._C._functorch.TransformType.Vmap,
torch._C._functorch.TransformType.Grad,
torch._C._functorch.TransformType.Jvp,
)
if ci is not None and ci.key() in forbidden_keys and compiler_fn is not eager:
name = ci.key().name.lower()
msg = (
"If you are reaching here, it means dynamo failed for one of the following reasons:\n"
# Calling a torch.compiled function
f"- Calling torch.func.{name}(compiled_fn) function from eager mode is not supported. "
f"Ensure that torch.func.{name} is also wrapped within a torch.compile function. "
"For more information, see PyTorch issue #128711.\n"
# if it reaches here, it means Dynamo failed to inline a functorch function
f"- torch.func.{name}(fn) requires the function to be inlined by dynamo"
)
unimplemented(
gb_type="Unsupported functorch tracing attempt",
context="",
explanation=msg,
hints=[],
)
def get_example_value(self, source: Source) -> Any:
if isinstance(source, LocalSource):
return self.f_locals[source.local_name]
if isinstance(source, GlobalSource):
return self.f_globals[source.global_name]
raise KeyError
def symbolic_locals_contain_module_class(self) -> bool:
for v in self.symbolic_locals.values():
if isinstance(v, UserDefinedClassVariable) and issubclass(
v.as_python_constant(), torch.nn.Module
):
return True
return False
def replace_tos_if_return_is_generator(self) -> None:
if (
len(self.stack)
and (tos := self.stack[-1])
and isinstance(tos, LocalGeneratorObjectVariable)
):
self.stack[-1] = ListIteratorVariable(
tos.force_unpack_var_sequence(self),
mutation_type=ValueMutationNew(),
)
def _return(self, inst: Instruction) -> None:
self.replace_tos_if_return_is_generator()
assert self.instruction_pointer is not None
assert self.start_point is not None
get_metrics_context().increment(
"ir_count", self.instruction_pointer - self.start_point
)
if (
not config.allow_empty_graphs
and self.output.count_calls() == 0
and not self.inconsistent_side_effects
and not self.symbolic_locals_contain_module_class()
and not self.export
and not self.one_graph
and not self.error_on_graph_break
and not self.is_tracing_resume_prologue
):
raise exc.SkipFrame(
format_skip_frame_message(self.f_code, "no content in function call")
)
self.instruction_pointer = None
_step_logger()(
logging.INFO,
f"torchdynamo done tracing {self.f_code.co_name} ({inst.opname})",
)
log.debug("return triggered compile")
all_stack_locals_metadata = self.output.compile_subgraph(
self,
reason=GraphCompileReason(
"return_value", [self.frame_summary()], graph_break=False
),
# the value to be returned
stack_pops=1 if inst.opname == "RETURN_VALUE" else 0,
)
# check that our stack/locals meta are correct:
# we should only be tracing 1 frame, and there should not be any NULLs on the stack
assert len(all_stack_locals_metadata) == 1
assert not all_stack_locals_metadata[0].stack_null_idxes
self.output.add_output_instructions(
self.codegen_return_with_pops(inst, all_stack_locals_metadata[0].num_stack)
)
raise ReturnValueOp
def RETURN_VALUE(self, inst: Instruction) -> None:
self._return(inst)
def RETURN_CONST(self, inst: Instruction) -> None:
self._return(inst)
if sys.version_info >= (3, 11):
_binary_op_lookup = [
getattr(
InstructionTranslator,
opname[3:] if "INPLACE" in opname else f"BINARY_{opname[3:]}",
)
for opname, _ in dis._nb_ops # type: ignore[attr-defined]
]
|
InstructionTranslator
|
python
|
django__django
|
tests/admin_inlines/admin.py
|
{
"start": 7765,
"end": 8227
}
|
class ____(forms.ModelForm):
class Meta:
fields = "__all__"
model = SomeChildModel
widgets = {
"position": forms.HiddenInput,
}
labels = {"readonly_field": "Label from ModelForm.Meta"}
help_texts = {"readonly_field": "Help text from ModelForm.Meta"}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["name"].label = "new label"
|
SomeChildModelForm
|
python
|
ray-project__ray
|
rllib/utils/replay_buffers/multi_agent_mixin_replay_buffer.py
|
{
"start": 911,
"end": 16712
}
|
class ____(MultiAgentPrioritizedReplayBuffer):
"""This buffer adds replayed samples to a stream of new experiences.
- Any newly added batch (`add()`) is immediately returned upon
the next `sample` call (close to on-policy) as well as being moved
into the buffer.
- Additionally, a certain number of old samples is mixed into the
returned sample according to a given "replay ratio".
- If >1 calls to `add()` are made without any `sample()` calls
in between, all newly added batches are returned (plus some older samples
according to the "replay ratio").
.. testcode::
:skipif: True
# replay ratio 0.66 (2/3 replayed, 1/3 new samples):
buffer = MultiAgentMixInReplayBuffer(capacity=100,
replay_ratio=0.66)
buffer.add(<A>)
buffer.add(<B>)
buffer.sample(1)
.. testoutput::
..[<A>, <B>, <B>]
.. testcode::
:skipif: True
buffer.add(<C>)
buffer.sample(1)
.. testoutput::
[<C>, <A>, <B>]
or: [<C>, <A>, <A>], [<C>, <B>, <A>] or [<C>, <B>, <B>],
but always <C> as it is the newest sample
.. testcode::
:skipif: True
buffer.add(<D>)
buffer.sample(1)
.. testoutput::
[<D>, <A>, <C>]
or [<D>, <A>, <A>], [<D>, <B>, <A>] or [<D>, <B>, <C>], etc..
but always <D> as it is the newest sample
.. testcode::
:skipif: True
# replay proportion 0.0 -> replay disabled:
buffer = MixInReplay(capacity=100, replay_ratio=0.0)
buffer.add(<A>)
buffer.sample()
.. testoutput::
[<A>]
.. testcode::
:skipif: True
buffer.add(<B>)
buffer.sample()
.. testoutput::
[<B>]
"""
def __init__(
self,
capacity: int = 10000,
storage_unit: str = "timesteps",
num_shards: int = 1,
replay_mode: str = "independent",
replay_sequence_override: bool = True,
replay_sequence_length: int = 1,
replay_burn_in: int = 0,
replay_zero_init_states: bool = True,
replay_ratio: float = 0.66,
underlying_buffer_config: dict = None,
prioritized_replay_alpha: float = 0.6,
prioritized_replay_beta: float = 0.4,
prioritized_replay_eps: float = 1e-6,
**kwargs,
):
"""Initializes MultiAgentMixInReplayBuffer instance.
Args:
capacity: The capacity of the buffer, measured in `storage_unit`.
storage_unit: Either 'timesteps', 'sequences' or
'episodes'. Specifies how experiences are stored. If they
are stored in episodes, replay_sequence_length is ignored.
num_shards: The number of buffer shards that exist in total
(including this one).
replay_mode: One of "independent" or "lockstep". Determines,
whether batches are sampled independently or to an equal
amount.
replay_sequence_override: If True, ignore sequences found in incoming
batches, slicing them into sequences as specified by
`replay_sequence_length` and `replay_sequence_burn_in`. This only has
an effect if storage_unit is `sequences`.
replay_sequence_length: The sequence length (T) of a single
sample. If > 1, we will sample B x T from this buffer. This
only has an effect if storage_unit is 'timesteps'.
replay_burn_in: The burn-in length in case
`replay_sequence_length` > 0. This is the number of timesteps
each sequence overlaps with the previous one to generate a
better internal state (=state after the burn-in), instead of
starting from 0.0 each RNN rollout.
replay_zero_init_states: Whether the initial states in the
                buffer (if replay_sequence_length > 0) are always 0.0 or
should be updated with the previous train_batch state outputs.
replay_ratio: Ratio of replayed samples in the returned
batches. E.g. a ratio of 0.0 means only return new samples
(no replay), a ratio of 0.5 means always return newest sample
plus one old one (1:1), a ratio of 0.66 means always return
the newest sample plus 2 old (replayed) ones (1:2), etc...
underlying_buffer_config: A config that contains all necessary
constructor arguments and arguments for methods to call on
the underlying buffers. This replaces the standard behaviour
of the underlying PrioritizedReplayBuffer. The config
follows the conventions of the general
replay_buffer_config. kwargs for subsequent calls of methods
may also be included. Example:
"replay_buffer_config": {"type": PrioritizedReplayBuffer,
"capacity": 10, "storage_unit": "timesteps",
prioritized_replay_alpha: 0.5, prioritized_replay_beta: 0.5,
prioritized_replay_eps: 0.5}
prioritized_replay_alpha: Alpha parameter for a prioritized
replay buffer. Use 0.0 for no prioritization.
prioritized_replay_beta: Beta parameter for a prioritized
replay buffer.
prioritized_replay_eps: Epsilon parameter for a prioritized
replay buffer.
**kwargs: Forward compatibility kwargs.
"""
if not 0 <= replay_ratio <= 1:
raise ValueError("Replay ratio must be within [0, 1]")
MultiAgentPrioritizedReplayBuffer.__init__(
self,
capacity=capacity,
storage_unit=storage_unit,
num_shards=num_shards,
replay_mode=replay_mode,
replay_sequence_override=replay_sequence_override,
replay_sequence_length=replay_sequence_length,
replay_burn_in=replay_burn_in,
replay_zero_init_states=replay_zero_init_states,
underlying_buffer_config=underlying_buffer_config,
prioritized_replay_alpha=prioritized_replay_alpha,
prioritized_replay_beta=prioritized_replay_beta,
prioritized_replay_eps=prioritized_replay_eps,
**kwargs,
)
self.replay_ratio = replay_ratio
self.last_added_batches = collections.defaultdict(list)
@DeveloperAPI
@override(MultiAgentPrioritizedReplayBuffer)
def add(self, batch: SampleBatchType, **kwargs) -> None:
"""Adds a batch to the appropriate policy's replay buffer.
Turns the batch into a MultiAgentBatch of the DEFAULT_POLICY_ID if
it is not a MultiAgentBatch. Subsequently, adds the individual policy
batches to the storage.
Args:
batch: The batch to be added.
**kwargs: Forward compatibility kwargs.
"""
# Make a copy so the replay buffer doesn't pin plasma memory.
batch = batch.copy()
# Handle everything as if multi-agent.
batch = batch.as_multi_agent()
kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)
pids_and_batches = self._maybe_split_into_policy_batches(batch)
# We need to split batches into timesteps, sequences or episodes
# here already to properly keep track of self.last_added_batches
# underlying buffers should not split up the batch any further
with self.add_batch_timer:
if self.storage_unit == StorageUnit.TIMESTEPS:
for policy_id, sample_batch in pids_and_batches.items():
timeslices = sample_batch.timeslices(1)
for time_slice in timeslices:
self.replay_buffers[policy_id].add(time_slice, **kwargs)
self.last_added_batches[policy_id].append(time_slice)
elif self.storage_unit == StorageUnit.SEQUENCES:
for policy_id, sample_batch in pids_and_batches.items():
timeslices = timeslice_along_seq_lens_with_overlap(
sample_batch=sample_batch,
seq_lens=sample_batch.get(SampleBatch.SEQ_LENS)
if self.replay_sequence_override
else None,
zero_pad_max_seq_len=self.replay_sequence_length,
pre_overlap=self.replay_burn_in,
zero_init_states=self.replay_zero_init_states,
)
for slice in timeslices:
self.replay_buffers[policy_id].add(slice, **kwargs)
self.last_added_batches[policy_id].append(slice)
elif self.storage_unit == StorageUnit.EPISODES:
for policy_id, sample_batch in pids_and_batches.items():
for eps in sample_batch.split_by_episode():
# Only add full episodes to the buffer
if eps.get(SampleBatch.T)[0] == 0 and (
eps.get(SampleBatch.TERMINATEDS, [True])[-1]
or eps.get(SampleBatch.TRUNCATEDS, [False])[-1]
):
self.replay_buffers[policy_id].add(eps, **kwargs)
self.last_added_batches[policy_id].append(eps)
else:
if log_once("only_full_episodes"):
logger.info(
"This buffer uses episodes as a storage "
"unit and thus allows only full episodes "
"to be added to it. Some samples may be "
"dropped."
)
elif self.storage_unit == StorageUnit.FRAGMENTS:
for policy_id, sample_batch in pids_and_batches.items():
self.replay_buffers[policy_id].add(sample_batch, **kwargs)
self.last_added_batches[policy_id].append(sample_batch)
self._num_added += batch.count
@DeveloperAPI
@override(MultiAgentReplayBuffer)
def sample(
self, num_items: int, policy_id: PolicyID = DEFAULT_POLICY_ID, **kwargs
) -> Optional[SampleBatchType]:
"""Samples a batch of size `num_items` from a specified buffer.
Concatenates old samples to new ones according to
self.replay_ratio. If not enough new samples are available, mixes in
less old samples to retain self.replay_ratio on average. Returns
an empty batch if there are no items in the buffer.
Args:
num_items: Number of items to sample from this buffer.
policy_id: ID of the policy that produced the experiences to be
sampled.
**kwargs: Forward compatibility kwargs.
Returns:
Concatenated MultiAgentBatch of items.
"""
# Merge kwargs, overwriting standard call arguments
kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)
def mix_batches(_policy_id):
"""Mixes old with new samples.
Tries to mix according to self.replay_ratio on average.
            If not enough new samples are available, mixes in fewer old samples
to retain self.replay_ratio on average.
"""
def round_up_or_down(value, ratio):
"""Returns an integer averaging to value*ratio."""
product = value * ratio
ceil_prob = product % 1
if random.uniform(0, 1) < ceil_prob:
return int(np.ceil(product))
else:
return int(np.floor(product))
max_num_new = round_up_or_down(num_items, 1 - self.replay_ratio)
# if num_samples * self.replay_ratio is not round,
# we need one more sample with a probability of
# (num_items*self.replay_ratio) % 1
_buffer = self.replay_buffers[_policy_id]
output_batches = self.last_added_batches[_policy_id][:max_num_new]
self.last_added_batches[_policy_id] = self.last_added_batches[_policy_id][
max_num_new:
]
# No replay desired
if self.replay_ratio == 0.0:
return concat_samples_into_ma_batch(output_batches)
# Only replay desired
elif self.replay_ratio == 1.0:
return _buffer.sample(num_items, **kwargs)
num_new = len(output_batches)
if np.isclose(num_new, num_items * (1 - self.replay_ratio)):
# The optimal case, we can mix in a round number of old
# samples on average
num_old = num_items - max_num_new
else:
# We never want to return more elements than num_items
num_old = min(
num_items - max_num_new,
round_up_or_down(
num_new, self.replay_ratio / (1 - self.replay_ratio)
),
)
output_batches.append(_buffer.sample(num_old, **kwargs))
# Depending on the implementation of underlying buffers, samples
# might be SampleBatches
output_batches = [batch.as_multi_agent() for batch in output_batches]
return concat_samples_into_ma_batch(output_batches)
def check_buffer_is_ready(_policy_id):
if (
                (len(self.replay_buffers[_policy_id]) == 0) and self.replay_ratio > 0.0
) or (
len(self.last_added_batches[_policy_id]) == 0
and self.replay_ratio < 1.0
):
return False
return True
with self.replay_timer:
samples = []
if self.replay_mode == ReplayMode.LOCKSTEP:
assert (
policy_id is None
), "`policy_id` specifier not allowed in `lockstep` mode!"
if check_buffer_is_ready(_ALL_POLICIES):
samples.append(mix_batches(_ALL_POLICIES).as_multi_agent())
elif policy_id is not None:
if check_buffer_is_ready(policy_id):
samples.append(mix_batches(policy_id).as_multi_agent())
else:
for policy_id, replay_buffer in self.replay_buffers.items():
if check_buffer_is_ready(policy_id):
samples.append(mix_batches(policy_id).as_multi_agent())
return concat_samples_into_ma_batch(samples)
@DeveloperAPI
@override(MultiAgentPrioritizedReplayBuffer)
def get_state(self) -> Dict[str, Any]:
"""Returns all local state.
Returns:
The serializable local state.
"""
data = {
"last_added_batches": self.last_added_batches,
}
parent = MultiAgentPrioritizedReplayBuffer.get_state(self)
parent.update(data)
return parent
@DeveloperAPI
@override(MultiAgentPrioritizedReplayBuffer)
def set_state(self, state: Dict[str, Any]) -> None:
"""Restores all local state to the provided `state`.
Args:
state: The new state to set this buffer. Can be obtained by
calling `self.get_state()`.
"""
self.last_added_batches = state["last_added_batches"]
        MultiAgentPrioritizedReplayBuffer.set_state(self, state)
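# Editor's note: a standalone sketch (not part of the class) of the replay-ratio
# arithmetic that `sample` targets on average: a `replay_ratio` fraction of each
# returned batch is replayed from the buffer, the rest comes from the newest
# additions.
def _expected_mix(num_items: int, replay_ratio: float) -> tuple:
    """Return the expected (new, replayed) sample counts for one `sample()` call."""
    return num_items * (1 - replay_ratio), num_items * replay_ratio
# _expected_mix(3, 0.66) -> (~1.02 new, ~1.98 replayed), i.e. roughly 1 new : 2 old.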
|
MultiAgentMixInReplayBuffer
|
python
|
google__pytype
|
pytype/abstract/function.py
|
{
"start": 35320,
"end": 48573
}
|
class ____(_ReturnType):
"""A PyTD return type."""
def __init__(
self,
t: _base.BaseValue,
subst: datatypes.AliasingDict[str, cfg.Variable],
sources: list[cfg.Binding],
ctx: "context.Context",
) -> None:
self._type = t
self._subst = subst
self._sources = sources
self._ctx = ctx
@property
def name(self) -> str:
return self._type.name
def instantiate_parameter(
self, node: cfg.CFGNode, param_name: str
) -> cfg.Variable:
_, instance_var = self.instantiate(node)
instance = abstract_utils.get_atomic_value(instance_var)
return instance.get_instance_type_parameter(param_name)
def instantiate(self, node: cfg.CFGNode) -> tuple[cfg.CFGNode, cfg.Variable]:
"""Instantiate the pytd return type."""
# Type parameter values, which are instantiated by the matcher, will end up
# in the return value. Since the matcher does not call __init__, we need to
# do that now. The one exception is that Type[X] does not instantiate X, so
# we do not call X.__init__.
if self._type.name != "builtins.type":
for param in pytd_utils.GetTypeParameters(self._type):
if param.full_name in self._subst:
node = self._ctx.vm.call_init(node, self._subst[param.full_name])
try:
ret = self._ctx.convert.constant_to_var(
abstract_utils.AsReturnValue(self._type),
self._subst,
node,
source_sets=[self._sources],
)
except self._ctx.convert.TypeParameterError:
# The return type contains a type parameter without a substitution.
subst = abstract_utils.with_empty_substitutions(
self._subst, self._type, node, self._ctx
)
return node, self._ctx.convert.constant_to_var(
abstract_utils.AsReturnValue(self._type),
subst,
node,
source_sets=[self._sources],
)
if not ret.bindings and isinstance(self._type, pytd.TypeParameter):
ret.AddBinding(self._ctx.convert.empty, [], node)
return node, ret
def get_parameter(self, node: cfg.CFGNode, param_name: str):
t = self._ctx.convert.constant_to_value(self._type, self._subst, node)
return t.get_formal_type_parameter(param_name)
def _splats_to_any(
seq: Sequence[cfg.Variable], ctx: "context.Context"
) -> tuple[cfg.Variable, ...]:
return tuple(
ctx.new_unsolvable(ctx.root_node) if abstract_utils.is_var_splat(v) else v
for v in seq
)
def call_function(
ctx: "context.Context",
node: cfg.CFGNode,
func_var: cfg.Variable,
args: Args,
fallback_to_unsolvable: bool = True,
allow_never: bool = False,
strict_filter: bool = True,
) -> tuple[cfg.CFGNode, cfg.Variable]:
"""Call a function.
Args:
ctx: The abstract context.
node: The current CFG node.
func_var: A variable of the possible functions to call.
args: The arguments to pass. See function.Args.
fallback_to_unsolvable: If the function call fails, create an unknown.
allow_never: Whether typing.Never is allowed in the return type.
strict_filter: Whether function bindings should be strictly filtered.
Returns:
A tuple (CFGNode, Variable). The Variable is the return value.
Raises:
DictKeyMissing: if we retrieved a nonexistent key from a dict and
fallback_to_unsolvable is False.
FailedFunctionCall: if the call fails and fallback_to_unsolvable is False.
"""
assert func_var.bindings
result = ctx.program.NewVariable()
nodes = []
error = None
has_never = False
for funcb in func_var.bindings:
func = funcb.data
one_result = None
try:
new_node, one_result = func.call(node, funcb, args)
except (error_types.DictKeyMissing, error_types.FailedFunctionCall) as e:
if e > error and (
(not strict_filter and len(func_var.bindings) == 1)
or funcb.IsVisible(node)
):
error = e
else:
if ctx.convert.never in one_result.data:
if allow_never:
# Make sure Never was the only thing returned.
assert len(one_result.data) == 1
has_never = True
else:
for b in one_result.bindings:
if b.data != ctx.convert.never:
result.PasteBinding(b)
else:
result.PasteVariable(one_result, new_node, {funcb})
nodes.append(new_node)
if nodes:
node = ctx.join_cfg_nodes(nodes)
if not result.bindings:
v = ctx.convert.never if has_never else ctx.convert.unsolvable
result.AddBinding(v, [], node)
elif isinstance(error, error_types.FailedFunctionCall) and all(
func.name.endswith(".__init__") for func in func_var.data
):
# If the function failed with a FailedFunctionCall exception, try calling
# it again with fake arguments. This allows for calls to __init__ to
# always succeed, ensuring pytype has a full view of the class and its
# attributes. If the call still fails, call_with_fake_args will return
# abstract.Unsolvable.
node, result = ctx.vm.call_with_fake_args(node, func_var)
elif ctx.options.precise_return and len(func_var.bindings) == 1:
(funcb,) = func_var.bindings
func = funcb.data
if isinstance(func, _abstract.BoundFunction):
func = func.underlying
if isinstance(func, _abstract.PyTDFunction):
node, result = PyTDReturnType(
func.signatures[0].pytd_sig.return_type,
datatypes.HashableDict(),
[funcb],
ctx,
).instantiate(node)
elif isinstance(func, _abstract.InterpreterFunction):
sig = func.signature_functions()[0].signature
ret = sig.annotations.get("return", ctx.convert.unsolvable)
result = ctx.vm.init_class(node, ret)
else:
result = ctx.new_unsolvable(node)
else:
result = ctx.new_unsolvable(node)
ctx.vm.trace_opcode(
None, func_var.data[0].name.rpartition(".")[-1], (func_var, result)
)
if (nodes and not ctx.options.strict_parameter_checks) or not error:
return node, result
elif fallback_to_unsolvable:
ctx.errorlog.invalid_function_call(ctx.vm.stack(func_var.data[0]), error)
return node, result
else:
# We were called by something that does its own error handling.
error.set_return(node, result)
raise error # pylint: disable=raising-bad-type
def match_all_args(
ctx: "context.Context",
node: cfg.CFGNode,
func: "_function_base.NativeFunction|_interpreter_function.InterpreterFunction",
args: "Args",
) -> "tuple[Args, Sequence[tuple[Exception, str, _base.BaseValue]]]":
"""Call match_args multiple times to find all type errors.
Args:
ctx: The abstract context.
node: The current CFG node.
func: An abstract function
args: An Args object to match against func
Returns:
A tuple of (new_args, errors)
where new_args = args with all incorrectly typed values set to Any
errors = a list of [(type mismatch error, arg name, value)]
Reraises any error that is not InvalidParameters
"""
positional_names = func.get_positional_names()
needs_checking = True
errors = []
while needs_checking:
try:
func.match_args(node, args)
except error_types.FailedFunctionCall as e:
if isinstance(e, error_types.WrongKeywordArgs):
errors.append((e, e.extra_keywords[0], None))
for i in e.extra_keywords:
args = args.delete_namedarg(i)
elif isinstance(e, error_types.DuplicateKeyword):
errors.append((e, e.duplicate, None))
args = args.delete_namedarg(e.duplicate)
elif isinstance(e, error_types.MissingParameter):
errors.append((e, e.missing_parameter, None))
args = args.replace_namedarg(
e.missing_parameter, ctx.new_unsolvable(node)
)
elif isinstance(e, error_types.WrongArgTypes):
arg_name = e.bad_call.bad_param.name
for name, value in e.bad_call.passed_args:
if name != arg_name:
continue
errors.append((e, name, value))
try:
pos = positional_names.index(name)
except ValueError:
args = args.replace_namedarg(name, ctx.new_unsolvable(node))
else:
args = args.replace_posarg(pos, ctx.new_unsolvable(node))
break
else:
raise AssertionError(
f"Mismatched parameter {arg_name} not found in passed_args"
) from e
else:
# This is not an InvalidParameters error.
raise
else:
needs_checking = False
return args, errors
def has_visible_namedarg(
node: cfg.CFGNode, args: Args, names: set[str]
) -> bool:
# Note: this method should be called judiciously, as HasCombination is
# potentially very expensive.
namedargs = {args.namedargs[name] for name in names}
variables = [v for v in args.get_variables() if v not in namedargs]
for name in names:
for view in cfg_utils.variable_product(variables + [args.namedargs[name]]):
if node.HasCombination(list(view)):
return True
return False
def handle_typeguard(
node: cfg.CFGNode,
ret: _ReturnType,
first_arg: cfg.Variable,
ctx: "context.Context",
func_name: str | None = None,
) -> cfg.Variable | None:
"""Returns a variable of the return value of a type guard function.
Args:
node: The current node.
ret: The function's return value.
first_arg: The first argument to the function.
ctx: The current context.
func_name: Optionally, the function name, for better error messages.
"""
frame = ctx.vm.frame
if not hasattr(frame, "f_locals"):
return None # no need to apply the type guard if we're in a dummy frame
if ret.name == "typing.TypeIs":
match_result = ctx.matcher(node).compute_one_match(
first_arg, ret.get_parameter(node, abstract_utils.T)
)
matched = [m.view[first_arg] for m in match_result.good_matches]
unmatched = [m.view[first_arg] for m in match_result.bad_matches]
elif ret.name == "typing.TypeGuard":
matched = []
unmatched = first_arg.bindings
else:
return None
if matched:
# When a TypeIs function is applied to a variable with matching bindings, it
# behaves like isinstance(), narrowing in both positive and negative cases.
typeis_return = ctx.program.NewVariable()
for b in matched:
typeis_return.AddBinding(ctx.convert.true, {b}, node)
for b in unmatched:
typeis_return.AddBinding(ctx.convert.false, {b}, node)
return typeis_return
# We have either a TypeIs function that does not match any existing bindings,
# or a TypeGuard function. Get the local/global variable that first_arg comes
# from, and add new bindings for the type guard type.
target_name = ctx.vm.get_var_name(first_arg)
if not target_name:
desc = f" function {func_name!r}" if func_name else ""
ctx.errorlog.not_supported_yet(
ctx.vm.frames,
f"Calling {ret.name}{desc} with an arbitrary expression",
"Please assign the expression to a local variable.",
)
return None
target = frame.lookup_name(target_name)
# Forward all the target's visible bindings to the current node. We're going
# to add new bindings soon, which would otherwise hide the old bindings, kinda
# like assigning the variable to a new value.
for b in target.Bindings(node):
target.PasteBinding(b, node)
# Add missing bindings to the target variable.
old_data = set(target.Data(node))
new_instance = ret.instantiate_parameter(node, abstract_utils.T)
new_data = set(new_instance.data)
for b in new_instance.bindings:
if b.data not in old_data:
target.PasteBinding(b, node)
# Create a boolean return variable with True bindings for values that
# originate from the type guard type and False for the rest.
typeguard_return = ctx.program.NewVariable()
for b in target.Bindings(node):
boolvals = {b.data not in old_data} | {b.data in new_data}
for v in boolvals:
typeguard_return.AddBinding(ctx.convert.bool_values[v], {b}, node)
return typeguard_return
def build_paramspec_signature(
pspec_match,
r_args: tuple[pytd.TypeU, ...],
return_value: _base.BaseValue,
ctx: "context.Context",
) -> Signature:
"""Build a signature from a ParamSpecMatch and Callable args."""
sig: Signature = pspec_match.sig
ann = sig.annotations.copy()
ann["return"] = return_value # pytype: disable=container-type-mismatch
ret_posargs = []
for i, typ in enumerate(r_args):
name = f"_{i}"
ret_posargs.append(name)
if not isinstance(typ, _abstract.BaseValue):
typ = ctx.convert.constant_to_value(typ)
ann[name] = typ
# We have done prefix type matching in the matcher, so we can safely strip
# off the lhs args from the sig by count.
lhs = pspec_match.paramspec
l_nargs = len(lhs.args) if isinstance(lhs, _abstract.Concatenate) else 0
param_names = tuple(ret_posargs) + sig.param_names[l_nargs:]
# All params need to be in the annotations dict or output.py crashes
sig.populate_annotation_dict(ann, ctx, param_names)
posonly_count = max(sig.posonly_count + len(r_args) - l_nargs, 0)
return sig._replace(
param_names=param_names, annotations=ann, posonly_count=posonly_count
)
|
PyTDReturnType
|
python
|
sympy__sympy
|
sympy/matrices/expressions/permutation.py
|
{
"start": 196,
"end": 4352
}
|
class ____(MatrixExpr):
"""A Permutation Matrix
Parameters
==========
perm : Permutation
The permutation the matrix uses.
The size of the permutation determines the matrix size.
See the documentation of
:class:`sympy.combinatorics.permutations.Permutation` for
the further information of how to create a permutation object.
Examples
========
>>> from sympy import Matrix, PermutationMatrix
>>> from sympy.combinatorics import Permutation
Creating a permutation matrix:
>>> p = Permutation(1, 2, 0)
>>> P = PermutationMatrix(p)
>>> P = P.as_explicit()
>>> P
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
Permuting a matrix row and column:
>>> M = Matrix([0, 1, 2])
>>> Matrix(P*M)
Matrix([
[1],
[2],
[0]])
>>> Matrix(M.T*P)
Matrix([[2, 0, 1]])
See Also
========
sympy.combinatorics.permutations.Permutation
"""
def __new__(cls, perm):
from sympy.combinatorics.permutations import Permutation
perm = _sympify(perm)
if not isinstance(perm, Permutation):
raise ValueError(
"{} must be a SymPy Permutation instance.".format(perm))
return super().__new__(cls, perm)
@property
def shape(self):
size = self.args[0].size
return (size, size)
@property
def is_Identity(self):
return self.args[0].is_Identity
def doit(self, **hints):
if self.is_Identity:
return Identity(self.rows)
return self
def _entry(self, i, j, **kwargs):
perm = self.args[0]
return KroneckerDelta(perm.apply(i), j)
def _eval_power(self, exp):
return PermutationMatrix(self.args[0] ** exp).doit()
def _eval_inverse(self):
return PermutationMatrix(self.args[0] ** -1)
_eval_transpose = _eval_adjoint = _eval_inverse
def _eval_determinant(self):
sign = self.args[0].signature()
if sign == 1:
return S.One
elif sign == -1:
return S.NegativeOne
raise NotImplementedError
def _eval_rewrite_as_BlockDiagMatrix(self, *args, **kwargs):
from sympy.combinatorics.permutations import Permutation
from .blockmatrix import BlockDiagMatrix
perm = self.args[0]
full_cyclic_form = perm.full_cyclic_form
cycles_picks = []
# Stage 1. Decompose the cycles into the blockable form.
a, b, c = 0, 0, 0
flag = False
for cycle in full_cyclic_form:
l = len(cycle)
m = max(cycle)
if not flag:
if m + 1 > a + l:
flag = True
temp = [cycle]
b = m
c = l
else:
cycles_picks.append([cycle])
a += l
else:
if m > b:
if m + 1 == a + c + l:
temp.append(cycle)
cycles_picks.append(temp)
flag = False
a = m+1
else:
b = m
temp.append(cycle)
c += l
else:
if b + 1 == a + c + l:
temp.append(cycle)
cycles_picks.append(temp)
flag = False
a = b+1
else:
temp.append(cycle)
c += l
# Stage 2. Normalize each decomposed cycles and build matrix.
p = 0
args = []
for pick in cycles_picks:
new_cycles = []
l = 0
for cycle in pick:
new_cycle = [i - p for i in cycle]
new_cycles.append(new_cycle)
l += len(cycle)
p += l
perm = Permutation(new_cycles)
mat = PermutationMatrix(perm)
args.append(mat)
return BlockDiagMatrix(*args)
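# Editor's note: a hedged sketch of the BlockDiagMatrix rewrite above (not part of
# the original file). A permutation built from the disjoint cycles (0 1) and (2 3)
# decomposes into two independent 2x2 permutation blocks:
#
#     >>> from sympy import BlockDiagMatrix
#     >>> from sympy.combinatorics import Permutation
#     >>> P = PermutationMatrix(Permutation([1, 0, 3, 2]))
#     >>> P.rewrite(BlockDiagMatrix)  # BlockDiagMatrix of two 2x2 PermutationMatrix blocks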
|
PermutationMatrix
|
python
|
PrefectHQ__prefect
|
src/prefect/_experimental/sla/client.py
|
{
"start": 269,
"end": 1936
}
|
class ____(BaseClient):
def apply_slas_for_deployment(
self, deployment_id: "UUID", slas: "list[SlaTypes]"
) -> "SlaMergeResponse":
"""
        Applies service level agreements for a deployment. Performs matching by SLA name. If an SLA with the same name already exists, it will be updated. If an SLA with the same name does not exist, it will be created. Existing SLAs that are not in the list will be deleted.
Args:
deployment_id: The ID of the deployment to update SLAs for
slas: List of SLAs to associate with the deployment
Raises:
httpx.RequestError: if the SLAs were not updated for any reason
Returns:
SlaMergeResponse: The response from the backend, containing the names of the created, updated, and deleted SLAs
"""
resource_id = f"prefect.deployment.{deployment_id}"
for sla in slas:
sla.set_deployment_id(deployment_id)
slas_spec_list = [
sla.model_dump(mode="json", exclude_unset=True) for sla in slas
]
response = self.request(
"POST",
f"/slas/apply-resource-slas/{resource_id}",
json=slas_spec_list,
)
response.raise_for_status()
response_json = response.json()
from prefect._experimental.sla.objects import SlaMergeResponse
return SlaMergeResponse(
created=[sla.get("name") for sla in response_json.get("created")],
updated=[sla.get("name") for sla in response_json.get("updated")],
deleted=[sla.get("name") for sla in response_json.get("deleted")],
)
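# Editor's note: a hedged usage sketch; `sla_client`, `deployment_id` and
# `freshness_sla` below are hypothetical placeholders, not names from this module:
#
#     resp = sla_client.apply_slas_for_deployment(deployment_id, [freshness_sla])
#     print(resp.created, resp.updated, resp.deleted)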
|
SlaClient
|
python
|
doocs__leetcode
|
solution/0300-0399/0363.Max Sum of Rectangle No Larger Than K/Solution.py
|
{
"start": 0,
"end": 614
}
|
class ____:
def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
m, n = len(matrix), len(matrix[0])
ans = -inf
for i in range(m):
nums = [0] * n
for j in range(i, m):
for h in range(n):
nums[h] += matrix[j][h]
s = 0
ts = SortedSet([0])
for x in nums:
s += x
p = ts.bisect_left(s - k)
if p != len(ts):
ans = max(ans, s - ts[p])
ts.add(s)
return ans
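# Editor's note: an illustrative check using LeetCode's first sample input; it
# assumes the module-level imports this solution relies on (List, inf, SortedSet):
#
#     Solution().maxSumSubmatrix([[1, 0, 1], [0, -2, 3]], k=2)  # -> 2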
|
Solution
|
python
|
celery__celery
|
celery/events/snapshot.py
|
{
"start": 728,
"end": 3294
}
|
class ____:
"""Record event snapshots."""
timer = None
shutter_signal = Signal(name='shutter_signal', providing_args={'state'})
cleanup_signal = Signal(name='cleanup_signal')
clear_after = False
_tref = None
_ctref = None
def __init__(self, state, freq=1.0, maxrate=None,
cleanup_freq=3600.0, timer=None, app=None):
self.app = app_or_default(app)
self.state = state
self.freq = freq
self.cleanup_freq = cleanup_freq
self.timer = timer or self.timer or Timer()
self.logger = logger
self.maxrate = maxrate and TokenBucket(rate(maxrate))
def install(self):
self._tref = self.timer.call_repeatedly(self.freq, self.capture)
self._ctref = self.timer.call_repeatedly(
self.cleanup_freq, self.cleanup,
)
def on_shutter(self, state):
pass
def on_cleanup(self):
pass
def cleanup(self):
logger.debug('Cleanup: Running...')
self.cleanup_signal.send(sender=self.state)
self.on_cleanup()
def shutter(self):
if self.maxrate is None or self.maxrate.can_consume():
logger.debug('Shutter: %s', self.state)
self.shutter_signal.send(sender=self.state)
self.on_shutter(self.state)
def capture(self):
self.state.freeze_while(self.shutter, clear_after=self.clear_after)
def cancel(self):
if self._tref:
self._tref() # flush all received events.
self._tref.cancel()
if self._ctref:
self._ctref.cancel()
def __enter__(self):
self.install()
return self
def __exit__(self, *exc_info):
self.cancel()
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
logfile=None, pidfile=None, timer=None, app=None,
**kwargs):
"""Start snapshot recorder."""
app = app_or_default(app)
if pidfile:
platforms.create_pidlock(pidfile)
app.log.setup_logging_subsystem(loglevel, logfile)
print(f'-> evcam: Taking snapshots with {camera} (every {freq} secs.)')
state = app.events.State()
cam = instantiate(camera, state, app=app, freq=freq,
maxrate=maxrate, timer=timer)
cam.install()
conn = app.connection_for_read()
recv = app.events.Receiver(conn, handlers={'*': state.event})
try:
try:
recv.capture(limit=None)
except KeyboardInterrupt:
raise SystemExit
finally:
cam.cancel()
conn.close()
|
Polaroid
|
python
|
networkx__networkx
|
networkx/algorithms/centrality/tests/test_betweenness_centrality.py
|
{
"start": 24867,
"end": 28150
}
|
class ____:
def test_K5(self):
"""Edge betweenness centrality: K5"""
G = nx.complete_graph(5)
b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
b_answer = dict.fromkeys(G.edges(), 1)
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_normalized_K5(self):
"""Edge betweenness centrality: K5"""
G = nx.complete_graph(5)
b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
b_answer = dict.fromkeys(G.edges(), 1 / 10)
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_C4(self):
"""Edge betweenness centrality: C4"""
G = nx.cycle_graph(4)
b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7)
def test_P4(self):
"""Edge betweenness centrality: P4"""
G = nx.path_graph(4)
b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_normalized_P4(self):
"""Edge betweenness centrality: P4"""
G = nx.path_graph(4)
b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7)
def test_balanced_tree(self):
"""Edge betweenness centrality: balanced tree"""
G = nx.balanced_tree(r=2, h=2)
b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_edge_betweenness_k(self):
"""Ensure setting `k` properly limits the number of source nodes."""
G = nx.path_graph(3)
# This choice of `k` and `seed` selects nodes 0 and 2.
# There is only one shortest path between any two pairs of nodes.
# With source nodes 0 and 2, this means that both edges are part of
# three shortest paths:
# For (0, 1): sp(0, 1), sp(0, 2), sp(2, 0).
# For (1, 2): sp(0, 2), sp(2, 0), sp(2, 1).
# We normalize by 2 because the graph is undirected, and by
# `k / n = 2 / 3` because we are only considering a subset of source
# nodes.
# This means the final eb centralities should be 3 / 2 / (2 / 3) = 9 / 4.
eb = nx.edge_betweenness_centrality(G, k=2, seed=42, normalized=False)
assert eb == {(0, 1): 9 / 4, (1, 2): 9 / 4}
# When normalization is `True`, we instead divide by the number of total
# `(s, t)` pairs, i.e. `k * (n - 1) = 4`, meaning we get an eb of `3 / 4`.
eb = nx.edge_betweenness_centrality(G, k=2, seed=42, normalized=True)
assert eb == {(0, 1): 3 / 4, (1, 2): 3 / 4}
|
TestEdgeBetweennessCentrality
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/triggers/hitl.py
|
{
"start": 2198,
"end": 8516
}
|
class ____(BaseTrigger):
"""A trigger that checks whether Human-in-the-loop responses are received."""
def __init__(
self,
*,
ti_id: UUID,
options: list[str],
params: dict[str, dict[str, Any]],
defaults: list[str] | None = None,
multiple: bool = False,
timeout_datetime: datetime | None,
poke_interval: float = 5.0,
**kwargs,
):
super().__init__(**kwargs)
self.ti_id = ti_id
self.poke_interval = poke_interval
self.options = options
self.multiple = multiple
self.defaults = defaults
self.timeout_datetime = timeout_datetime
self.params = ParamsDict(
{
k: Param(
v.pop("value"),
**v,
)
if HITLTrigger._is_param(v)
else Param(v)
for k, v in params.items()
},
)
@staticmethod
def _is_param(value: Any) -> bool:
return isinstance(value, dict) and all(key in value for key in ("description", "schema", "value"))
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize HITLTrigger arguments and classpath."""
return (
"airflow.providers.standard.triggers.hitl.HITLTrigger",
{
"ti_id": self.ti_id,
"options": self.options,
"defaults": self.defaults,
"params": {k: self.params.get_param(k).serialize() for k in self.params},
"multiple": self.multiple,
"timeout_datetime": self.timeout_datetime,
"poke_interval": self.poke_interval,
},
)
async def _handle_timeout(self) -> TriggerEvent:
"""Handle HITL timeout logic and yield appropriate event."""
resp = await sync_to_async(get_hitl_detail_content_detail)(ti_id=self.ti_id)
# Case 1: Response arrived just before timeout
if resp.response_received and resp.chosen_options:
if TYPE_CHECKING:
assert resp.responded_by_user is not None
assert resp.responded_at is not None
chosen_options_list = list(resp.chosen_options or [])
self.log.info(
"[HITL] responded_by=%s (id=%s) options=%s at %s (timeout fallback skipped)",
resp.responded_by_user.name,
resp.responded_by_user.id,
chosen_options_list,
resp.responded_at,
)
return TriggerEvent(
HITLTriggerEventSuccessPayload(
chosen_options=chosen_options_list,
params_input=resp.params_input or {},
responded_at=resp.responded_at,
responded_by_user=HITLUser(
id=resp.responded_by_user.id,
name=resp.responded_by_user.name,
),
timedout=False,
)
)
# Case 2: No defaults defined → failure
if self.defaults is None:
return TriggerEvent(
HITLTriggerEventFailurePayload(
error="The timeout has passed, and the response has not yet been received.",
error_type="timeout",
)
)
# Case 3: Timeout fallback to default
resp = await sync_to_async(update_hitl_detail_response)(
ti_id=self.ti_id,
chosen_options=self.defaults,
params_input=self.params.dump(),
)
if TYPE_CHECKING:
assert resp.responded_at is not None
self.log.info(
"[HITL] timeout reached before receiving response, fallback to default %s",
self.defaults,
)
return TriggerEvent(
HITLTriggerEventSuccessPayload(
chosen_options=self.defaults,
params_input=self.params.dump(),
responded_by_user=None,
responded_at=resp.responded_at,
timedout=True,
)
)
async def _handle_response(self):
"""Check if HITL response is ready and yield success if so."""
resp = await sync_to_async(get_hitl_detail_content_detail)(ti_id=self.ti_id)
if TYPE_CHECKING:
assert resp.responded_by_user is not None
assert resp.responded_at is not None
if not (resp.response_received and resp.chosen_options):
return None
# validate input
if params_input := resp.params_input:
try:
for key, value in params_input.items():
self.params[key] = value
except ParamValidationError as err:
return TriggerEvent(
HITLTriggerEventFailurePayload(
error=str(err),
error_type="validation",
)
)
chosen_options_list = list(resp.chosen_options or [])
self.log.info(
"[HITL] responded_by=%s (id=%s) options=%s at %s",
resp.responded_by_user.name,
resp.responded_by_user.id,
chosen_options_list,
resp.responded_at,
)
return TriggerEvent(
HITLTriggerEventSuccessPayload(
chosen_options=chosen_options_list,
params_input=params_input or {},
responded_at=resp.responded_at,
responded_by_user=HITLUser(
id=resp.responded_by_user.id,
name=resp.responded_by_user.name,
),
timedout=False,
)
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until the Human-in-the-loop response received or timeout reached."""
while True:
if self.timeout_datetime and self.timeout_datetime < utcnow():
event = await self._handle_timeout()
yield event
return
event = await self._handle_response()
if event:
yield event
return
await asyncio.sleep(self.poke_interval)
|
HITLTrigger
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/group_batch_fusion.py
|
{
"start": 48816,
"end": 49028
}
|
class ____(BatchMathOpsPreGradFusion):
def __init__(self, **kwargs):
super().__init__(torch.nn.functional.dropout, **kwargs)
@register_fusion("batch_aten_tanh", pre_grad=False)
|
BatchDropoutPreGradFusion
|
python
|
huggingface__transformers
|
src/transformers/models/swin/modeling_swin.py
|
{
"start": 31163,
"end": 35345
}
|
class ____(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]
self.layers = nn.ModuleList(
[
SwinStage(
config=config,
dim=int(config.embed_dim * 2**i_layer),
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
depth=config.depths[i_layer],
num_heads=config.num_heads[i_layer],
drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
downsample=SwinPatchMerging if (i_layer < self.num_layers - 1) else None,
)
for i_layer in range(self.num_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
always_partition: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, SwinEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_outputs = layer_module(hidden_states, input_dimensions, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
output_dimensions = layer_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
# rearrange b (h w) c -> b c h w
# here we use the original (not downsampled) height and width
reshaped_hidden_state = hidden_states_before_downsampling.view(
batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and not output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[3:]
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return SwinEncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
reshaped_hidden_states=all_reshaped_hidden_states,
)
@auto_docstring
|
SwinEncoder
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/typinganndata/ann_module3.py
|
{
"start": 193,
"end": 318
}
|
class ____:
def __init__(self, x: int) -> None:
self.x: no_such_name = x # This one is OK as proposed by Guido
|
C_OK
|
python
|
scipy__scipy
|
benchmarks/benchmarks/interpolate.py
|
{
"start": 281,
"end": 1464
}
|
class ____(Benchmark):
unit = "relative increase with repeats"
def track_leaks(self):
set_mem_rlimit()
# Setup temp file, make it fit in memory
repeats = [2, 5, 10, 50, 200]
peak_mems = []
for repeat in repeats:
code = f"""
import numpy as np
from scipy.interpolate import griddata
def func(x, y):
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range({repeat}):
for method in ['nearest', 'linear', 'cubic']:
griddata(points, values, (grid_x, grid_y), method=method)
"""
_, peak_mem = run_monitored(code)
peak_mems.append(peak_mem)
corr, p = spearmanr(repeats, peak_mems)
if p < 0.05:
print("*"*79)
print("PROBABLE MEMORY LEAK")
print("*"*79)
else:
print("PROBABLY NO MEMORY LEAK")
return max(peak_mems) / min(peak_mems)
|
Leaks
|
python
|
astropy__astropy
|
astropy/utils/masked/tests/test_function_helpers.py
|
{
"start": 64427,
"end": 65594
}
|
class ____:
@pytest.mark.parametrize(
"one, two",
list(
itertools.combinations(
(
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (
MASKED_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(APPLY_TO_BOTH_FUNCTIONS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped_functions == included_in_helpers
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
@pytest.mark.parametrize(
"target, helper",
sorted(
DISPATCHED_FUNCTIONS.items(),
key=lambda items: items[0].__name__,
),
ids=lambda func: func.__name__,
)
|
TestFunctionHelpersCompleteness
|
python
|
django__django
|
tests/humanize_tests/tests.py
|
{
"start": 1038,
"end": 21088
}
|
class ____(SimpleTestCase):
def humanize_tester(
self, test_list, result_list, method, normalize_result_func=escape
):
for test_content, result in zip(test_list, result_list):
with self.subTest(test_content):
t = Template("{%% load humanize %%}{{ test_content|%s }}" % method)
rendered = t.render(Context(locals())).strip()
self.assertEqual(
rendered,
normalize_result_func(result),
msg="%s test failed, produced '%s', should've produced '%s'"
% (method, rendered, result),
)
def test_ordinal(self):
test_list = (
"1",
"2",
"3",
"4",
"11",
"12",
"13",
"101",
"102",
"103",
"111",
"-0",
"-1",
"-105",
"something else",
None,
)
result_list = (
"1st",
"2nd",
"3rd",
"4th",
"11th",
"12th",
"13th",
"101st",
"102nd",
"103rd",
"111th",
"0th",
"-1",
"-105",
"something else",
None,
)
with translation.override("en"):
self.humanize_tester(test_list, result_list, "ordinal")
def test_i18n_html_ordinal(self):
"""Allow html in output on i18n strings"""
test_list = (
"1",
"2",
"3",
"4",
"11",
"12",
"13",
"101",
"102",
"103",
"111",
"something else",
None,
)
result_list = (
"1<sup>er</sup>",
"2<sup>e</sup>",
"3<sup>e</sup>",
"4<sup>e</sup>",
"11<sup>e</sup>",
"12<sup>e</sup>",
"13<sup>e</sup>",
"101<sup>er</sup>",
"102<sup>e</sup>",
"103<sup>e</sup>",
"111<sup>e</sup>",
"something else",
"None",
)
with translation.override("fr-fr"):
self.humanize_tester(test_list, result_list, "ordinal", lambda x: x)
def test_intcomma(self):
test_list = (
100,
-100,
1000,
-1000,
10123,
-10123,
10311,
-10311,
1000000,
-1000000,
1234567.25,
-1234567.25,
"100",
"-100",
"100.1",
"-100.1",
"100.13",
"-100.13",
"1000",
"-1000",
"10123",
"-10123",
"10311",
"-10311",
"100000.13",
"-100000.13",
"1000000",
"-1000000",
"1234567.1234567",
"-1234567.1234567",
Decimal("1234567.1234567"),
Decimal("-1234567.1234567"),
Decimal("Infinity"),
Decimal("-Infinity"),
Decimal("NaN"),
None,
"1234567",
"-1234567",
"1234567.12",
"-1234567.12",
"the quick brown fox jumped over the lazy dog",
)
result_list = (
"100",
"-100",
"1,000",
"-1,000",
"10,123",
"-10,123",
"10,311",
"-10,311",
"1,000,000",
"-1,000,000",
"1,234,567.25",
"-1,234,567.25",
"100",
"-100",
"100.1",
"-100.1",
"100.13",
"-100.13",
"1,000",
"-1,000",
"10,123",
"-10,123",
"10,311",
"-10,311",
"100,000.13",
"-100,000.13",
"1,000,000",
"-1,000,000",
"1,234,567.1234567",
"-1,234,567.1234567",
"1,234,567.1234567",
"-1,234,567.1234567",
"Infinity",
"-Infinity",
"NaN",
None,
"1,234,567",
"-1,234,567",
"1,234,567.12",
"-1,234,567.12",
"the quick brown fox jumped over the lazy dog",
)
with translation.override("en"):
self.humanize_tester(test_list, result_list, "intcomma")
def test_l10n_intcomma(self):
test_list = (
100,
-100,
1000,
-1000,
10123,
-10123,
10311,
-10311,
1000000,
-1000000,
1234567.25,
-1234567.25,
"100",
"-100",
"1000",
"-1000",
"10123",
"-10123",
"10311",
"-10311",
"1000000",
"-1000000",
"1234567.1234567",
"-1234567.1234567",
Decimal("1234567.1234567"),
-Decimal("1234567.1234567"),
None,
"1234567",
"-1234567",
"1234567.12",
"-1234567.12",
"the quick brown fox jumped over the lazy dog",
)
result_list_en = (
"100",
"-100",
"1,000",
"-1,000",
"10,123",
"-10,123",
"10,311",
"-10,311",
"1,000,000",
"-1,000,000",
"1,234,567.25",
"-1,234,567.25",
"100",
"-100",
"1,000",
"-1,000",
"10,123",
"-10,123",
"10,311",
"-10,311",
"1,000,000",
"-1,000,000",
"1,234,567.1234567",
"-1,234,567.1234567",
"1,234,567.1234567",
"-1,234,567.1234567",
None,
"1,234,567",
"-1,234,567",
"1,234,567.12",
"-1,234,567.12",
"the quick brown fox jumped over the lazy dog",
)
result_list_de = (
"100",
"-100",
"1.000",
"-1.000",
"10.123",
"-10.123",
"10.311",
"-10.311",
"1.000.000",
"-1.000.000",
"1.234.567,25",
"-1.234.567,25",
"100",
"-100",
"1.000",
"-1.000",
"10.123",
"-10.123",
"10.311",
"-10.311",
"1.000.000",
"-1.000.000",
"1.234.567,1234567",
"-1.234.567,1234567",
"1.234.567,1234567",
"-1.234.567,1234567",
None,
"1.234.567",
"-1.234.567",
"1.234.567,12",
"-1.234.567,12",
"the quick brown fox jumped over the lazy dog",
)
with self.settings(USE_THOUSAND_SEPARATOR=False):
with translation.override("en"):
self.humanize_tester(test_list, result_list_en, "intcomma")
with translation.override("de"):
self.humanize_tester(test_list, result_list_de, "intcomma")
def test_intcomma_without_number_grouping(self):
# Regression for #17414
with translation.override("ja"):
self.humanize_tester([100], ["100"], "intcomma")
def test_intword(self):
# Positive integers.
test_list_positive = (
"100",
"1000000",
"1200000",
"1290000",
"1000000000",
"2000000000",
"6000000000000",
"1300000000000000",
"3500000000000000000000",
"8100000000000000000000000000000000",
("1" + "0" * 100),
("1" + "0" * 104),
)
result_list_positive = (
"100",
"1.0 million",
"1.2 million",
"1.3 million",
"1.0 billion",
"2.0 billion",
"6.0 trillion",
"1.3 quadrillion",
"3.5 sextillion",
"8.1 decillion",
"1.0 googol",
("1" + "0" * 104),
)
# Negative integers.
test_list_negative = ("-" + test for test in test_list_positive)
result_list_negative = ("-" + result for result in result_list_positive)
with translation.override("en"):
self.humanize_tester(
(*test_list_positive, *test_list_negative, None),
(*result_list_positive, *result_list_negative, None),
"intword",
)
def test_i18n_intcomma(self):
test_list = (
100,
1000,
10123,
10311,
1000000,
1234567.25,
"100",
"1000",
"10123",
"10311",
"1000000",
None,
)
result_list = (
"100",
"1.000",
"10.123",
"10.311",
"1.000.000",
"1.234.567,25",
"100",
"1.000",
"10.123",
"10.311",
"1.000.000",
None,
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override("de"):
self.humanize_tester(test_list, result_list, "intcomma")
def test_i18n_intword(self):
# Positive integers.
test_list_positive = (
"100",
"1000000",
"1200000",
"1290000",
"1000000000",
"2000000000",
"6000000000000",
)
result_list_positive = (
"100",
"1,0 Million",
"1,2 Millionen",
"1,3 Millionen",
"1,0 Milliarde",
"2,0 Milliarden",
"6,0 Billionen",
)
# Negative integers.
test_list_negative = ("-" + test for test in test_list_positive)
result_list_negative = ("-" + result for result in result_list_positive)
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override("de"):
self.humanize_tester(
(*test_list_positive, *test_list_negative),
(*result_list_positive, *result_list_negative),
"intword",
)
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
test_list.append(None)
result_list = (
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"10",
None,
)
with translation.override("en"):
self.humanize_tester(test_list, result_list, "apnumber")
def test_naturalday(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
someday = today - datetime.timedelta(days=10)
notdate = "I'm not a date value"
test_list = (today, yesterday, tomorrow, someday, notdate, None)
someday_result = defaultfilters.date(someday)
result_list = (
_("today"),
_("yesterday"),
_("tomorrow"),
someday_result,
"I'm not a date value",
None,
)
self.humanize_tester(test_list, result_list, "naturalday")
def test_naturalday_tz(self):
today = datetime.date.today()
tz_one = get_fixed_timezone(-720)
tz_two = get_fixed_timezone(720)
# Can be today or yesterday
date_one = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_one)
naturalday_one = humanize.naturalday(date_one)
# Can be today or tomorrow
date_two = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_two)
naturalday_two = humanize.naturalday(date_two)
# As 24h of difference they will never be the same
self.assertNotEqual(naturalday_one, naturalday_two)
def test_naturalday_uses_localtime(self):
# Regression for #18504
# This is 2012-03-08T19:30:00-06:00 in America/Chicago
dt = datetime.datetime(2012, 3, 9, 1, 30, tzinfo=datetime.UTC)
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with override_settings(TIME_ZONE="America/Chicago", USE_TZ=True):
with translation.override("en"):
self.humanize_tester([dt], ["yesterday"], "naturalday")
finally:
humanize.datetime = orig_humanize_datetime
def test_naturaltime(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
test_list = [
"test",
now,
now - datetime.timedelta(microseconds=1),
now - datetime.timedelta(seconds=1),
now - datetime.timedelta(seconds=30),
now - datetime.timedelta(minutes=1, seconds=30),
now - datetime.timedelta(minutes=2),
now - datetime.timedelta(hours=1, minutes=30, seconds=30),
now - datetime.timedelta(hours=23, minutes=50, seconds=50),
now - datetime.timedelta(days=1),
now - datetime.timedelta(days=500),
now + datetime.timedelta(seconds=1),
now + datetime.timedelta(seconds=30),
now + datetime.timedelta(minutes=1, seconds=30),
now + datetime.timedelta(minutes=2),
now + datetime.timedelta(hours=1, minutes=30, seconds=30),
now + datetime.timedelta(hours=23, minutes=50, seconds=50),
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=2, hours=6),
now + datetime.timedelta(days=500),
now.replace(tzinfo=naive()),
now.replace(tzinfo=datetime.UTC),
]
result_list = [
"test",
"now",
"now",
"a second ago",
"30\xa0seconds ago",
"a minute ago",
"2\xa0minutes ago",
"an hour ago",
"23\xa0hours ago",
"1\xa0day ago",
"1\xa0year, 4\xa0months ago",
"a second from now",
"30\xa0seconds from now",
"a minute from now",
"2\xa0minutes from now",
"an hour from now",
"23\xa0hours from now",
"1\xa0day from now",
"2\xa0days, 6\xa0hours from now",
"1\xa0year, 4\xa0months from now",
"now",
"now",
]
# Because of the DST change, 2 days and 6 hours after the chosen
# date in naive arithmetic is only 2 days and 5 hours after in
# aware arithmetic.
result_list_with_tz_support = result_list[:]
assert result_list_with_tz_support[-4] == "2\xa0days, 6\xa0hours from now"
result_list_with_tz_support[-4] = "2\xa0days, 5\xa0hours from now"
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with translation.override("en"):
self.humanize_tester(test_list, result_list, "naturaltime")
with override_settings(USE_TZ=True):
self.humanize_tester(
test_list, result_list_with_tz_support, "naturaltime"
)
finally:
humanize.datetime = orig_humanize_datetime
def test_naturaltime_as_documented(self):
"""
#23340 -- Verify the documented behavior of humanize.naturaltime.
"""
time_format = "%d %b %Y %H:%M:%S"
documented_now = datetime.datetime.strptime("17 Feb 2007 16:30:00", time_format)
test_data = (
("17 Feb 2007 16:30:00", "now"),
("17 Feb 2007 16:29:31", "29 seconds ago"),
("17 Feb 2007 16:29:00", "a minute ago"),
("17 Feb 2007 16:25:35", "4 minutes ago"),
("17 Feb 2007 15:30:29", "59 minutes ago"),
("17 Feb 2007 15:30:01", "59 minutes ago"),
("17 Feb 2007 15:30:00", "an hour ago"),
("17 Feb 2007 13:31:29", "2 hours ago"),
("16 Feb 2007 13:31:29", "1 day, 2 hours ago"),
("16 Feb 2007 13:30:01", "1 day, 2 hours ago"),
("16 Feb 2007 13:30:00", "1 day, 3 hours ago"),
("17 Feb 2007 16:30:30", "30 seconds from now"),
("17 Feb 2007 16:30:29", "29 seconds from now"),
("17 Feb 2007 16:31:00", "a minute from now"),
("17 Feb 2007 16:34:35", "4 minutes from now"),
("17 Feb 2007 17:30:29", "an hour from now"),
("17 Feb 2007 18:31:29", "2 hours from now"),
("18 Feb 2007 16:31:29", "1 day from now"),
("26 Feb 2007 18:31:29", "1 week, 2 days from now"),
)
class DocumentedMockDateTime(datetime.datetime):
@classmethod
def now(cls, tz=None):
if tz is None or tz.utcoffset(documented_now) is None:
return documented_now
else:
return documented_now.replace(tzinfo=tz) + tz.utcoffset(now)
orig_humanize_datetime = humanize.datetime
humanize.datetime = DocumentedMockDateTime
try:
for test_time_string, expected_natural_time in test_data:
with self.subTest(test_time_string):
test_time = datetime.datetime.strptime(
test_time_string, time_format
)
natural_time = humanize.naturaltime(test_time).replace("\xa0", " ")
self.assertEqual(expected_natural_time, natural_time)
finally:
humanize.datetime = orig_humanize_datetime
def test_inflection_for_timedelta(self):
"""
Translation of '%d day'/'%d month'/… may differ depending on the
context of the string it is inserted in.
"""
test_list = [
# "%(delta)s ago" translations
now - datetime.timedelta(days=1),
now - datetime.timedelta(days=2),
now - datetime.timedelta(days=30),
now - datetime.timedelta(days=60),
now - datetime.timedelta(days=500),
now - datetime.timedelta(days=865),
# "%(delta)s from now" translations
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=2),
now + datetime.timedelta(days=31),
now + datetime.timedelta(days=61),
now + datetime.timedelta(days=500),
now + datetime.timedelta(days=865),
]
result_list = [
"před 1\xa0dnem",
"před 2\xa0dny",
"před 1\xa0měsícem",
"před 2\xa0měsíci",
"před 1\xa0rokem, 4\xa0měsíci",
"před 2\xa0lety, 4\xa0měsíci",
"za 1\xa0den",
"za 2\xa0dny",
"za 1\xa0měsíc",
"za 2\xa0měsíce",
"za 1\xa0rok, 4\xa0měsíce",
"za 2\xa0roky, 4\xa0měsíce",
]
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
# Choose a language with different
# naturaltime-past/naturaltime-future translations.
with translation.override("cs"):
self.humanize_tester(test_list, result_list, "naturaltime")
finally:
humanize.datetime = orig_humanize_datetime
|
HumanizeTests
|
python
|
pydata__xarray
|
xarray/tests/test_plot.py
|
{
"start": 29722,
"end": 32665
}
|
class ____(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self) -> None:
self.darray = DataArray(easy_array((2, 3, 4)))
def test_step(self) -> None:
hdl = self.darray[0, 0].plot.step()
assert "steps" in hdl[0].get_drawstyle()
@pytest.mark.parametrize("where", ["pre", "post", "mid"])
def test_step_with_where(self, where) -> None:
hdl = self.darray[0, 0].plot.step(where=where)
assert hdl[0].get_drawstyle() == f"steps-{where}"
def test_step_with_hue(self) -> None:
hdl = self.darray[0].plot.step(hue="dim_2")
assert hdl[0].get_drawstyle() == "steps-pre"
@pytest.mark.parametrize("where", ["pre", "post", "mid"])
def test_step_with_hue_and_where(self, where) -> None:
hdl = self.darray[0].plot.step(hue="dim_2", where=where)
assert hdl[0].get_drawstyle() == f"steps-{where}"
def test_drawstyle_steps(self) -> None:
hdl = self.darray[0].plot(hue="dim_2", drawstyle="steps") # type: ignore[call-arg]
assert hdl[0].get_drawstyle() == "steps"
@pytest.mark.parametrize("where", ["pre", "post", "mid"])
def test_drawstyle_steps_with_where(self, where) -> None:
hdl = self.darray[0].plot(hue="dim_2", drawstyle=f"steps-{where}") # type: ignore[call-arg]
assert hdl[0].get_drawstyle() == f"steps-{where}"
def test_coord_with_interval_step(self) -> None:
"""Test step plot with intervals."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot.step()
line = plt.gca().lines[0]
assert isinstance(line, mpl.lines.Line2D)
assert len(np.asarray(line.get_xdata())) == ((len(bins) - 1) * 2)
def test_coord_with_interval_step_x(self) -> None:
"""Test step plot with intervals explicitly on x axis."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot.step(x="dim_0_bins")
line = plt.gca().lines[0]
assert isinstance(line, mpl.lines.Line2D)
assert len(np.asarray(line.get_xdata())) == ((len(bins) - 1) * 2)
def test_coord_with_interval_step_y(self) -> None:
"""Test step plot with intervals explicitly on y axis."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot.step(y="dim_0_bins")
line = plt.gca().lines[0]
assert isinstance(line, mpl.lines.Line2D)
assert len(np.asarray(line.get_xdata())) == ((len(bins) - 1) * 2)
def test_coord_with_interval_step_x_and_y_raises_valueeerror(self) -> None:
"""Test that step plot with intervals both on x and y axes raises an error."""
arr = xr.DataArray(
[pd.Interval(0, 1), pd.Interval(1, 2)],
coords=[("x", [pd.Interval(0, 1), pd.Interval(1, 2)])],
)
with pytest.raises(TypeError, match="intervals against intervals"):
arr.plot.step()
|
TestPlotStep
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_custom_resource_column_definition.py
|
{
"start": 383,
"end": 9655
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'description': 'str',
'format': 'str',
'json_path': 'str',
'name': 'str',
'priority': 'int',
'type': 'str'
}
attribute_map = {
'description': 'description',
'format': 'format',
'json_path': 'jsonPath',
'name': 'name',
'priority': 'priority',
'type': 'type'
}
def __init__(self, description=None, format=None, json_path=None, name=None, priority=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1CustomResourceColumnDefinition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._format = None
self._json_path = None
self._name = None
self._priority = None
self._type = None
self.discriminator = None
if description is not None:
self.description = description
if format is not None:
self.format = format
self.json_path = json_path
self.name = name
if priority is not None:
self.priority = priority
self.type = type
@property
def description(self):
"""Gets the description of this V1CustomResourceColumnDefinition. # noqa: E501
description is a human readable description of this column. # noqa: E501
:return: The description of this V1CustomResourceColumnDefinition. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1CustomResourceColumnDefinition.
description is a human readable description of this column. # noqa: E501
:param description: The description of this V1CustomResourceColumnDefinition. # noqa: E501
:type: str
"""
self._description = description
@property
def format(self):
"""Gets the format of this V1CustomResourceColumnDefinition. # noqa: E501
format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist clients in identifying whether the column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.  # noqa: E501
:return: The format of this V1CustomResourceColumnDefinition. # noqa: E501
:rtype: str
"""
return self._format
@format.setter
def format(self, format):
"""Sets the format of this V1CustomResourceColumnDefinition.
format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist clients in identifying whether the column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.  # noqa: E501
:param format: The format of this V1CustomResourceColumnDefinition. # noqa: E501
:type: str
"""
self._format = format
@property
def json_path(self):
"""Gets the json_path of this V1CustomResourceColumnDefinition. # noqa: E501
jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column. # noqa: E501
:return: The json_path of this V1CustomResourceColumnDefinition. # noqa: E501
:rtype: str
"""
return self._json_path
@json_path.setter
def json_path(self, json_path):
"""Sets the json_path of this V1CustomResourceColumnDefinition.
jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column. # noqa: E501
:param json_path: The json_path of this V1CustomResourceColumnDefinition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and json_path is None: # noqa: E501
raise ValueError("Invalid value for `json_path`, must not be `None`") # noqa: E501
self._json_path = json_path
@property
def name(self):
"""Gets the name of this V1CustomResourceColumnDefinition. # noqa: E501
name is a human readable name for the column. # noqa: E501
:return: The name of this V1CustomResourceColumnDefinition. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1CustomResourceColumnDefinition.
name is a human readable name for the column. # noqa: E501
:param name: The name of this V1CustomResourceColumnDefinition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def priority(self):
"""Gets the priority of this V1CustomResourceColumnDefinition. # noqa: E501
priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0. # noqa: E501
:return: The priority of this V1CustomResourceColumnDefinition. # noqa: E501
:rtype: int
"""
return self._priority
@priority.setter
def priority(self, priority):
"""Sets the priority of this V1CustomResourceColumnDefinition.
priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0. # noqa: E501
:param priority: The priority of this V1CustomResourceColumnDefinition. # noqa: E501
:type: int
"""
self._priority = priority
@property
def type(self):
"""Gets the type of this V1CustomResourceColumnDefinition. # noqa: E501
type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. # noqa: E501
:return: The type of this V1CustomResourceColumnDefinition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1CustomResourceColumnDefinition.
type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. # noqa: E501
:param type: The type of this V1CustomResourceColumnDefinition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CustomResourceColumnDefinition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CustomResourceColumnDefinition):
return True
return self.to_dict() != other.to_dict()
|
V1CustomResourceColumnDefinition
|
python
|
huggingface__transformers
|
src/transformers/models/dbrx/modeling_dbrx.py
|
{
"start": 12962,
"end": 14681
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.mlp = DbrxExpertGLU(config)
self.hidden_size = config.hidden_size
self.ffn_hidden_size = config.ffn_hidden_size
self.num_experts = config.moe_num_experts
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.ffn_hidden_size)
next_states = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
split_expert_shape = (-1, self.ffn_hidden_size, self.hidden_size)
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
with torch.no_grad():
idx, token_idx = torch.where(expert_mask[expert_idx])
v1 = self.mlp.v1.view(split_expert_shape)[expert_idx]
w1 = self.mlp.w1.view(split_expert_shape)[expert_idx]
w2 = self.mlp.w2.view(split_expert_shape)[expert_idx]
states = self.mlp(hidden_states[token_idx], w1, v1, w2)
states = states.view(-1, self.ffn_hidden_size) * top_k_weights[token_idx, idx, None]
next_states.index_add_(0, token_idx, states)
next_states = next_states.view(batch_size, -1, self.ffn_hidden_size)
return next_states
|
DbrxExperts
|
python
|
astropy__astropy
|
astropy/io/ascii/daophot.py
|
{
"start": 457,
"end": 7283
}
|
class ____(core.BaseHeader):
"""
Read the header from a file produced by the IRAF DAOphot routine.
"""
comment = r"\s*#K"
# Regex for extracting the format strings
re_format = re.compile(r"%-?(\d+)\.?\d?[sdfg]")
re_header_keyword = re.compile(
r"[#]K\s+ (?P<name> \w+)\s* = (?P<stuff> .+) $", re.VERBOSE
)
aperture_values = ()
def __init__(self):
core.BaseHeader.__init__(self)
def parse_col_defs(self, grouped_lines_dict):
"""Parse a series of column definition lines.
Examples
--------
When parsing, there may be several such blocks in a single file
(where continuation characters have already been stripped).
#N ID XCENTER YCENTER MAG MERR MSKY NITER
#U ## pixels pixels magnitudes magnitudes counts ##
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
"""
line_ids = ("#N", "#U", "#F")
coldef_dict = defaultdict(list)
# Function to strip identifier lines
stripper = lambda s: s[2:].strip(" \\")
for defblock in zip(*map(grouped_lines_dict.get, line_ids)):
for key, line in zip(line_ids, map(stripper, defblock)):
coldef_dict[key].append(line.split())
# Save the original columns so we can use it later to reconstruct the
# original header for writing
if self.data.is_multiline:
# Database contains multi-aperture data.
# Autogen column names, units, formats from last row of column headers
last_names, last_units, last_formats = list(
zip(*map(coldef_dict.get, line_ids))
)[-1]
N_multiline = len(self.data.first_block)
for i in np.arange(1, N_multiline + 1).astype("U2"):
# extra column names eg. RAPERT2, SUM2 etc...
extended_names = list(map("".join, zip(last_names, itt.repeat(i))))
if i == "1": # Enumerate the names starting at 1
coldef_dict["#N"][-1] = extended_names
else:
coldef_dict["#N"].append(extended_names)
coldef_dict["#U"].append(last_units)
coldef_dict["#F"].append(last_formats)
# Get column widths from column format specifiers
get_col_width = lambda s: int(self.re_format.search(s).groups()[0])
col_widths = [
[get_col_width(f) for f in formats] for formats in coldef_dict["#F"]
]
# original data format might be shorter than 80 characters and filled with spaces
row_widths = np.fromiter(map(sum, col_widths), int)
row_short = Daophot.table_width - row_widths
# fix last column widths
for w, r in zip(col_widths, row_short):
w[-1] += r
self.col_widths = col_widths
# merge the multi-line header data into single line data
return {k: list(itt.chain(*v)) for (k, v) in coldef_dict.items()}
def update_meta(self, lines, meta):
"""
Extract table-level keywords for DAOphot table. These are indicated by
a leading '#K ' prefix.
"""
table_meta = meta["table"]
# self.lines = self.get_header_lines(lines)
Nlines = len(self.lines)
if Nlines > 0:
# Group the header lines according to their line identifiers (#K,
# #N, #U, #F or just # (spacer line)) function that grabs the line
# identifier
get_line_id = lambda s: s.split(None, 1)[0]
# Group lines by the line identifier ('#N', '#U', '#F', '#K') and
# capture line index
gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines)))
# Groups of lines and their indices
grouped_lines, gix = zip(*groups)
# Dict of line groups keyed by line identifiers
grouped_lines_dict = dict(zip(gid, grouped_lines))
# Update the table_meta keywords if necessary
if "#K" in grouped_lines_dict:
keywords = dict(
map(self.extract_keyword_line, grouped_lines_dict["#K"])
)
table_meta["keywords"] = keywords
coldef_dict = self.parse_col_defs(grouped_lines_dict)
line_ids = ("#N", "#U", "#F")
for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)):
meta["cols"][name] = {"unit": unit, "format": fmt}
self.meta = meta
self.names = coldef_dict["#N"]
def extract_keyword_line(self, line):
"""
Extract info from a header keyword line (#K).
"""
m = self.re_header_keyword.match(line)
if m:
vals = m.group("stuff").strip().rsplit(None, 2)
keyword_dict = {
"units": vals[-2],
"format": vals[-1],
"value": (vals[0] if len(vals) > 2 else ""),
}
return m.group("name"), keyword_dict
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a DAOphot
header. The DAOphot header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
Returns
-------
col : list
List of table Columns
"""
if not self.names:
raise core.InconsistentTableError("No column names found in DAOphot header")
# Create the list of io.ascii column objects
self._set_cols_from_names()
# Set unit and format as needed.
coldefs = self.meta["cols"]
for col in self.cols:
unit, fmt = map(coldefs[col.name].get, ("unit", "format"))
if unit != "##":
col.unit = unit
if fmt != "##":
col.format = fmt
# Set column start and end positions.
col_width = list(itt.chain.from_iterable(self.col_widths))
ends = np.cumsum(col_width)
starts = ends - col_width
for i, col in enumerate(self.cols):
col.start, col.end = starts[i], ends[i]
col.span = col.end - col.start
if hasattr(col, "format"):
if any(x in col.format for x in "fg"):
col.type = core.FloatType
elif "d" in col.format:
col.type = core.IntType
elif "s" in col.format:
col.type = core.StrType
# INDEF is the missing value marker
self.data.fill_values.append(("INDEF", "0"))
|
DaophotHeader
|
python
|
facebook__pyre-check
|
client/tests/filesystem_test.py
|
{
"start": 260,
"end": 597
}
|
class ____(unittest.TestCase):
def test_expand_relative_path__globs_are_unchanged(self) -> None:
self.assertEqual(expand_relative_path("foo", "bar/*/baz"), "foo/bar/*/baz")
self.assertEqual(
expand_relative_path("dontcare", "/absolute/path/*/foo"),
"/absolute/path/*/foo",
)
|
FilesystemTest
|
python
|
numba__numba
|
numba/tests/test_dyn_array.py
|
{
"start": 45129,
"end": 49348
}
|
class ____(MemoryLeakMixin, TestCase):
"""
Tests for np.concatenate().
"""
def _3d_arrays(self):
a = np.arange(24).reshape((4, 3, 2))
b = a + 10
c = (b + 10).copy(order='F')
d = (c + 10)[::-1]
e = (d + 10)[...,::-1]
return a, b, c, d, e
@contextlib.contextmanager
def assert_invalid_sizes_over_dim(self, axis):
with self.assertRaises(ValueError) as raises:
yield
self.assertIn("input sizes over dimension %d do not match" % axis,
str(raises.exception))
def test_3d(self):
pyfunc = np_concatenate2
cfunc = nrtjit(pyfunc)
def check(a, b, c, axis):
for ax in (axis, -3 + axis):
expected = pyfunc(a, b, c, axis=ax)
got = cfunc(a, b, c, axis=ax)
self.assertPreciseEqual(got, expected)
def check_all_axes(a, b, c):
for axis in range(3):
check(a, b, c, axis)
a, b, c, d, e = self._3d_arrays()
# Inputs with equal sizes
# C, C, C
check_all_axes(a, b, b)
# C, C, F
check_all_axes(a, b, c)
# F, F, F
check_all_axes(a.T, b.T, a.T)
# F, F, C
check_all_axes(a.T, b.T, c.T)
# F, F, A
check_all_axes(a.T, b.T, d.T)
# A, A, A
# (note Numpy may select the layout differently for other inputs)
check_all_axes(d.T, e.T, d.T)
# Inputs with compatible sizes
check(a[1:], b, c[::-1], axis=0)
check(a, b[:,1:], c, axis=1)
check(a, b, c[:,:,1:], axis=2)
# Different but compatible dtypes
check_all_axes(a, b.astype(np.float64), b)
# Exceptions leak references
self.disable_leak_check()
# Incompatible sizes
for axis in (1, 2, -2, -1):
with self.assert_invalid_sizes_over_dim(0):
cfunc(a[1:], b, b, axis)
for axis in (0, 2, -3, -1):
with self.assert_invalid_sizes_over_dim(1):
cfunc(a, b[:,1:], b, axis)
def test_3d_no_axis(self):
pyfunc = np_concatenate1
cfunc = nrtjit(pyfunc)
def check(a, b, c):
expected = pyfunc(a, b, c)
got = cfunc(a, b, c)
self.assertPreciseEqual(got, expected)
a, b, c, d, e = self._3d_arrays()
# Inputs with equal sizes
# C, C, C
check(a, b, b)
# C, C, F
check(a, b, c)
# F, F, F
check(a.T, b.T, a.T)
# F, F, C
check(a.T, b.T, c.T)
# F, F, A
check(a.T, b.T, d.T)
# A, A, A
# (note Numpy may select the layout differently for other inputs)
check(d.T, e.T, d.T)
# Inputs with compatible sizes
check(a[1:], b, c[::-1])
# Exceptions leak references
self.disable_leak_check()
# Incompatible sizes
with self.assert_invalid_sizes_over_dim(1):
cfunc(a, b[:,1:], b)
def test_typing_errors(self):
pyfunc = np_concatenate1
cfunc = nrtjit(pyfunc)
a = np.arange(15)
b = a.reshape((3, 5))
c = a.astype(np.dtype([('x', np.int8)]))
d = np.array(42)
# Different dimensionalities
with self.assertTypingError() as raises:
cfunc(a, b, b)
self.assertIn("all the input arrays must have same number of dimensions",
str(raises.exception))
# Incompatible dtypes
with self.assertTypingError() as raises:
cfunc(a, c, c)
self.assertIn("input arrays must have compatible dtypes",
str(raises.exception))
# 0-d arrays
with self.assertTypingError() as raises:
cfunc(d, d, d)
self.assertIn("zero-dimensional arrays cannot be concatenated",
str(raises.exception))
# non-tuple input
with self.assertTypingError() as raises:
cfunc(c, 1, c)
self.assertIn('expecting a non-empty tuple of arrays', str(raises.exception))
@unittest.skipUnless(hasattr(np, "stack"), "this Numpy doesn't have np.stack()")
|
TestNpConcatenate
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterpolargl/_selected.py
|
{
"start": 233,
"end": 3413
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.selected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatterpolargl.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.selected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatterpolargl.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatterpolargl.selected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolargl.selected.Te
xtfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.Selected`
marker
:class:`plotly.graph_objects.scatterpolargl.selected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolargl.selected.Te
xtfont` instance or dict with compatible properties
Returns
-------
Selected
"""
super().__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Selected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Selected
|
python
|
realpython__materials
|
python-getter-setter/person2.py
|
{
"start": 0,
"end": 205
}
|
class ____:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
|
Person
|
python
|
pypa__packaging
|
src/packaging/version.py
|
{
"start": 1522,
"end": 4763
}
|
class ____:
__slots__ = ()
@property
def _key(self) -> tuple[Any, ...]:
raise NotImplementedError # pragma: no cover
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: _BaseVersion) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
# Note that ++ doesn't behave identically on CPython and PyPy, so not using it here
_VERSION_PATTERN = r"""
v?+ # optional leading v
(?:
(?:(?P<epoch>[0-9]+)!)?+ # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*+) # release segment
(?P<pre> # pre-release
[._-]?+
(?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
[._-]?+
(?P<pre_n>[0-9]+)?
)?+
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[._-]?
(?P<post_l>post|rev|r)
[._-]?
(?P<post_n2>[0-9]+)?
)
)?+
(?P<dev> # dev release
[._-]?+
(?P<dev_l>dev)
[._-]?+
(?P<dev_n>[0-9]+)?
)?+
)
(?:\+
(?P<local> # local version
[a-z0-9]+
(?:[._-][a-z0-9]+)*+
)
)?+
"""
_VERSION_PATTERN_OLD = _VERSION_PATTERN.replace("*+", "*").replace("?+", "?")
VERSION_PATTERN = (
_VERSION_PATTERN_OLD if sys.version_info < (3, 11) else _VERSION_PATTERN
)
"""
A string containing the regular expression used to match a valid version.
The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.
:meta hide-value:
"""
|
_BaseVersion
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 13114,
"end": 13835
}
|
class ____(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ("template", "names", "with_context")
template: "Expr"
names: t.List[t.Union[str, t.Tuple[str, str]]]
with_context: bool
|
FromImport
|
python
|
ray-project__ray
|
python/ray/air/_internal/device_manager/torch_device_manager.py
|
{
"start": 67,
"end": 1138
}
|
class ____(ABC):
"""This class contains the function needed for supporting
an acclerator family in Ray AI Library.
"""
def is_available(self) -> bool:
"""Validate if device is available."""
...
def get_devices(self) -> List[torch.device]:
"""Gets the correct torch device configured for this process"""
...
def set_device(self, device: Union[torch.device, int, str, None]):
"""Set the correct device for this process"""
...
def supports_stream(self) -> bool:
"""Validate if the device type support create a stream"""
...
def create_stream(self, device: torch.device):
"""Create a device stream"""
...
def get_stream_context(self, stream):
"""Get a stream context of device. If device didn't support stream,
this should return a empty context manager instead of None.
"""
...
def get_current_stream(self):
"""Get current stream on accelerators like torch.cuda.current_stream"""
...
|
TorchDeviceManager
|
python
|
pytorch__pytorch
|
test/distributed/elastic/rendezvous/api_test.py
|
{
"start": 7155,
"end": 7715
}
|
class ____(RendezvousHandler):
def __init__(self, params: RendezvousParameters) -> None:
self.params = params
def get_backend(self) -> str:
return "dummy_backend"
def next_rendezvous(self) -> RendezvousInfo:
raise NotImplementedError
def is_closed(self) -> bool:
return False
def set_closed(self) -> None:
pass
def num_nodes_waiting(self) -> int:
return 0
def get_run_id(self) -> str:
return ""
def shutdown(self) -> bool:
return False
|
_DummyRendezvousHandler
|
python
|
mlflow__mlflow
|
mlflow/genai/scorers/registry.py
|
{
"start": 1180,
"end": 3624
}
|
class ____(metaclass=ABCMeta):
"""
Abstract class defining the interface for scorer store implementations.
This class defines the API interface for scorer operations that can be implemented
by different backend stores (e.g., MLflow tracking store, Databricks API).
"""
@abstractmethod
def register_scorer(self, experiment_id: str | None, scorer: Scorer) -> int | None:
"""
Register a scorer for an experiment.
Args:
experiment_id: The experiment ID.
scorer: The scorer object.
Returns:
The registered scorer version. If versioning is not supported, return None.
"""
@abstractmethod
def list_scorers(self, experiment_id) -> list["Scorer"]:
"""
List all scorers for an experiment.
Args:
experiment_id: The experiment ID.
Returns:
List of mlflow.genai.scorers.Scorer objects (latest version for each scorer name).
"""
@abstractmethod
def get_scorer(self, experiment_id, name, version=None) -> "Scorer":
"""
Get a specific scorer for an experiment.
Args:
experiment_id: The experiment ID.
name: The scorer name.
version: The scorer version. If None, returns the scorer with maximum version.
Returns:
The `mlflow.genai.scorers.Scorer` object.
Raises:
mlflow.MlflowException: If scorer is not found.
"""
@abstractmethod
def list_scorer_versions(self, experiment_id, name) -> list[tuple["Scorer", int]]:
"""
List all versions of a specific scorer for an experiment.
Args:
experiment_id: The experiment ID.
name: The scorer name.
Returns:
A list of tuples, each containing an `mlflow.genai.scorers.Scorer` object
and the version number.
Raises:
mlflow.MlflowException: If scorer is not found.
"""
@abstractmethod
def delete_scorer(self, experiment_id, name, version):
"""
Delete a scorer by name and optional version.
Args:
experiment_id: The experiment ID.
name: The scorer name.
version: The scorer version to delete.
Raises:
mlflow.MlflowException: If scorer is not found.
"""
|
AbstractScorerStore
|
python
|
Textualize__textual
|
src/textual/containers.py
|
{
"start": 5382,
"end": 5658
}
|
class ____(ScrollableContainer):
"""A container with horizontal layout and an automatic scrollbar on the X axis."""
DEFAULT_CSS = """
HorizontalScroll {
layout: horizontal;
overflow-y: hidden;
overflow-x: auto;
}
"""
|
HorizontalScroll
|
python
|
PrefectHQ__prefect
|
tests/cli/test_flow_run.py
|
{
"start": 15430,
"end": 27316
}
|
class ____:
LOGS_DEFAULT_PAGE_SIZE = 200
async def test_when_num_logs_smaller_than_page_size_then_no_pagination(
self, flow_run_factory
):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE - 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(self.LOGS_DEFAULT_PAGE_SIZE - 1)
],
)
async def test_when_num_logs_greater_than_page_size_then_pagination(
self, flow_run_factory
):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(self.LOGS_DEFAULT_PAGE_SIZE + 1)
],
)
async def test_when_flow_run_not_found_then_exit_with_error(self, flow_run_factory):
# Given
bad_id = str(uuid4())
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
bad_id,
],
expected_code=1,
expected_output_contains=f"Flow run '{bad_id}' not found!\n",
)
async def test_when_num_logs_smaller_than_page_size_with_head_then_no_pagination(
self, flow_run_factory
):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--head",
"--num-logs",
"10",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(10)
],
expected_line_count=10,
)
async def test_when_num_logs_greater_than_page_size_with_head_then_pagination(
self, flow_run_factory
):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--head",
"--num-logs",
self.LOGS_DEFAULT_PAGE_SIZE + 1,
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(self.LOGS_DEFAULT_PAGE_SIZE + 1)
],
expected_line_count=self.LOGS_DEFAULT_PAGE_SIZE + 1,
)
async def test_when_num_logs_greater_than_page_size_with_head_outputs_correct_num_logs(
self, flow_run_factory
):
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 50)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--head",
"--num-logs",
self.LOGS_DEFAULT_PAGE_SIZE + 50,
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(self.LOGS_DEFAULT_PAGE_SIZE + 50)
],
expected_line_count=self.LOGS_DEFAULT_PAGE_SIZE + 50,
)
async def test_default_head_returns_default_num_logs(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--head",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(LOGS_WITH_LIMIT_FLAG_DEFAULT_NUM_LOGS)
],
expected_line_count=LOGS_WITH_LIMIT_FLAG_DEFAULT_NUM_LOGS,
)
async def test_h_and_n_shortcuts_for_head_and_num_logs(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"-h",
"-n",
"10",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(10)
],
expected_line_count=10,
)
async def test_num_logs_passed_standalone_returns_num_logs(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--num-logs",
"10",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(10)
],
expected_line_count=10,
)
@pytest.mark.skip(reason="we need to disable colors for this test to pass")
async def test_when_num_logs_is_smaller_than_one_then_exit_with_error(
self, flow_run_factory
):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--num-logs",
"0",
],
expected_code=2,
expected_output_contains=(
"Invalid value for '--num-logs' / '-n': 0 is not in the range x>=1."
),
)
async def test_when_num_logs_passed_with_reverse_param_and_num_logs(
self, flow_run_factory
):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--num-logs",
"10",
"--reverse",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(
self.LOGS_DEFAULT_PAGE_SIZE, self.LOGS_DEFAULT_PAGE_SIZE - 10, -1
)
],
expected_line_count=10,
)
async def test_passing_head_and_tail_raises(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--tail",
"--num-logs",
"10",
"--head",
],
expected_code=1,
expected_output_contains=(
"Please provide either a `head` or `tail` option but not both."
),
)
async def test_default_tail_returns_default_num_logs(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=["flow-run", "logs", str(flow_run.id), "-t"],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(
self.LOGS_DEFAULT_PAGE_SIZE - 9, self.LOGS_DEFAULT_PAGE_SIZE
)
],
expected_line_count=20,
)
async def test_reverse_tail_with_num_logs(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--tail",
"--num-logs",
"10",
"--reverse",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(
self.LOGS_DEFAULT_PAGE_SIZE, self.LOGS_DEFAULT_PAGE_SIZE - 10, -1
)
],
expected_line_count=10,
)
async def test_reverse_tail_returns_default_num_logs(self, flow_run_factory):
# Given
flow_run = await flow_run_factory(num_logs=self.LOGS_DEFAULT_PAGE_SIZE + 1)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--tail",
"--reverse",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(
self.LOGS_DEFAULT_PAGE_SIZE, self.LOGS_DEFAULT_PAGE_SIZE - 20, -1
)
],
expected_line_count=20,
)
async def test_when_num_logs_greater_than_page_size_with_tail_outputs_correct_num_logs(
self, flow_run_factory
):
# Given
num_logs = 300
flow_run = await flow_run_factory(num_logs=num_logs)
# When/Then
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"flow-run",
"logs",
str(flow_run.id),
"--tail",
"--num-logs",
"251",
],
expected_code=0,
expected_output_contains=[
f"Flow run '{flow_run.name}' - Log {i} from flow_run {flow_run.id}."
for i in range(num_logs - 250, num_logs)
],
expected_line_count=251,
)
|
TestFlowRunLogs
|
python
|
django__django
|
tests/auth_tests/test_auth_backends.py
|
{
"start": 34323,
"end": 35018
}
|
class ____:
"""
Always raises PermissionDenied in `authenticate`, `has_perm` and
`has_module_perms`.
"""
def authenticate(self, request, username=None, password=None):
raise PermissionDenied
async def aauthenticate(self, request, username=None, password=None):
raise PermissionDenied
def has_perm(self, user_obj, perm, obj=None):
raise PermissionDenied
async def ahas_perm(self, user_obj, perm, obj=None):
raise PermissionDenied
def has_module_perms(self, user_obj, app_label):
raise PermissionDenied
async def ahas_module_perms(self, user_obj, app_label):
raise PermissionDenied
|
PermissionDeniedBackend
|
python
|
ansible__ansible
|
lib/ansible/plugins/inventory/generator.py
|
{
"start": 3933,
"end": 6542
}
|
class ____(BaseInventoryPlugin):
""" constructs groups and vars using Jinja2 template expressions """
NAME = 'generator'
# implicit trust behavior is already added by the YAML parser invoked by the loader
def __init__(self):
super(InventoryModule, self).__init__()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def template(self, pattern, variables):
# Allow pass-through of data structures for templating later (if applicable).
# This limitation was part of the original plugin implementation and was updated to maintain feature parity with the new templating API.
if not isinstance(pattern, str):
return pattern
return self.templar.copy_with_new_env(available_variables=variables).template(pattern)
def add_parents(self, inventory, child, parents, template_vars):
for parent in parents:
groupname = self.template(parent.get('name'), template_vars)
if not groupname:
raise AnsibleParserError(f"Element {child} has a parent with no name.")
if groupname not in inventory.groups:
inventory.add_group(groupname)
group = inventory.groups[groupname]
for (k, v) in parent.get('vars', {}).items():
group.set_variable(k, self.template(v, template_vars))
inventory.add_child(groupname, child)
self.add_parents(inventory, groupname, parent.get('parents', []), template_vars)
def parse(self, inventory, loader, path, cache=False):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
config = self._read_config_data(path)
if self.get_option('use_extra_vars'):
extra_vars = load_extra_vars(loader)
else:
extra_vars = {}
template_inputs = product(*config['layers'].values())
for item in template_inputs:
template_vars = dict()
template_vars.update(extra_vars)
for i, key in enumerate(config['layers'].keys()):
template_vars[key] = item[i]
host = self.template(config['hosts']['name'], template_vars)
inventory.add_host(host)
self.add_parents(inventory, host, config['hosts'].get('parents', []), template_vars)
|
InventoryModule
|
python
|
pypa__pip
|
src/pip/_vendor/platformdirs/api.py
|
{
"start": 250,
"end": 9281
}
|
class ____(ABC): # noqa: PLR0904
"""Abstract base class for platform directories."""
def __init__( # noqa: PLR0913, PLR0917
self,
appname: str | None = None,
appauthor: str | Literal[False] | None = None,
version: str | None = None,
roaming: bool = False, # noqa: FBT001, FBT002
multipath: bool = False, # noqa: FBT001, FBT002
opinion: bool = True, # noqa: FBT001, FBT002
ensure_exists: bool = False, # noqa: FBT001, FBT002
) -> None:
"""
Create a new platform directory.
:param appname: See `appname`.
:param appauthor: See `appauthor`.
:param version: See `version`.
:param roaming: See `roaming`.
:param multipath: See `multipath`.
:param opinion: See `opinion`.
:param ensure_exists: See `ensure_exists`.
"""
self.appname = appname #: The name of application.
self.appauthor = appauthor
"""
The name of the app author or distributing body for this application.
Typically, it is the owning company name. Defaults to `appname`. You may pass ``False`` to disable it.
"""
self.version = version
"""
An optional version path element to append to the path.
You might want to use this if you want multiple versions of your app to be able to run independently. If used,
this would typically be ``<major>.<minor>``.
"""
self.roaming = roaming
"""
Whether to use the roaming appdata directory on Windows.
That means that for users on a Windows network setup for roaming profiles, this user data will be synced on
login (see
`here <https://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
"""
self.multipath = multipath
"""
An optional parameter which indicates that the entire list of data dirs should be returned.
By default, the first item would only be returned.
"""
self.opinion = opinion  #: A flag indicating whether to use opinionated values.
self.ensure_exists = ensure_exists
"""
Optionally create the directory (and any missing parents) upon access if it does not exist.
By default, no directories are created.
"""
def _append_app_name_and_version(self, *base: str) -> str:
params = list(base[1:])
if self.appname:
params.append(self.appname)
if self.version:
params.append(self.version)
path = os.path.join(base[0], *params) # noqa: PTH118
self._optionally_create_directory(path)
return path
def _optionally_create_directory(self, path: str) -> None:
if self.ensure_exists:
Path(path).mkdir(parents=True, exist_ok=True)
def _first_item_as_path_if_multipath(self, directory: str) -> Path:
if self.multipath:
# If multipath is True, the first path is returned.
directory = directory.partition(os.pathsep)[0]
return Path(directory)
@property
@abstractmethod
def user_data_dir(self) -> str:
""":return: data directory tied to the user"""
@property
@abstractmethod
def site_data_dir(self) -> str:
""":return: data directory shared by users"""
@property
@abstractmethod
def user_config_dir(self) -> str:
""":return: config directory tied to the user"""
@property
@abstractmethod
def site_config_dir(self) -> str:
""":return: config directory shared by the users"""
@property
@abstractmethod
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user"""
@property
@abstractmethod
def site_cache_dir(self) -> str:
""":return: cache directory shared by users"""
@property
@abstractmethod
def user_state_dir(self) -> str:
""":return: state directory tied to the user"""
@property
@abstractmethod
def user_log_dir(self) -> str:
""":return: log directory tied to the user"""
@property
@abstractmethod
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user"""
@property
@abstractmethod
def user_downloads_dir(self) -> str:
""":return: downloads directory tied to the user"""
@property
@abstractmethod
def user_pictures_dir(self) -> str:
""":return: pictures directory tied to the user"""
@property
@abstractmethod
def user_videos_dir(self) -> str:
""":return: videos directory tied to the user"""
@property
@abstractmethod
def user_music_dir(self) -> str:
""":return: music directory tied to the user"""
@property
@abstractmethod
def user_desktop_dir(self) -> str:
""":return: desktop directory tied to the user"""
@property
@abstractmethod
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user"""
@property
@abstractmethod
def site_runtime_dir(self) -> str:
""":return: runtime directory shared by users"""
@property
def user_data_path(self) -> Path:
""":return: data path tied to the user"""
return Path(self.user_data_dir)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users"""
return Path(self.site_data_dir)
@property
def user_config_path(self) -> Path:
""":return: config path tied to the user"""
return Path(self.user_config_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users"""
return Path(self.site_config_dir)
@property
def user_cache_path(self) -> Path:
""":return: cache path tied to the user"""
return Path(self.user_cache_dir)
@property
def site_cache_path(self) -> Path:
""":return: cache path shared by users"""
return Path(self.site_cache_dir)
@property
def user_state_path(self) -> Path:
""":return: state path tied to the user"""
return Path(self.user_state_dir)
@property
def user_log_path(self) -> Path:
""":return: log path tied to the user"""
return Path(self.user_log_dir)
@property
def user_documents_path(self) -> Path:
""":return: documents a path tied to the user"""
return Path(self.user_documents_dir)
@property
def user_downloads_path(self) -> Path:
""":return: downloads path tied to the user"""
return Path(self.user_downloads_dir)
@property
def user_pictures_path(self) -> Path:
""":return: pictures path tied to the user"""
return Path(self.user_pictures_dir)
@property
def user_videos_path(self) -> Path:
""":return: videos path tied to the user"""
return Path(self.user_videos_dir)
@property
def user_music_path(self) -> Path:
""":return: music path tied to the user"""
return Path(self.user_music_dir)
@property
def user_desktop_path(self) -> Path:
""":return: desktop path tied to the user"""
return Path(self.user_desktop_dir)
@property
def user_runtime_path(self) -> Path:
""":return: runtime path tied to the user"""
return Path(self.user_runtime_dir)
@property
def site_runtime_path(self) -> Path:
""":return: runtime path shared by users"""
return Path(self.site_runtime_dir)
def iter_config_dirs(self) -> Iterator[str]:
""":yield: all user and site configuration directories."""
yield self.user_config_dir
yield self.site_config_dir
def iter_data_dirs(self) -> Iterator[str]:
""":yield: all user and site data directories."""
yield self.user_data_dir
yield self.site_data_dir
def iter_cache_dirs(self) -> Iterator[str]:
""":yield: all user and site cache directories."""
yield self.user_cache_dir
yield self.site_cache_dir
def iter_runtime_dirs(self) -> Iterator[str]:
""":yield: all user and site runtime directories."""
yield self.user_runtime_dir
yield self.site_runtime_dir
def iter_config_paths(self) -> Iterator[Path]:
""":yield: all user and site configuration paths."""
for path in self.iter_config_dirs():
yield Path(path)
def iter_data_paths(self) -> Iterator[Path]:
""":yield: all user and site data paths."""
for path in self.iter_data_dirs():
yield Path(path)
def iter_cache_paths(self) -> Iterator[Path]:
""":yield: all user and site cache paths."""
for path in self.iter_cache_dirs():
yield Path(path)
def iter_runtime_paths(self) -> Iterator[Path]:
""":yield: all user and site runtime paths."""
for path in self.iter_runtime_dirs():
yield Path(path)
|
PlatformDirsABC
|
python
|
walkccc__LeetCode
|
solutions/2225. Find Players With Zero or One Losses/2225.py
|
{
"start": 0,
"end": 444
}
|
class ____:
def findWinners(self, matches: list[list[int]]) -> list[list[int]]:
ans = [[] for _ in range(2)]
lossesCount = collections.Counter()
for winner, loser in matches:
if winner not in lossesCount:
lossesCount[winner] = 0
lossesCount[loser] += 1
for player, nLosses in lossesCount.items():
if nLosses < 2:
ans[nLosses].append(player)
return [sorted(ans[0]), sorted(ans[1])]
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/nn_grad_test.py
|
{
"start": 6750,
"end": 7943
}
|
class ____(test.TestCase):
@test_util.run_deprecated_v1
def testEluGradGradWRTgrad_ys(self):
inputs = constant_op.constant(
[[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32)
dummy = constant_op.constant(
[[3, 1, -1, -2], [9, 8, 7, 6]], dtype=dtypes.float32)
elu = gen_nn_ops.elu(inputs)
elu_grad = gradients_impl.gradients(elu, inputs, grad_ys=dummy)[0]
with self.cached_session():
error = gradient_checker.compute_gradient_error(
dummy,
dummy.shape,
elu_grad,
elu_grad.shape)
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testEluGradGradWRTinputs(self):
inputs = constant_op.constant(
[[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32)
dummy = constant_op.constant(
[[3, 1, -1, -2], [9, 8, 7, 6]], dtype=dtypes.float32)
elu = gen_nn_ops.elu(inputs)
elu_grad = gradients_impl.gradients(elu, inputs, grad_ys=dummy)[0]
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs,
inputs.shape,
elu_grad,
elu_grad.shape)
self.assertLess(error, 1e-4)
|
EluGradOpTest
|
python
|
euske__pdfminer
|
pdfminer/cmapdb.py
|
{
"start": 2580,
"end": 2785
}
|
class ____(CMapBase):
def decode(self, code):
n = len(code)//2
if n:
return struct.unpack('>%dH' % n, code)
else:
return ()
## UnicodeMap
##
|
IdentityCMap
|
python
|
keras-team__keras
|
keras/src/ops/linalg.py
|
{
"start": 5325,
"end": 6018
}
|
class ____(Operation):
def call(self, x):
return _inv(x)
def compute_output_spec(self, x):
_assert_2d(x)
_assert_square(x)
return KerasTensor(x.shape, x.dtype)
@keras_export(["keras.ops.inv", "keras.ops.linalg.inv"])
def inv(x):
"""Computes the inverse of a square tensor.
Args:
x: Input tensor of shape `(..., M, M)`.
Returns:
A tensor of shape `(..., M, M)` representing the inverse of `x`.
"""
if any_symbolic_tensors((x,)):
return Inv().symbolic_call(x)
return _inv(x)
def _inv(x):
x = backend.convert_to_tensor(x)
_assert_2d(x)
_assert_square(x)
return backend.linalg.inv(x)
|
Inv
|
python
|
getsentry__sentry
|
src/sentry/utils/locking/backends/redis.py
|
{
"start": 328,
"end": 1575
}
|
class ____(LockBackend):
def __init__(
self,
cluster: rb.Cluster | RedisCluster[str] | StrictRedis[str],
prefix: str = "l:",
uuid: str | None = None,
):
if uuid is None:
uuid = uuid4().hex
self.prefix = prefix
self.uuid = uuid
self.cluster = cluster
def get_client(self, key: str, routing_key: int | str | None = None) -> Any:
raise NotImplementedError
def prefix_key(self, key: str) -> str:
return f"{self.prefix}{key}"
def acquire(self, key: str, duration: int, routing_key: str | None = None) -> None:
client = self.get_client(key, routing_key)
full_key = self.prefix_key(key)
if client.set(full_key, self.uuid, ex=duration, nx=True) is not True:
raise Exception(f"Could not set key: {full_key!r}")
def release(self, key: str, routing_key: str | None = None) -> None:
client = self.get_client(key, routing_key)
delete_lock((self.prefix_key(key),), (self.uuid,), client)
def locked(self, key: str, routing_key: str | None = None) -> bool:
client = self.get_client(key, routing_key)
return client.get(self.prefix_key(key)) is not None
|
BaseRedisLockBackend
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config_base.py
|
{
"start": 175,
"end": 493
}
|
class ____(BaseModel):
model_config = ConfigDict(strict=True)
def _to_dict(self) -> Dict[str, Any]:
ret = cast(dict, self.model_dump(exclude_none=True))
for key, val in ret.items():
if isinstance(val, Enum):
ret[key] = val.value
return ret
|
_ConfigCreateModel
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/match1.py
|
{
"start": 126,
"end": 3869
}
|
class ____:
x: int
match (1, ):
case a1, b1 if True:
pass
case (a2, b2):
pass
case [a3, b3]:
pass
case () | []:
pass
# This should generate an error because of a missing pattern.
case :
pass
# This should generate an error because it is an irrefutable pattern
# and is not at the end.
case (a4):
pass
case (a5,):
pass
case [a6,]:
pass
case a7 as b7, c7 as d7 if True:
pass
case (a8, b8, ) as c8 if 1 == 3:
pass
case a9, *b8:
pass
# This should generate an error because multiple star
# patterns in a sequence are not allowed.
case *a10, *b10:
pass
# This should generate an error because star
# patterns cannot be used with "as".
case *a11 as b11, b12:
pass
case value_obj.a, value_obj.b:
pass
# This should generate an error because star
# patterns can't be used with |.
case (3 as b13) | (4 as b13) | *b13:
pass
case *a14, b14:
pass
case (a20, (b20,), [c20, *d20]) as e20:
pass
case 3 | -3:
pass
case 3.2 - 2.1j | -3.2 + 2.1j | 3j:
pass
# This should generate an error because the grammar
# indicates that imaginary number must come second.
case 2j + 4:
pass
# This should generate an error because the grammar
# indicates that imaginary number must come second.
case - 2j + 4:
pass
case "hi" """hi""" | r"hi" r"""hi""":
pass
# This should generate an error because f-strings are
# not allowed.
case "hi" f"""hi""":
pass
# This should generate an error.
case {}:
pass
case {"a": 3, -3 + 4j: a30, value_obj.a: b30, **c30}:
pass
# This should generate an error because only one ** expression
# can be used.
case {"a": 3, **a31, "b": -3j, **b31}:
pass
# This should generate an error because ** cannot be used with
# wildcard "_".
case {"a": 3, **_, "b": -3}:
pass
case (3 as x) as y:
pass
case int():
pass
case Foo(1, a40, value_obj.b as b40, c40=3|-2 + 5j|"hi" as d40, y=[e40, f40] as g40,):
pass
# This should generate an error because positional arguments
# cannot appear after keyword arguments.
case Foo(1, a41, x=3, value_obj.b as b41, c41=3, y=[d41, e41] as f41):
pass
# This should generate three errors because irrefutable patterns
# must appear only as the last entry in an or pattern.
case (_ as x) | x:
pass
# This should generate an error because it's an irrefutable pattern
# but is not the last case statement.
case _:
pass
# This should generate an error because it's an irrefutable pattern
# but is not the last case statement.
case (x):
pass
case _ if value_obj:
pass
# This should generate an error because or patterns must target the
# same names.
case 3 | x:
pass
case _:
pass
def func1():
match = Foo()
# This should be treated as an expression statement, not a match statement.
match.x
def func2():
match = [3]
# This should be treated as an expression statement, not a match statement.
match[0]
match [0]:
case _:
pass
def func3():
def match(a: int): ...
# This should be treated as a call statement.
match(0)
match (0):
case _:
pass
def func4():
match 1, 2, "3":
case _:
pass
def func5(match: Any):
# This should be treated as a list, not a match statement.
match[2:8, 2:8] = 0
|
Foo
|
python
|
pydantic__pydantic
|
pydantic/v1/networks.py
|
{
"start": 14732,
"end": 15106
}
|
class ____(MultiHostDsn):
allowed_schemes = {
'postgres',
'postgresql',
'postgresql+asyncpg',
'postgresql+pg8000',
'postgresql+psycopg',
'postgresql+psycopg2',
'postgresql+psycopg2cffi',
'postgresql+py-postgresql',
'postgresql+pygresql',
}
user_required = True
__slots__ = ()
|
PostgresDsn
|
python
|
graphql-python__graphene
|
graphene/tests/issues/test_1293.py
|
{
"start": 769,
"end": 1066
}
|
class ____(graphene.ObjectType):
set_datetime = SetDatetime.Field()
def test_schema_printable_with_default_datetime_value():
schema = graphene.Schema(query=Query, mutation=Mutations)
schema_str = print_schema(schema.graphql_schema)
assert schema_str, "empty schema printed"
|
Mutations
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/descriptors.py
|
{
"start": 19445,
"end": 19899
}
|
class ____(DifferentiableAOTInput):
"""This is similar to ViewBaseAOTInput, but this happens when none of the views were differentiable, so
we weren't able to get our hands on the true original view and constructed a synthetic one instead
for the sake of autograd.
"""
base_of: AOTInput
def expr(self) -> str:
return f"__make_synthetic_base({self.base_of.expr()})"
@dataclasses.dataclass(frozen=True)
|
SyntheticBaseAOTInput
|
python
|
pytorch__pytorch
|
torch/_dynamo/symbolic_convert.py
|
{
"start": 11206,
"end": 13051
}
|
class ____:
# These are the set of string symfloats names (eg. "zf0") that we collect
# from the tensorify_python_scalars.py joint fx pass to inform us about
# which float inputs we should specialize when we restart analysis.
force_specializations: set[str] = set()
@classmethod
def specialize(cls, index: str) -> None:
cls.force_specializations.add(index)
@classmethod
def should_specialize(cls, index: str) -> bool:
return index in cls.force_specializations
@classmethod
def clear(cls) -> None:
cls.force_specializations.clear()
@classmethod
def empty(cls) -> bool:
return len(cls.force_specializations) == 0
@functools.cache
def _step_logger() -> Callable[..., None]:
return torchdynamo_logging.get_step_logger(log)
@contextlib.contextmanager
def save_and_restart_speculation_log(
tx: InstructionTranslatorBase,
) -> Generator[None, None, None]:
# When reconstructing a generator after a graph break, we advance it until
# it is fully exhausted. This process adds new entries to the speculation
# log that were not previously observed. Without temporarily clearing the
# speculation log, this could lead to a divergence error.
entries = tx.speculation_log.entries
index = tx.speculation_log.index
try:
tx.speculation_log.entries = []
tx.speculation_log.index = 0
yield
finally:
tx.speculation_log.entries = entries
tx.speculation_log.index = index
@contextlib.contextmanager
def temporarely_allow_writes_to_output_graph(
tx: InstructionTranslatorBase,
) -> Generator[None, None, None]:
try:
tmp = tx.output.should_exit
tx.output.should_exit = False
yield
finally:
tx.output.should_exit = tmp
@dataclasses.dataclass
|
TensorifyState
|
python
|
pola-rs__polars
|
py-polars/src/polars/expr/categorical.py
|
{
"start": 214,
"end": 9393
}
|
class ____:
"""Namespace for categorical related expressions."""
_accessor = "cat"
def __init__(self, expr: Expr) -> None:
self._pyexpr = expr._pyexpr
def get_categories(self) -> Expr:
"""
Get the categories stored in this data type.
Examples
--------
>>> df = pl.Series(
... "cats", ["foo", "bar", "foo", "foo", "ham"], dtype=pl.Categorical
... ).to_frame()
>>> df.select(pl.col("cats").cat.get_categories()) # doctest: +SKIP
shape: (3, 1)
┌──────┐
│ cats │
│ --- │
│ str │
╞══════╡
│ foo │
│ bar │
│ ham │
└──────┘
"""
return wrap_expr(self._pyexpr.cat_get_categories())
def len_bytes(self) -> Expr:
"""
Return the byte-length of the string representation of each value.
Returns
-------
Expr
Expression of data type :class:`UInt32`.
See Also
--------
len_chars
Notes
-----
When working with non-ASCII text, the length in bytes is not the same as the
length in characters. You may want to use :func:`len_chars` instead.
Note that :func:`len_bytes` is much more performant (_O(1)_) than
:func:`len_chars` (_O(n)_).
Examples
--------
>>> df = pl.DataFrame(
... {"a": pl.Series(["Café", "345", "東京", None], dtype=pl.Categorical)}
... )
>>> df.with_columns(
... pl.col("a").cat.len_bytes().alias("n_bytes"),
... pl.col("a").cat.len_chars().alias("n_chars"),
... )
shape: (4, 3)
┌──────┬─────────┬─────────┐
│ a ┆ n_bytes ┆ n_chars │
│ --- ┆ --- ┆ --- │
│ cat ┆ u32 ┆ u32 │
╞══════╪═════════╪═════════╡
│ Café ┆ 5 ┆ 4 │
│ 345 ┆ 3 ┆ 3 │
│ 東京 ┆ 6 ┆ 2 │
│ null ┆ null ┆ null │
└──────┴─────────┴─────────┘
"""
return wrap_expr(self._pyexpr.cat_len_bytes())
def len_chars(self) -> Expr:
"""
Return the number of characters of the string representation of each value.
Returns
-------
Expr
Expression of data type :class:`UInt32`.
See Also
--------
len_bytes
Notes
-----
When working with ASCII text, use :func:`len_bytes` instead to achieve
equivalent output with much better performance:
:func:`len_bytes` runs in _O(1)_, while :func:`len_chars` runs in (_O(n)_).
A character is defined as a `Unicode scalar value`_. A single character is
represented by a single byte when working with ASCII text, and a maximum of
4 bytes otherwise.
.. _Unicode scalar value: https://www.unicode.org/glossary/#unicode_scalar_value
Examples
--------
>>> df = pl.DataFrame(
... {"a": pl.Series(["Café", "345", "東京", None], dtype=pl.Categorical)}
... )
>>> df.with_columns(
... pl.col("a").cat.len_chars().alias("n_chars"),
... pl.col("a").cat.len_bytes().alias("n_bytes"),
... )
shape: (4, 3)
┌──────┬─────────┬─────────┐
│ a ┆ n_chars ┆ n_bytes │
│ --- ┆ --- ┆ --- │
│ cat ┆ u32 ┆ u32 │
╞══════╪═════════╪═════════╡
│ Café ┆ 4 ┆ 5 │
│ 345 ┆ 3 ┆ 3 │
│ 東京 ┆ 2 ┆ 6 │
│ null ┆ null ┆ null │
└──────┴─────────┴─────────┘
"""
return wrap_expr(self._pyexpr.cat_len_chars())
def starts_with(self, prefix: str) -> Expr:
"""
Check if string representations of values start with a substring.
Parameters
----------
prefix
Prefix substring.
See Also
--------
contains : Check if string repr contains a substring that matches a pattern.
ends_with : Check if string repr end with a substring.
Notes
-----
Whereas `str.starts_with` allows expression inputs, `cat.starts_with` requires
a literal string value.
Examples
--------
>>> df = pl.DataFrame(
... {"fruits": pl.Series(["apple", "mango", None], dtype=pl.Categorical)}
... )
>>> df.with_columns(
... pl.col("fruits").cat.starts_with("app").alias("has_prefix"),
... )
shape: (3, 2)
┌────────┬────────────┐
│ fruits ┆ has_prefix │
│ --- ┆ --- │
│ cat ┆ bool │
╞════════╪════════════╡
│ apple ┆ true │
│ mango ┆ false │
│ null ┆ null │
└────────┴────────────┘
Using `starts_with` as a filter condition:
>>> df.filter(pl.col("fruits").cat.starts_with("app"))
shape: (1, 1)
┌────────┐
│ fruits │
│ --- │
│ cat │
╞════════╡
│ apple │
└────────┘
"""
if not isinstance(prefix, str):
msg = f"'prefix' must be a string; found {qualified_type_name(prefix)!r}"
raise TypeError(msg)
return wrap_expr(self._pyexpr.cat_starts_with(prefix))
def ends_with(self, suffix: str) -> Expr:
"""
Check if string representations of values end with a substring.
Parameters
----------
suffix
Suffix substring.
See Also
--------
contains : Check if string reprs contains a substring that matches a pattern.
starts_with : Check if string reprs start with a substring.
Notes
-----
Whereas `str.ends_with` allows expression inputs, `cat.ends_with` requires a
literal string value.
Examples
--------
>>> df = pl.DataFrame(
... {"fruits": pl.Series(["apple", "mango", None], dtype=pl.Categorical)}
... )
>>> df.with_columns(pl.col("fruits").cat.ends_with("go").alias("has_suffix"))
shape: (3, 2)
┌────────┬────────────┐
│ fruits ┆ has_suffix │
│ --- ┆ --- │
│ cat ┆ bool │
╞════════╪════════════╡
│ apple ┆ false │
│ mango ┆ true │
│ null ┆ null │
└────────┴────────────┘
Using `ends_with` as a filter condition:
>>> df.filter(pl.col("fruits").cat.ends_with("go"))
shape: (1, 1)
┌────────┐
│ fruits │
│ --- │
│ cat │
╞════════╡
│ mango │
└────────┘
"""
if not isinstance(suffix, str):
msg = f"'suffix' must be a string; found {qualified_type_name(suffix)!r}"
raise TypeError(msg)
return wrap_expr(self._pyexpr.cat_ends_with(suffix))
def slice(self, offset: int, length: int | None = None) -> Expr:
"""
Extract a substring from the string representation of each value.
Parameters
----------
offset
Start index. Negative indexing is supported.
length
Length of the slice. If set to `None` (default), the slice is taken to the
end of the string.
Returns
-------
Expr
Expression of data type :class:`String`.
Notes
-----
Both the `offset` and `length` inputs are defined in terms of the number
of characters in the (UTF8) string. A character is defined as a
`Unicode scalar value`_. A single character is represented by a single byte
when working with ASCII text, and a maximum of 4 bytes otherwise.
.. _Unicode scalar value: https://www.unicode.org/glossary/#unicode_scalar_value
Examples
--------
>>> df = pl.DataFrame(
... {
... "s": pl.Series(
... ["pear", None, "papaya", "dragonfruit"],
... dtype=pl.Categorical,
... )
... }
... )
>>> df.with_columns(pl.col("s").cat.slice(-3).alias("slice"))
shape: (4, 2)
┌─────────────┬───────┐
│ s ┆ slice │
│ --- ┆ --- │
│ cat ┆ str │
╞═════════════╪═══════╡
│ pear ┆ ear │
│ null ┆ null │
│ papaya ┆ aya │
│ dragonfruit ┆ uit │
└─────────────┴───────┘
Using the optional `length` parameter
>>> df.with_columns(pl.col("s").cat.slice(4, length=3).alias("slice"))
shape: (4, 2)
┌─────────────┬───────┐
│ s ┆ slice │
│ --- ┆ --- │
│ cat ┆ str │
╞═════════════╪═══════╡
│ pear ┆ │
│ null ┆ null │
│ papaya ┆ ya │
│ dragonfruit ┆ onf │
└─────────────┴───────┘
"""
return wrap_expr(self._pyexpr.cat_slice(offset, length))
|
ExprCatNameSpace
|
python
|
django__django
|
tests/model_enums/tests.py
|
{
"start": 7698,
"end": 7883
}
|
class ____(bytes, models.Choices):
FS = b"\x1c", "File Separator"
GS = b"\x1d", "Group Separator"
RS = b"\x1e", "Record Separator"
US = b"\x1f", "Unit Separator"
|
Separator
|
python
|
spack__spack
|
lib/spack/spack/spec.py
|
{
"start": 53800,
"end": 192040
}
|
class ____:
compiler = DeprecatedCompilerSpec()
@staticmethod
def default_arch():
"""Return an anonymous spec for the default architecture"""
s = Spec()
s.architecture = ArchSpec.default_arch()
return s
def __init__(self, spec_like=None, *, external_path=None, external_modules=None):
"""Create a new Spec.
Arguments:
spec_like: if not provided, we initialize an anonymous Spec that matches any Spec;
if provided we parse this as a Spec string, or we copy the provided Spec.
Keyword arguments:
external_path: prefix, if this is a spec for an external package
external_modules: list of external modules, if this is an external package
using modules.
"""
# Copy if spec_like is a Spec.
if isinstance(spec_like, Spec):
self._dup(spec_like)
return
# init an empty spec that matches anything.
self.name: str = ""
self.versions = vn.VersionList(":")
self.variants = VariantMap(self)
self.architecture = None
self.compiler_flags = FlagMap(self)
self._dependents = _EdgeMap(store_by_child=False)
self._dependencies = _EdgeMap(store_by_child=True)
self.namespace = None
self.abstract_hash = None
# initial values for all spec hash types
for h in ht.HASHES:
setattr(self, h.attr, None)
# cache for spec's prefix, computed lazily by prefix property
self._prefix = None
# Python __hash__ is handled separately from the cached spec hashes
self._dunder_hash = None
# cache of package for this spec
self._package = None
# whether the spec is concrete or not; set at the end of concretization
self._concrete = False
# External detection details that can be set by internal Spack calls
# in the constructor.
self._external_path = external_path
self.external_modules = Spec._format_module_list(external_modules)
# This attribute is used to store custom information for external specs.
self.extra_attributes: Dict[str, Any] = {}
# This attribute holds the original build copy of the spec if it is
# deployed differently than it was built. None signals that the spec
# is deployed "as built."
# Build spec should be the actual build spec unless marked dirty.
self._build_spec = None
self.annotations = SpecAnnotations()
if isinstance(spec_like, str):
spack.spec_parser.parse_one_or_raise(spec_like, self)
elif spec_like is not None:
raise TypeError(f"Can't make spec out of {type(spec_like)}")
@staticmethod
def _format_module_list(modules):
"""Return a module list that is suitable for YAML serialization
and hash computation.
Given a module list, possibly read from a configuration file,
return an object that serializes to a consistent YAML string
before/after round-trip serialization to/from a Spec dictionary
(stored in JSON format): when read in, the module list may
contain YAML formatting that is discarded (non-essential)
when stored as a Spec dictionary; we take care in this function
to discard such formatting such that the Spec hash does not
change before/after storage in JSON.
"""
if modules:
modules = list(modules)
return modules
@property
def external_path(self):
return spack.llnl.path.path_to_os_path(self._external_path)[0]
@external_path.setter
def external_path(self, ext_path):
self._external_path = ext_path
@property
def external(self):
return bool(self.external_path) or bool(self.external_modules)
@property
def is_develop(self):
"""Return whether the Spec represents a user-developed package
in a Spack Environment (i.e. using ``spack develop``).
"""
return bool(self.variants.get("dev_path", False))
def clear_dependencies(self):
"""Trim the dependencies of this spec."""
self._dependencies.clear()
def clear_edges(self):
"""Trim the dependencies and dependents of this spec."""
self._dependencies.clear()
self._dependents.clear()
def detach(self, deptype="all"):
"""Remove any reference that dependencies have of this node.
Args:
deptype (str or tuple): dependency types tracked by the
current spec
"""
key = self.dag_hash()
# Go through the dependencies
for dep in self.dependencies(deptype=deptype):
# Remove the spec from dependents
if self.name in dep._dependents:
dependents_copy = dep._dependents.edges[self.name]
del dep._dependents.edges[self.name]
for edge in dependents_copy:
if edge.parent.dag_hash() == key:
continue
dep._dependents.add(edge)
def _get_dependency(self, name):
# WARNING: This function is an implementation detail of the
# WARNING: original concretizer. Since with that greedy
# WARNING: algorithm we don't allow multiple nodes from
# WARNING: the same package in a DAG, here we hard-code
# WARNING: using index 0 i.e. we assume that we have only
# WARNING: one edge from package "name"
deps = self.edges_to_dependencies(name=name)
if len(deps) != 1:
err_msg = 'expected only 1 "{0}" dependency, but got {1}'
raise spack.error.SpecError(err_msg.format(name, len(deps)))
return deps[0]
def edges_from_dependents(
self,
name: Optional[str] = None,
depflag: dt.DepFlag = dt.ALL,
*,
virtuals: Optional[Union[str, Sequence[str]]] = None,
) -> List[DependencySpec]:
"""Return a list of edges connecting this node in the DAG
to parents.
Args:
name: filter dependents by package name
depflag: allowed dependency types
virtuals: allowed virtuals
"""
return [
d for d in self._dependents.select(parent=name, depflag=depflag, virtuals=virtuals)
]
def edges_to_dependencies(
self,
name: Optional[str] = None,
depflag: dt.DepFlag = dt.ALL,
*,
virtuals: Optional[Union[str, Sequence[str]]] = None,
) -> List[DependencySpec]:
"""Returns a list of edges connecting this node in the DAG to children.
Args:
name: filter dependencies by package name
depflag: allowed dependency types
virtuals: allowed virtuals
"""
return [
d for d in self._dependencies.select(child=name, depflag=depflag, virtuals=virtuals)
]
@property
def edge_attributes(self) -> str:
"""Helper method to print edge attributes in spec strings."""
edges = self.edges_from_dependents()
if not edges:
return ""
union = DependencySpec(parent=Spec(), spec=self, depflag=0, virtuals=())
all_direct_edges = all(x.direct for x in edges)
dep_conditions = set()
for edge in edges:
union.update_deptypes(edge.depflag)
union.update_virtuals(edge.virtuals)
dep_conditions.add(edge.when)
deptypes_str = ""
if not all_direct_edges and union.depflag:
deptypes_str = f"deptypes={','.join(dt.flag_to_tuple(union.depflag))}"
virtuals_str = f"virtuals={','.join(union.virtuals)}" if union.virtuals else ""
conditions = [str(c) for c in dep_conditions if c != Spec()]
when_str = f"when='{','.join(conditions)}'" if conditions else ""
result = " ".join(filter(lambda x: bool(x), (when_str, deptypes_str, virtuals_str)))
if result:
result = f"[{result}]"
return result
def dependencies(
self,
name: Optional[str] = None,
deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL,
*,
virtuals: Optional[Union[str, Sequence[str]]] = None,
) -> List["Spec"]:
"""Returns a list of direct dependencies (nodes in the DAG)
Args:
name: filter dependencies by package name
deptype: allowed dependency types
virtuals: allowed virtuals
"""
if not isinstance(deptype, dt.DepFlag):
deptype = dt.canonicalize(deptype)
return [
d.spec for d in self.edges_to_dependencies(name, depflag=deptype, virtuals=virtuals)
]
def dependents(
self, name: Optional[str] = None, deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL
) -> List["Spec"]:
"""Return a list of direct dependents (nodes in the DAG).
Args:
name: filter dependents by package name
deptype: allowed dependency types
"""
if not isinstance(deptype, dt.DepFlag):
deptype = dt.canonicalize(deptype)
return [d.parent for d in self.edges_from_dependents(name, depflag=deptype)]
def _dependencies_dict(self, depflag: dt.DepFlag = dt.ALL):
"""Return a dictionary, keyed by package name, of the direct
dependencies.
Each value in the dictionary is a list of edges.
Args:
deptype: allowed dependency types
"""
_sort_fn = lambda x: (x.spec.name, _sort_by_dep_types(x))
_group_fn = lambda x: x.spec.name
selected_edges = self._dependencies.select(depflag=depflag)
result = {}
for key, group in itertools.groupby(sorted(selected_edges, key=_sort_fn), key=_group_fn):
result[key] = list(group)
return result
def _add_flag(
self, name: str, value: Union[str, bool], propagate: bool, concrete: bool
) -> None:
"""Called by the parser to add a known flag"""
if propagate and name in vt.RESERVED_NAMES:
raise UnsupportedPropagationError(
f"Propagation with '==' is not supported for '{name}'."
)
valid_flags = FlagMap.valid_compiler_flags()
if name == "arch" or name == "architecture":
assert type(value) is str, "architecture have a string value"
parts = tuple(value.split("-"))
plat, os, tgt = parts if len(parts) == 3 else (None, None, value)
self._set_architecture(platform=plat, os=os, target=tgt)
elif name == "platform":
self._set_architecture(platform=value)
elif name == "os" or name == "operating_system":
self._set_architecture(os=value)
elif name == "target":
self._set_architecture(target=value)
elif name == "namespace":
self.namespace = value
elif name in valid_flags:
assert self.compiler_flags is not None
assert type(value) is str, f"{name} must have a string value"
flags_and_propagation = spack.compilers.flags.tokenize_flags(value, propagate)
flag_group = " ".join(x for (x, y) in flags_and_propagation)
for flag, propagation in flags_and_propagation:
self.compiler_flags.add_flag(name, flag, propagation, flag_group)
else:
self.variants[name] = vt.VariantValue.from_string_or_bool(
name, value, propagate=propagate, concrete=concrete
)
def _set_architecture(self, **kwargs):
"""Called by the parser to set the architecture."""
arch_attrs = ["platform", "os", "target"]
if self.architecture and self.architecture.concrete:
raise DuplicateArchitectureError("Spec cannot have two architectures.")
if not self.architecture:
new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
self.architecture = ArchSpec(new_vals)
else:
new_attrvals = [(a, v) for a, v in kwargs.items() if a in arch_attrs]
for new_attr, new_value in new_attrvals:
if getattr(self.architecture, new_attr):
raise DuplicateArchitectureError(f"Cannot specify '{new_attr}' twice")
else:
setattr(self.architecture, new_attr, new_value)
def _add_dependency(
self,
spec: "Spec",
*,
depflag: dt.DepFlag,
virtuals: Tuple[str, ...],
direct: bool = False,
propagation: PropagationPolicy = PropagationPolicy.NONE,
when: Optional["Spec"] = None,
):
"""Called by the parser to add another spec as a dependency.
Args:
depflag: dependency type for this edge
virtuals: virtuals on this edge
direct: if True denotes a direct dependency (associated with the % sigil)
propagation: propagation policy for this edge
when: optional condition under which dependency holds
"""
if when is None:
when = Spec()
if spec.name not in self._dependencies or not spec.name:
self.add_dependency_edge(
spec,
depflag=depflag,
virtuals=virtuals,
direct=direct,
when=when,
propagation=propagation,
)
return
# Keep the intersection of constraints when a dependency is added multiple times with
# the same deptype. Add a new dependency if it is added with a compatible deptype
# (for example, a build-only dependency is compatible with a link-only dependency).
# The only restrictions, currently, are that we cannot add edges with overlapping
# dependency types and we cannot add multiple edges that have link/run dependency types.
# See ``spack.deptypes.compatible``.
orig = self._dependencies[spec.name]
try:
dspec = next(
dspec for dspec in orig if depflag == dspec.depflag and when == dspec.when
)
except StopIteration:
# Error if we have overlapping or incompatible deptypes
if any(not dt.compatible(dspec.depflag, depflag) for dspec in orig) and all(
dspec.when == when for dspec in orig
):
edge_attrs = f"deptypes={dt.flag_to_chars(depflag).strip()}"
required_dep_str = f"^[{edge_attrs}] {str(spec)}"
raise DuplicateDependencyError(
f"{spec.name} is a duplicate dependency, with conflicting dependency types\n"
f"\t'{str(self)}' cannot depend on '{required_dep_str}'"
)
self.add_dependency_edge(
spec, depflag=depflag, virtuals=virtuals, direct=direct, when=when
)
return
try:
dspec.spec.constrain(spec)
dspec.update_virtuals(virtuals=virtuals)
except spack.error.UnsatisfiableSpecError:
raise DuplicateDependencyError(
f"Cannot depend on incompatible specs '{dspec.spec}' and '{spec}'"
)
def add_dependency_edge(
self,
dependency_spec: "Spec",
*,
depflag: dt.DepFlag,
virtuals: Tuple[str, ...],
direct: bool = False,
propagation: PropagationPolicy = PropagationPolicy.NONE,
when: Optional["Spec"] = None,
):
"""Add a dependency edge to this spec.
Args:
dependency_spec: spec of the dependency
depflag: dependency type for this edge
virtuals: virtuals provided by this edge
direct: if True denotes a direct dependency
propagation: propagation policy for this edge
when: if non-None, condition under which dependency holds
"""
if when is None:
when = Spec()
# Check if we need to update edges that are already present
selected = self._dependencies.select(child=dependency_spec.name)
for edge in selected:
has_errors, details = False, []
msg = f"cannot update the edge from {edge.parent.name} to {edge.spec.name}"
if edge.when != when:
continue
# If the dependency is to an existing spec, we can update dependency
# types. If it is to a new object, check deptype compatibility.
if id(edge.spec) != id(dependency_spec) and not dt.compatible(edge.depflag, depflag):
has_errors = True
details.append(
(
f"{edge.parent.name} has already an edge matching any"
f" of these types {depflag}"
)
)
if any(v in edge.virtuals for v in virtuals):
details.append(
(
f"{edge.parent.name} has already an edge matching any"
f" of these virtuals {virtuals}"
)
)
if has_errors:
raise spack.error.SpecError(msg, "\n".join(details))
for edge in selected:
if id(dependency_spec) == id(edge.spec) and edge.when == when:
# If we are here, it means the edge object was previously added to
# both the parent and the child. When we update this object they'll
# both see the deptype modification.
edge.update_deptypes(depflag=depflag)
edge.update_virtuals(virtuals=virtuals)
return
edge = DependencySpec(
self,
dependency_spec,
depflag=depflag,
virtuals=virtuals,
direct=direct,
propagation=propagation,
when=when,
)
self._dependencies.add(edge)
dependency_spec._dependents.add(edge)
#
# Public interface
#
@property
def fullname(self):
return (
f"{self.namespace}.{self.name}" if self.namespace else (self.name if self.name else "")
)
@property
def anonymous(self):
return not self.name and not self.abstract_hash
@property
def root(self):
"""Follow dependent links and find the root of this spec's DAG.
Spack specs have a single root (the package being installed).
"""
# FIXME: In the case of multiple parents this property does not
# FIXME: make sense. Should we revisit the semantics?
if not self._dependents:
return self
edges_by_package = next(iter(self._dependents.values()))
return edges_by_package[0].parent.root
@property
def package(self):
assert self.concrete, "{0}: Spec.package can only be called on concrete specs".format(
self.name
)
if not self._package:
self._package = spack.repo.PATH.get(self)
return self._package
@property
def concrete(self):
"""A spec is concrete if it describes a single build of a package.
More formally, a spec is concrete if concretize() has been called
on it and it has been marked ``_concrete``.
Concrete specs either can be or have been built. All constraints
have been resolved, optional dependencies have been added or
removed, a compiler has been chosen, and all variants have
values.
"""
return self._concrete
@property
def spliced(self):
"""Returns whether or not this Spec is being deployed as built i.e.
whether or not this Spec has ever been spliced.
"""
return any(s.build_spec is not s for s in self.traverse(root=True))
@property
def installed(self):
"""Installation status of a package.
Returns:
True if the package has been installed, False otherwise.
"""
if not self.concrete:
return False
try:
# If the spec is in the DB, check the installed
# attribute of the record
from spack.store import STORE
return STORE.db.get_record(self).installed
except KeyError:
# If the spec is not in the DB, the method
# above raises a Key error
return False
@property
def installed_upstream(self):
"""Whether the spec is installed in an upstream repository.
Returns:
True if the package is installed in an upstream, False otherwise.
"""
if not self.concrete:
return False
from spack.store import STORE
upstream, record = STORE.db.query_by_spec_hash(self.dag_hash())
return upstream and record and record.installed
@overload
def traverse(
self,
*,
root: bool = ...,
order: spack.traverse.OrderType = ...,
cover: spack.traverse.CoverType = ...,
direction: spack.traverse.DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[False] = False,
key: Callable[["Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable["Spec"]: ...
@overload
def traverse(
self,
*,
root: bool = ...,
order: spack.traverse.OrderType = ...,
cover: spack.traverse.CoverType = ...,
direction: spack.traverse.DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[True],
key: Callable[["Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[Tuple[int, "Spec"]]: ...
def traverse(
self,
*,
root: bool = True,
order: spack.traverse.OrderType = "pre",
cover: spack.traverse.CoverType = "nodes",
direction: spack.traverse.DirectionType = "children",
deptype: Union[dt.DepFlag, dt.DepTypes] = "all",
depth: bool = False,
key: Callable[["Spec"], Any] = id,
visited: Optional[Set[Any]] = None,
) -> Iterable[Union["Spec", Tuple[int, "Spec"]]]:
"""Shorthand for :meth:`~spack.traverse.traverse_nodes`"""
return spack.traverse.traverse_nodes(
[self],
root=root,
order=order,
cover=cover,
direction=direction,
deptype=deptype,
depth=depth,
key=key,
visited=visited,
)
@overload
def traverse_edges(
self,
*,
root: bool = ...,
order: spack.traverse.OrderType = ...,
cover: spack.traverse.CoverType = ...,
direction: spack.traverse.DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[False] = False,
key: Callable[["Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[DependencySpec]: ...
@overload
def traverse_edges(
self,
*,
root: bool = ...,
order: spack.traverse.OrderType = ...,
cover: spack.traverse.CoverType = ...,
direction: spack.traverse.DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[True],
key: Callable[["Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[Tuple[int, DependencySpec]]: ...
def traverse_edges(
self,
*,
root: bool = True,
order: spack.traverse.OrderType = "pre",
cover: spack.traverse.CoverType = "nodes",
direction: spack.traverse.DirectionType = "children",
deptype: Union[dt.DepFlag, dt.DepTypes] = "all",
depth: bool = False,
key: Callable[["Spec"], Any] = id,
visited: Optional[Set[Any]] = None,
) -> Iterable[Union[DependencySpec, Tuple[int, DependencySpec]]]:
"""Shorthand for :meth:`~spack.traverse.traverse_edges`"""
return spack.traverse.traverse_edges(
[self],
root=root,
order=order,
cover=cover,
direction=direction,
deptype=deptype,
depth=depth,
key=key,
visited=visited,
)
@property
def prefix(self) -> spack.util.prefix.Prefix:
if not self._concrete:
raise spack.error.SpecError(f"Spec is not concrete: {self}")
if self._prefix is None:
from spack.store import STORE
_, record = STORE.db.query_by_spec_hash(self.dag_hash())
if record and record.path:
self.set_prefix(record.path)
else:
self.set_prefix(STORE.layout.path_for_spec(self))
assert self._prefix is not None
return self._prefix
def set_prefix(self, value: str) -> None:
self._prefix = spack.util.prefix.Prefix(spack.llnl.path.convert_to_platform_path(value))
def spec_hash(self, hash: ht.SpecHashDescriptor) -> str:
"""Utility method for computing different types of Spec hashes.
Arguments:
hash: type of hash to generate.
"""
# TODO: currently we strip build dependencies by default. Rethink
# this when we move to using package hashing on all specs.
if hash.override is not None:
return hash.override(self)
node_dict = self.to_node_dict(hash=hash)
json_text = json.dumps(
node_dict, ensure_ascii=True, indent=None, separators=(",", ":"), sort_keys=False
)
# This implements "frankenhashes", preserving the last 7 characters of the
# original hash when splicing so that we can avoid relocation issues
out = spack.util.hash.b32_hash(json_text)
if self.build_spec is not self:
return out[:-7] + self.build_spec.spec_hash(hash)[-7:]
return out
def _cached_hash(
self, hash: ht.SpecHashDescriptor, length: Optional[int] = None, force: bool = False
) -> str:
"""Helper function for storing a cached hash on the spec.
This will run spec_hash() with the deptype and package_hash
parameters, and if this spec is concrete, it will store the value
in the supplied attribute on this spec.
Arguments:
hash: type of hash to generate.
length: length of hash prefix to return (default is full hash string)
force: cache the hash even if spec is not concrete (default False)
"""
hash_string = getattr(self, hash.attr, None)
if hash_string:
return hash_string[:length]
hash_string = self.spec_hash(hash)
if force or self.concrete:
setattr(self, hash.attr, hash_string)
return hash_string[:length]
def package_hash(self):
"""Compute the hash of the contents of the package for this node"""
# Concrete specs with the old DAG hash did not have the package hash, so we do
# not know what the package looked like at concretization time
if self.concrete and not self._package_hash:
raise ValueError(
"Cannot call package_hash() on concrete specs with the old dag_hash()"
)
return self._cached_hash(ht.package_hash)
def dag_hash(self, length=None):
"""This is Spack's default hash, used to identify installations.
NOTE: Versions of Spack prior to 0.18 only included link and run deps.
NOTE: Versions of Spack prior to 1.0 did not include test deps.
"""
return self._cached_hash(ht.dag_hash, length)
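# Illustrative sketch: reading the DAG hash of a spec, full and truncated. The
# spec string is hypothetical; on concrete specs the value is cached on the node.
#
#     s = Spec("zlib")
#     s.dag_hash()      # full base32 hash string
#     s.dag_hash(7)     # first 7 characters, convenient for display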
def dag_hash_bit_prefix(self, bits):
"""Get the first <bits> bits of the DAG hash as an integer type."""
return spack.util.hash.base32_prefix_bits(self.dag_hash(), bits)
def _lookup_hash(self):
"""Lookup just one spec with an abstract hash, returning a spec from the the environment,
store, or finally, binary caches."""
from spack.binary_distribution import BinaryCacheQuery
from spack.environment import active_environment
from spack.store import STORE
active_env = active_environment()
# First env, then store, then binary cache
matches = (
(active_env.all_matching_specs(self) if active_env else [])
or STORE.db.query(self, installed=InstallRecordStatus.ANY)
or BinaryCacheQuery(True)(self)
)
if not matches:
raise InvalidHashError(self, self.abstract_hash)
if len(matches) != 1:
raise AmbiguousHashError(
f"Multiple packages specify hash beginning '{self.abstract_hash}'.", *matches
)
return matches[0]
def lookup_hash(self):
"""Given a spec with an abstract hash, return a copy of the spec with all properties and
dependencies by looking up the hash in the environment, store, or finally, binary caches.
This is non-destructive."""
if self.concrete or not any(node.abstract_hash for node in self.traverse()):
return self
spec = self.copy(deps=False)
# root spec is replaced
if spec.abstract_hash:
spec._dup(self._lookup_hash())
return spec
# Map the dependencies that need to be replaced
node_lookup = {
id(node): node._lookup_hash()
for node in self.traverse(root=False)
if node.abstract_hash
}
# Reconstruct dependencies
for edge in self.traverse_edges(root=False):
key = edge.parent.name
current_node = spec if key == spec.name else spec[key]
child_node = node_lookup.get(id(edge.spec), edge.spec.copy())
current_node._add_dependency(
child_node, depflag=edge.depflag, virtuals=edge.virtuals, direct=edge.direct
)
return spec
def replace_hash(self):
"""Given a spec with an abstract hash, attempt to populate all properties and dependencies
by looking up the hash in the environment, store, or finally, binary caches.
This is destructive."""
if not any(node for node in self.traverse(order="post") if node.abstract_hash):
return
self._dup(self.lookup_hash())
def to_node_dict(self, hash: ht.SpecHashDescriptor = ht.dag_hash) -> Dict[str, Any]:
"""Create a dictionary representing the state of this Spec.
This method creates the content that is eventually hashed by Spack to create identifiers
like the DAG hash (see :meth:`dag_hash()`). Example result of this function for the
``sqlite`` package::
{
"name": "sqlite",
"version": "3.46.0",
"arch": {"platform": "linux", "platform_os": "ubuntu24.04", "target": "x86_64_v3"},
"namespace": "builtin",
"parameters": {
"build_system": "autotools",
"column_metadata": True,
"dynamic_extensions": True,
"fts": True,
"functions": False,
"rtree": True,
"cflags": [],
"cppflags": [],
"cxxflags": [],
"fflags": [],
"ldflags": [],
"ldlibs": [],
},
"package_hash": "umcghjlve5347o3q2odo7vfcso2zhxdzmfdba23nkdhe5jntlhia====",
"dependencies": [
{
"name": "compiler-wrapper",
"hash": "c5bxlim3zge4snwrwtd6rzuvq2unek6s",
"parameters": {"deptypes": ("build",), "virtuals": ()},
},
{
"name": "gcc",
"hash": "6dzveld2rtt2dkhklxfnery5wbtb5uus",
"parameters": {"deptypes": ("build",), "virtuals": ("c",)},
},
...
],
"annotations": {"original_specfile_version": 5},
}
Note that the dictionary returned does *not* include the hash of the *root* of the spec,
though it does include hashes for each dependency and its own package hash.
See :meth:`to_dict()` for a "complete" spec hash, with hashes for each node and nodes for
each dependency (instead of just their hashes).
Arguments:
hash: type of hash to generate.
"""
d: Dict[str, Any] = {"name": self.name}
if self.versions:
d.update(self.versions.to_dict())
if self.architecture:
d.update(self.architecture.to_dict())
if self.namespace:
d["namespace"] = self.namespace
params: Dict[str, Any] = dict(sorted(v.yaml_entry() for v in self.variants.values()))
# Only need the string compiler flag for yaml file
params.update(
sorted(
self.compiler_flags.yaml_entry(flag_type)
for flag_type in self.compiler_flags.keys()
)
)
if params:
d["parameters"] = params
if params and not self.concrete:
flag_names = [
name
for name, flags in self.compiler_flags.items()
if any(x.propagate for x in flags)
]
d["propagate"] = sorted(
itertools.chain(
[v.name for v in self.variants.values() if v.propagate], flag_names
)
)
d["abstract"] = sorted(v.name for v in self.variants.values() if not v.concrete)
if self.external:
d["external"] = {
"path": self.external_path,
"module": self.external_modules or None,
"extra_attributes": syaml.sorted_dict(self.extra_attributes),
}
if not self._concrete:
d["concrete"] = False
if "patches" in self.variants:
variant = self.variants["patches"]
if hasattr(variant, "_patches_in_order_of_appearance"):
d["patches"] = variant._patches_in_order_of_appearance
if (
self._concrete
and hash.package_hash
and hasattr(self, "_package_hash")
and self._package_hash
):
# We use the attribute here instead of `self.package_hash()` because this
# should *always* be assigned at concretization time. We don't want to try
# to compute a package hash for concrete spec where a) the package might not
# exist, or b) the `dag_hash` didn't include the package hash when the spec
# was concretized.
package_hash = self._package_hash
# Full hashes are in bytes
if not isinstance(package_hash, str) and isinstance(package_hash, bytes):
package_hash = package_hash.decode("utf-8")
d["package_hash"] = package_hash
# Note: Relies on sorting dict by keys later in algorithm.
deps = self._dependencies_dict(depflag=hash.depflag)
if deps:
dependencies = []
for name, edges_for_name in sorted(deps.items()):
for dspec in edges_for_name:
dep_attrs = {
"name": name,
hash.name: dspec.spec._cached_hash(hash),
"parameters": {
"deptypes": dt.flag_to_tuple(dspec.depflag),
"virtuals": dspec.virtuals,
},
}
if dspec.direct:
dep_attrs["parameters"]["direct"] = True
dependencies.append(dep_attrs)
d["dependencies"] = dependencies
# Name is included in case this is replacing a virtual.
if self._build_spec:
d["build_spec"] = {
"name": self.build_spec.name,
hash.name: self.build_spec._cached_hash(hash),
}
# Annotations
d["annotations"] = {"original_specfile_version": self.annotations.original_spec_format}
if self.annotations.original_spec_format < 5:
d["annotations"]["compiler"] = str(self.annotations.compiler_node_attribute)
return d
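# Illustrative sketch: the node dict built above is exactly what spec_hash()
# serializes and hashes. A rough equivalent for the default descriptor
# (variable names are hypothetical):
#
#     node = spec.to_node_dict(ht.dag_hash)
#     text = json.dumps(node, ensure_ascii=True, indent=None,
#                       separators=(",", ":"), sort_keys=False)
#     digest = spack.util.hash.b32_hash(text)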
def to_dict(self, hash: ht.SpecHashDescriptor = ht.dag_hash) -> Dict[str, Any]:
"""Create a dictionary suitable for writing this spec to YAML or JSON.
This dictionary is like the one that is ultimately written to a ``spec.json`` file in each
Spack installation directory. For example, for sqlite::
{
"spec": {
"_meta": {"version": 5},
"nodes": [
{
"name": "sqlite",
"version": "3.46.0",
"arch": {
"platform": "linux",
"platform_os": "ubuntu24.04",
"target": "x86_64_v3"
},
"namespace": "builtin",
"parameters": {
"build_system": "autotools",
"column_metadata": True,
"dynamic_extensions": True,
"fts": True,
"functions": False,
"rtree": True,
"cflags": [],
"cppflags": [],
"cxxflags": [],
"fflags": [],
"ldflags": [],
"ldlibs": [],
},
"package_hash": "umcghjlve5347o...xdzmfdba23nkdhe5jntlhia====",
"dependencies": [
{
"name": "compiler-wrapper",
"hash": "c5bxlim3zge4snwrwtd6rzuvq2unek6s",
"parameters": {"deptypes": ("build",), "virtuals": ()},
},
{
"name": "gcc",
"hash": "6dzveld2rtt2dkhklxfnery5wbtb5uus",
"parameters": {"deptypes": ("build",), "virtuals": ("c",)},
},
...
],
"annotations": {"original_specfile_version": 5},
"hash": "a2ubvvqnula6zdppckwqrjf3zmsdzpoh",
},
...
],
}
}
Note that this dictionary starts with the ``spec`` key, and what follows is a list starting
with the root spec, followed by its dependencies in preorder.
The method :meth:`from_dict()` can be used to read back in a spec that has been converted
to a dictionary, serialized, and read back in.
"""
node_list = [] # Using a list to preserve preorder traversal for hash.
hash_set = set()
for s in self.traverse(order="pre", deptype=hash.depflag):
spec_hash = s._cached_hash(hash)
if spec_hash not in hash_set:
node_list.append(s.node_dict_with_hashes(hash))
hash_set.add(spec_hash)
if s.build_spec is not s:
build_spec_list = s.build_spec.to_dict(hash)["spec"]["nodes"]
for node in build_spec_list:
node_hash = node[hash.name]
if node_hash not in hash_set:
node_list.append(node)
hash_set.add(node_hash)
return {"spec": {"_meta": {"version": SPECFILE_FORMAT_VERSION}, "nodes": node_list}}
def node_dict_with_hashes(self, hash: ht.SpecHashDescriptor = ht.dag_hash) -> Dict[str, Any]:
"""Returns a node dict of this spec with the dag hash, and the provided hash (if not
the dag hash)."""
node = self.to_node_dict(hash)
# All specs have at least a DAG hash
node[ht.dag_hash.name] = self.dag_hash()
if not self.concrete:
node["concrete"] = False
# we can also give them other hash types if we want
if hash.name != ht.dag_hash.name:
node[hash.name] = self._cached_hash(hash)
return node
def to_yaml(self, stream=None, hash=ht.dag_hash):
return syaml.dump(self.to_dict(hash), stream=stream, default_flow_style=False)
def to_json(self, stream=None, hash=ht.dag_hash):
return sjson.dump(self.to_dict(hash), stream)
@staticmethod
def from_specfile(path):
"""Construct a spec from a JSON or YAML spec file path"""
with open(path, "r", encoding="utf-8") as fd:
file_content = fd.read()
if path.endswith(".json"):
return Spec.from_json(file_content)
return Spec.from_yaml(file_content)
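# Illustrative sketch of a serialization round trip using the methods above and
# from_json() defined further below (the file path is hypothetical):
#
#     text = spec.to_json()                            # same content a spec.json holds
#     same = Spec.from_json(text)                      # rebuilt from the dict form
#     other = Spec.from_specfile("/path/to/spec.json")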
@staticmethod
def override(init_spec, change_spec):
# TODO: this doesn't account for the case where the changed spec
# (and the user spec) have dependencies
new_spec = init_spec.copy()
package_cls = spack.repo.PATH.get_pkg_class(new_spec.name)
if change_spec.versions and not change_spec.versions == vn.any_version:
new_spec.versions = change_spec.versions
for vname, value in change_spec.variants.items():
if vname in package_cls.variant_names():
if vname in new_spec.variants:
new_spec.variants.substitute(value)
else:
new_spec.variants[vname] = value
else:
raise ValueError("{0} is not a variant of {1}".format(vname, new_spec.name))
if change_spec.compiler_flags:
for flagname, flagvals in change_spec.compiler_flags.items():
new_spec.compiler_flags[flagname] = flagvals
if change_spec.architecture:
new_spec.architecture = ArchSpec.override(
new_spec.architecture, change_spec.architecture
)
return new_spec
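# Illustrative sketch: overriding fields of an existing spec. This needs the
# package to be available in a repository, and the spec strings are hypothetical:
#
#     tuned = Spec.override(Spec("hdf5@1.12"), Spec("hdf5@1.14 cflags=-O3"))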
@staticmethod
def from_literal(spec_dict: dict, normal: bool = True) -> "Spec":
"""Builds a Spec from a dictionary containing the spec literal.
The dictionary must have a single top level key, representing the root,
and as many secondary level keys as needed in the spec.
The keys can be either a string or a Spec or a tuple containing the
Spec and the dependency types.
Args:
spec_dict: the dictionary containing the spec literal
normal: if :data:`True` the same key appearing at different levels
of the ``spec_dict`` will map to the same object in memory.
Examples:
A simple spec ``foo`` with no dependencies::
{"foo": None}
A spec ``foo`` with a ``(build, link)`` dependency ``bar``::
{"foo":
{"bar:build,link": None}
}
A spec with a diamond dependency and various build types::
{"dt-diamond": {
"dt-diamond-left:build,link": {
"dt-diamond-bottom:build": None
},
"dt-diamond-right:build,link": {
"dt-diamond-bottom:build,link,run": None
}
}}
The same spec with a double copy of ``dt-diamond-bottom`` and
no diamond structure::
Spec.from_literal({"dt-diamond": {
"dt-diamond-left:build,link": {
"dt-diamond-bottom:build": None
},
"dt-diamond-right:build,link": {
"dt-diamond-bottom:build,link,run": None
}
}}, normal=False)
Constructing a spec using a Spec object as key::
mpich = Spec("mpich")
libelf = Spec("libelf@1.8.11")
expected_normalized = Spec.from_literal({
"mpileaks": {
"callpath": {
"dyninst": {
"libdwarf": {libelf: None},
libelf: None
},
mpich: None
},
mpich: None
},
})
"""
# Maps a literal to a Spec, to be sure we are reusing the same object
spec_cache = LazySpecCache()
def spec_builder(d):
# The invariant is that the top level dictionary must have
# only one key
assert len(d) == 1
# Construct the top-level spec
spec_like, dep_like = next(iter(d.items()))
# If the requirement was for unique nodes (default)
# then reuse keys from the local cache. Otherwise build
# a new node every time.
if not isinstance(spec_like, Spec):
spec = spec_cache[spec_like] if normal else Spec(spec_like)
else:
spec = spec_like
if dep_like is None:
return spec
def name_and_dependency_types(s: str) -> Tuple[str, dt.DepFlag]:
"""Given a key in the dictionary containing the literal,
extracts the name of the spec and its dependency types.
Args:
s: key in the dictionary containing the literal
"""
t = s.split(":")
if len(t) > 2:
msg = 'more than one ":" separator in key "{0}"'
raise KeyError(msg.format(s))
name = t[0]
if len(t) == 2:
depflag = dt.flag_from_strings(dep_str.strip() for dep_str in t[1].split(","))
else:
depflag = 0
return name, depflag
def spec_and_dependency_types(
s: Union[Spec, Tuple[Spec, str]],
) -> Tuple[Spec, dt.DepFlag]:
"""Given a non-string key in the literal, extracts the spec
and its dependency types.
Args:
s: either a Spec object, or a tuple of Spec and string of dependency types
"""
if isinstance(s, Spec):
return s, 0
spec_obj, dtypes = s
return spec_obj, dt.flag_from_strings(x.strip() for x in dtypes.split(","))
# Recurse on dependencies
for s, s_dependencies in dep_like.items():
if isinstance(s, str):
dag_node, dep_flag = name_and_dependency_types(s)
else:
dag_node, dep_flag = spec_and_dependency_types(s)
dependency_spec = spec_builder({dag_node: s_dependencies})
spec._add_dependency(dependency_spec, depflag=dep_flag, virtuals=())
return spec
return spec_builder(spec_dict)
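# Illustrative sketch: a minimal literal with a single build-only dependency
# (package names are hypothetical):
#
#     s = Spec.from_literal({"foo": {"bar:build": None}})
#     [e.spec.name for e in s.edges_to_dependencies()]   # -> ["bar"]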
@staticmethod
def from_dict(data) -> "Spec":
"""Construct a spec from JSON/YAML.
Args:
data: a nested dict/list data structure read from YAML or JSON.
"""
# Legacy specfile format
if isinstance(data["spec"], list):
spec = SpecfileV1.load(data)
elif int(data["spec"]["_meta"]["version"]) == 2:
spec = SpecfileV2.load(data)
elif int(data["spec"]["_meta"]["version"]) == 3:
spec = SpecfileV3.load(data)
elif int(data["spec"]["_meta"]["version"]) == 4:
spec = SpecfileV4.load(data)
else:
spec = SpecfileV5.load(data)
# Any git version should have a version lookup attached, so its commit/ref can be resolved
for s in spec.traverse():
s.attach_git_version_lookup()
return spec
@staticmethod
def from_yaml(stream) -> "Spec":
"""Construct a spec from YAML.
Args:
stream: string or file object to read from.
"""
data = syaml.load(stream)
return Spec.from_dict(data)
@staticmethod
def from_json(stream) -> "Spec":
"""Construct a spec from JSON.
Args:
stream: string or file object to read from.
"""
try:
data = sjson.load(stream)
return Spec.from_dict(data)
except Exception as e:
raise sjson.SpackJSONError("error parsing JSON spec:", e) from e
@staticmethod
def extract_json_from_clearsig(data):
m = CLEARSIGN_FILE_REGEX.search(data)
if m:
return sjson.load(m.group(1))
return sjson.load(data)
@staticmethod
def from_signed_json(stream):
"""Construct a spec from clearsigned json spec file.
Args:
stream: string or file object to read from.
"""
data = stream
if hasattr(stream, "read"):
data = stream.read()
extracted_json = Spec.extract_json_from_clearsig(data)
return Spec.from_dict(extracted_json)
@staticmethod
def from_detection(
spec_str: str,
*,
external_path: str,
external_modules: Optional[List[str]] = None,
extra_attributes: Optional[Dict] = None,
) -> "Spec":
"""Construct a spec from a spec string determined during external
detection and attach extra attributes to it.
Args:
spec_str: spec string
external_path: prefix of the external spec
external_modules: optional module files to be loaded when the external spec is used
extra_attributes: dictionary containing extra attributes
"""
s = Spec(spec_str, external_path=external_path, external_modules=external_modules)
extra_attributes = syaml.sorted_dict(extra_attributes or {})
# This is needed to be able to validate multi-valued variants,
# otherwise they'll still be abstract in the context of detection.
substitute_abstract_variants(s)
s.extra_attributes = extra_attributes
return s
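# Illustrative sketch: building an external spec from detection data. The values
# are hypothetical, and the package must exist in a repository so its variants
# can be validated:
#
#     ext = Spec.from_detection("cmake@3.27.0", external_path="/usr")
#     ext.external   # -> True, because external_path is set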
def _patches_assigned(self):
"""Whether patches have been assigned to this spec by the concretizer."""
# FIXME: _patches_in_order_of_appearance is attached after concretization
# FIXME: to store the order of patches.
# FIXME: Probably needs to be refactored in a cleaner way.
if "patches" not in self.variants:
return False
# ensure that patch state is consistent
patch_variant = self.variants["patches"]
assert hasattr(
patch_variant, "_patches_in_order_of_appearance"
), "patches should always be assigned with a patch variant."
return True
@staticmethod
def ensure_no_deprecated(root: "Spec") -> None:
"""Raise if a deprecated spec is in the dag of the given root spec.
Raises:
spack.spec.SpecDeprecatedError: if any deprecated spec is found
"""
deprecated = []
from spack.store import STORE
with STORE.db.read_transaction():
for x in root.traverse():
_, rec = STORE.db.query_by_spec_hash(x.dag_hash())
if rec and rec.deprecated_for:
deprecated.append(rec)
if deprecated:
msg = "\n The following specs have been deprecated"
msg += " in favor of specs with the hashes shown:\n"
for rec in deprecated:
msg += " %s --> %s\n" % (rec.spec, rec.deprecated_for)
msg += "\n"
msg += " For each package listed, choose another spec\n"
raise SpecDeprecatedError(msg)
def _mark_root_concrete(self, value=True):
"""Mark just this spec (not dependencies) concrete."""
if (not value) and self.concrete and self.installed:
return
self._concrete = value
self._validate_version()
for variant in self.variants.values():
variant.concrete = True
def _validate_version(self):
# Specs that were concretized with just a git sha as version, without associated
# Spack version, get their Spack version mapped to develop. This should only apply
# when reading specs concretized with Spack 0.19 or earlier. Currently Spack always
# ensures that GitVersion specs have an associated Spack version.
v = self.versions.concrete
if not isinstance(v, vn.GitVersion):
return
try:
v.ref_version
except vn.VersionLookupError:
before = self.cformat("{name}{@version}{/hash:7}")
v.std_version = vn.StandardVersion.from_string("develop")
tty.debug(
f"the git sha of {before} could not be resolved to spack version; "
f"it has been replaced by {self.cformat('{name}{@version}{/hash:7}')}."
)
def _mark_concrete(self, value=True):
"""Mark this spec and its dependencies as concrete.
Only for internal use -- client code should use "concretize"
unless there is a need to force a spec to be concrete.
"""
# if set to false, clear out all hashes (set to None or remove attr)
# may need to change references to respect None
for s in self.traverse():
if (not value) and s.concrete and s.installed:
continue
elif not value:
s.clear_caches()
s._mark_root_concrete(value)
def _finalize_concretization(self):
"""Assign hashes to this spec, and mark it concrete.
There are special semantics to consider for ``package_hash``, because we can't
call it on *already* concrete specs, but we need to assign it *at concretization
time* to just-concretized specs. So, the concretizer must assign the package
hash *before* marking the specs concrete (so that we know which specs were
already concrete before this latest concretization).
``dag_hash`` is also tricky, since it cannot compute ``package_hash()`` lazily.
Because ``package_hash`` needs to be assigned *at concretization time*,
``to_node_dict()`` can't just assume that it can compute ``package_hash`` itself
-- it needs to either see or not see a ``_package_hash`` attribute.
Rules of thumb for ``package_hash``:
1. Old-style concrete specs from *before* ``dag_hash`` included ``package_hash``
will not have a ``_package_hash`` attribute at all.
2. New-style concrete specs will have a ``_package_hash`` assigned at
concretization time.
3. Abstract specs will not have a ``_package_hash`` attribute at all.
"""
for spec in self.traverse():
# Already concrete specs either already have a package hash (new dag_hash())
# or they never will b/c we can't know it (old dag_hash()). Skip them.
#
# We only assign package hash to not-yet-concrete specs, for which we know
# we can compute the hash.
if not spec.concrete:
# we need force=True here because package hash assignment has to happen
# before we mark concrete, so that we know what was *already* concrete.
spec._cached_hash(ht.package_hash, force=True)
# keep this check here to ensure package hash is saved
assert getattr(spec, ht.package_hash.attr)
# Mark everything in the spec as concrete
self._mark_concrete()
# Assign dag_hash (this *could* be done lazily, but it's assigned anyway in
# ensure_no_deprecated, and it's clearer to see explicitly where it happens).
# Any specs that were concrete before finalization will already have a cached
# DAG hash.
for spec in self.traverse():
spec._cached_hash(ht.dag_hash)
def index(self, deptype="all"):
"""Return a dictionary that points to all the dependencies in this
spec.
"""
dm = collections.defaultdict(list)
for spec in self.traverse(deptype=deptype):
dm[spec.name].append(spec)
return dm
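# Illustrative sketch: the index groups every node reachable in the DAG by
# package name, so repeated lookups avoid re-traversing (names are hypothetical):
#
#     by_name = spec.index(deptype=("link", "run"))
#     by_name["zlib"]   # -> list of Spec nodes named "zlib"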
def validate_or_raise(self):
"""Checks that names and values in this spec are real. If they're not,
it will raise an appropriate exception.
"""
# FIXME: this function should be lazy, and collect all the errors
# FIXME: before raising the exceptions, instead of greedily raising
# FIXME: just the first one encountered
for spec in self.traverse():
# raise an UnknownPackageError if the spec's package isn't real.
if spec.name and not spack.repo.PATH.is_virtual(spec.name):
spack.repo.PATH.get_pkg_class(spec.fullname)
# FIXME: atm allow '%' on abstract specs only if they depend on C, C++, or Fortran
if spec.dependencies(deptype="build"):
pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)
pkg_dependencies = pkg_cls.dependency_names()
if not any(x in pkg_dependencies for x in ("c", "cxx", "fortran")):
raise UnsupportedCompilerError(
f"{spec.fullname} does not depend on 'c', 'cxx, or 'fortran'"
)
# Ensure correctness of variants (if the spec is not virtual)
if not spack.repo.PATH.is_virtual(spec.name):
Spec.ensure_valid_variants(spec)
substitute_abstract_variants(spec)
@staticmethod
def ensure_valid_variants(spec: "Spec") -> None:
"""Ensures that the variant attached to the given spec are valid.
Raises:
spack.variant.UnknownVariantError: on the first unknown variant found
"""
# concrete variants are always valid
if spec.concrete:
return
pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname)
pkg_variants = pkg_cls.variant_names()
# reserved names are variants that may be set on any package
# but are not necessarily recorded by the package's class
propagate_variants = [name for name, variant in spec.variants.items() if variant.propagate]
not_existing = set(spec.variants)
not_existing.difference_update(pkg_variants, vt.RESERVED_NAMES, propagate_variants)
if not_existing:
raise vt.UnknownVariantError(
f"No such variant {not_existing} for spec: '{spec}'", list(not_existing)
)
def constrain(self, other, deps=True) -> bool:
"""Constrains self with other, and returns True if self changed, False otherwise.
Args:
other: constraint to be added to self
deps: if False, constrain only the root node, otherwise constrain dependencies as well
Raises:
spack.error.UnsatisfiableSpecError: when self cannot be constrained
"""
return self._constrain(other, deps=deps, resolve_virtuals=True)
def _constrain_symbolically(self, other, deps=True) -> bool:
"""Constrains self with other, and returns True if self changed, False otherwise.
This function has no notion of virtuals, so it does not need a repository.
Args:
other: constraint to be added to self
deps: if False, constrain only the root node, otherwise constrain dependencies as well
Raises:
spack.error.UnsatisfiableSpecError: when self cannot be constrained
Examples:
>>> from spack.spec import Spec, UnsatisfiableDependencySpecError
>>> s = Spec("hdf5 ^mpi@4")
>>> t = Spec("hdf5 ^mpi=openmpi")
>>> try:
... s.constrain(t)
... except UnsatisfiableDependencySpecError as e:
... print(e)
...
hdf5 ^mpi=openmpi does not satisfy hdf5 ^mpi@4
>>> s._constrain_symbolically(t)
True
>>> s
hdf5 ^mpi@4 ^mpi=openmpi
"""
return self._constrain(other, deps=deps, resolve_virtuals=False)
def _constrain(self, other, deps=True, *, resolve_virtuals: bool):
# If we are trying to constrain a concrete spec, either the spec
# already satisfies the constraint (and the method returns False)
# or it raises an exception
if self.concrete:
if self._satisfies(other, resolve_virtuals=resolve_virtuals):
return False
else:
raise spack.error.UnsatisfiableSpecError(self, other, "constrain a concrete spec")
other = self._autospec(other)
if other.concrete and other._satisfies(self, resolve_virtuals=resolve_virtuals):
self._dup(other)
return True
if other.abstract_hash:
if not self.abstract_hash or other.abstract_hash.startswith(self.abstract_hash):
self.abstract_hash = other.abstract_hash
elif not self.abstract_hash.startswith(other.abstract_hash):
raise InvalidHashError(self, other.abstract_hash)
if not (self.name == other.name or (not self.name) or (not other.name)):
raise UnsatisfiableSpecNameError(self.name, other.name)
if (
other.namespace is not None
and self.namespace is not None
and other.namespace != self.namespace
):
raise UnsatisfiableSpecNameError(self.fullname, other.fullname)
if not self.versions.overlaps(other.versions):
raise UnsatisfiableVersionSpecError(self.versions, other.versions)
for v in [x for x in other.variants if x in self.variants]:
if not self.variants[v].intersects(other.variants[v]):
raise vt.UnsatisfiableVariantSpecError(self.variants[v], other.variants[v])
sarch, oarch = self.architecture, other.architecture
if (
sarch is not None
and oarch is not None
and not self.architecture.intersects(other.architecture)
):
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
changed = False
if not self.name and other.name:
self.name = other.name
changed = True
if not self.namespace and other.namespace:
self.namespace = other.namespace
changed = True
changed |= self.versions.intersect(other.versions)
changed |= self.variants.constrain(other.variants)
changed |= self.compiler_flags.constrain(other.compiler_flags)
sarch, oarch = self.architecture, other.architecture
if sarch is not None and oarch is not None:
changed |= self.architecture.constrain(other.architecture)
elif oarch is not None:
self.architecture = oarch
changed = True
if deps:
changed |= self._constrain_dependencies(other, resolve_virtuals=resolve_virtuals)
if other.concrete and not self.concrete and other.satisfies(self):
self._finalize_concretization()
return changed
def _constrain_dependencies(self, other: "Spec", resolve_virtuals: bool = True) -> bool:
"""Apply constraints of other spec's dependencies to this spec."""
if not other._dependencies:
return False
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
# check and be more specific about what's wrong.
if not other._intersects_dependencies(self, resolve_virtuals=resolve_virtuals):
raise UnsatisfiableDependencySpecError(other, self)
if any(not d.name for d in other.traverse(root=False)):
raise UnconstrainableDependencySpecError(other)
reference_spec = self.copy(deps=True)
for edge in other.edges_to_dependencies():
existing = [
e for e in self.edges_to_dependencies(edge.spec.name) if e.when == edge.when
]
if existing:
existing[0].spec.constrain(edge.spec)
existing[0].update_deptypes(edge.depflag)
existing[0].update_virtuals(edge.virtuals)
existing[0].direct |= edge.direct
else:
self.add_dependency_edge(
edge.spec,
depflag=edge.depflag,
virtuals=edge.virtuals,
direct=edge.direct,
propagation=edge.propagation,
when=edge.when,
)
return self != reference_spec
def constrained(self, other, deps=True):
"""Return a constrained copy without modifying this spec."""
clone = self.copy(deps=deps)
clone.constrain(other, deps)
return clone
def _autospec(self, spec_like):
"""
Used to convert arguments to specs. If spec_like is already a Spec,
it is returned as-is; if it is a string, it is parsed into a new Spec
(so a parse failure propagates to the caller).
"""
if isinstance(spec_like, Spec):
return spec_like
return Spec(spec_like)
def intersects(self, other: Union[str, "Spec"], deps: bool = True) -> bool:
"""Return True if there exists at least one concrete spec that matches both
self and other, otherwise False.
This operation is commutative, and if two specs intersect it means that one
can constrain the other.
Args:
other: spec to be checked for compatibility
deps: if True check compatibility of dependency nodes too, if False only check root
"""
return self._intersects(other=other, deps=deps, resolve_virtuals=True)
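# Illustrative sketch: intersection is symmetric and asks whether some concrete
# spec could match both constraints (names and version ranges are hypothetical):
#
#     Spec("hdf5@1.10:").intersects(Spec("hdf5@:1.12"))   # True: the ranges overlap
#     Spec("hdf5@1.8").intersects(Spec("hdf5@1.14"))      # False: versions conflict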
def _intersects(
self, other: Union[str, "Spec"], deps: bool = True, resolve_virtuals: bool = True
) -> bool:
other = self._autospec(other)
if other.concrete and self.concrete:
return self.dag_hash() == other.dag_hash()
elif self.concrete:
return self._satisfies(other, resolve_virtuals=resolve_virtuals)
elif other.concrete:
return other._satisfies(self, resolve_virtuals=resolve_virtuals)
# From here we know both self and other are not concrete
self_hash = self.abstract_hash
other_hash = other.abstract_hash
if (
self_hash
and other_hash
and not (self_hash.startswith(other_hash) or other_hash.startswith(self_hash))
):
return False
# If the names are different, we need to consider virtuals
if self.name != other.name and self.name and other.name:
if not resolve_virtuals:
return False
self_virtual = spack.repo.PATH.is_virtual(self.name)
other_virtual = spack.repo.PATH.is_virtual(other.name)
if self_virtual and other_virtual:
# Two virtual specs intersect only if there are providers for both
lhs = spack.repo.PATH.providers_for(str(self))
rhs = spack.repo.PATH.providers_for(str(other))
intersection = [s for s in lhs if any(s.intersects(z) for z in rhs)]
return bool(intersection)
# A provider can satisfy a virtual dependency.
elif self_virtual or other_virtual:
virtual_spec, non_virtual_spec = (self, other) if self_virtual else (other, self)
try:
# Here we might get an abstract spec
pkg_cls = spack.repo.PATH.get_pkg_class(non_virtual_spec.fullname)
pkg = pkg_cls(non_virtual_spec)
except spack.repo.UnknownEntityError:
# If we can't get package info on this spec, don't treat
# it as a provider of this vdep.
return False
if pkg.provides(virtual_spec.name):
for when_spec, provided in pkg.provided.items():
if non_virtual_spec.intersects(when_spec, deps=False):
if any(vpkg.intersects(virtual_spec) for vpkg in provided):
return True
return False
# namespaces either match, or other doesn't require one.
if (
other.namespace is not None
and self.namespace is not None
and self.namespace != other.namespace
):
return False
if self.versions and other.versions:
if not self.versions.intersects(other.versions):
return False
if not self.variants.intersects(other.variants):
return False
if self.architecture and other.architecture:
if not self.architecture.intersects(other.architecture):
return False
if not self.compiler_flags.intersects(other.compiler_flags):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
if deps:
return self._intersects_dependencies(other, resolve_virtuals=resolve_virtuals)
return True
def _intersects_dependencies(self, other, resolve_virtuals: bool = True):
if not other._dependencies or not self._dependencies:
# one spec *could* eventually satisfy the other
return True
# Handle first-order constraints directly
common_dependencies = {x.name for x in self.dependencies()}
common_dependencies &= {x.name for x in other.dependencies()}
for name in common_dependencies:
if not self[name]._intersects(
other[name], deps=True, resolve_virtuals=resolve_virtuals
):
return False
if not resolve_virtuals:
return True
# For virtual dependencies, we need to dig a little deeper.
self_index = spack.provider_index.ProviderIndex(
repository=spack.repo.PATH, specs=self.traverse(), restrict=True
)
other_index = spack.provider_index.ProviderIndex(
repository=spack.repo.PATH, specs=other.traverse(), restrict=True
)
# These two loops handle cases where there is an overly restrictive
# vpkg in one spec for a provider in the other (e.g., mpi@3: is not
# compatible with mpich2)
for spec in self.traverse():
if (
spack.repo.PATH.is_virtual(spec.name)
and spec.name in other_index
and not other_index.providers_for(spec)
):
return False
for spec in other.traverse():
if (
spack.repo.PATH.is_virtual(spec.name)
and spec.name in self_index
and not self_index.providers_for(spec)
):
return False
return True
def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool:
"""Return True if all concrete specs matching self also match other, otherwise False.
Args:
other: spec to be satisfied
deps: if True, descend to dependencies, otherwise only check root node
"""
return self._satisfies(other=other, deps=deps, resolve_virtuals=True)
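# Illustrative sketch: satisfies() is directional, unlike intersects()
# (names and version ranges are hypothetical):
#
#     Spec("hdf5@1.12").satisfies("hdf5@1.10:")   # True: 1.12 lies in the range
#     Spec("hdf5@1.10:").satisfies("hdf5@1.12")   # False: the range admits other versions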
def _satisfies(
self, other: Union[str, "Spec"], deps: bool = True, resolve_virtuals: bool = True
) -> bool:
"""Return True if all concrete specs matching self also match other, otherwise False.
Args:
other: spec to be satisfied
deps: if True, descend to dependencies, otherwise only check root node
resolve_virtuals: if True, resolve virtuals in self and other. This requires a
repository to be available.
"""
other = self._autospec(other)
if other.concrete:
# The left-hand side must be the same singleton with identical hash. Notice that
# package hashes can be different for otherwise indistinguishable concrete Spec
# objects.
return self.concrete and self.dag_hash() == other.dag_hash()
# If the right-hand side has an abstract hash, make sure it's a prefix of the
# left-hand side's (abstract) hash.
if other.abstract_hash:
compare_hash = self.dag_hash() if self.concrete else self.abstract_hash
if not compare_hash or not compare_hash.startswith(other.abstract_hash):
return False
# If the names are different, we need to consider virtuals
if self.name != other.name and self.name and other.name and resolve_virtuals:
# A concrete provider can satisfy a virtual dependency.
if not spack.repo.PATH.is_virtual(self.name) and spack.repo.PATH.is_virtual(
other.name
):
try:
# Here we might get an abstract spec
pkg_cls = spack.repo.PATH.get_pkg_class(self.fullname)
pkg = pkg_cls(self)
except spack.repo.UnknownEntityError:
# If we can't get package info on this spec, don't treat
# it as a provider of this vdep.
return False
if pkg.provides(other.name):
for when_spec, provided in pkg.provided.items():
if self.satisfies(when_spec, deps=False):
if any(vpkg.intersects(other) for vpkg in provided):
return True
return False
# namespaces either match, or other doesn't require one.
if (
other.namespace is not None
and self.namespace is not None
and self.namespace != other.namespace
):
return False
if not self.versions.satisfies(other.versions):
return False
if not self.variants.satisfies(other.variants):
return False
if self.architecture and other.architecture:
if not self.architecture.satisfies(other.architecture):
return False
elif other.architecture and not self.architecture:
return False
if not self.compiler_flags.satisfies(other.compiler_flags):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
if not deps:
return True
# If there are no constraints to satisfy, we're done.
if not other._dependencies:
return True
# If we arrived here, the lhs root node satisfies the rhs root node. Now we need to check
# all the edges that have an abstract parent, and verify that they match some edge in the
# lhs.
#
# It might happen that the rhs brings in concrete sub-DAGs. For those we don't need to
# verify the edge properties, because everything is encoded in the hash of the nodes that
# will be verified later.
lhs_edges: Dict[str, Set[DependencySpec]] = collections.defaultdict(set)
mock_nodes_from_old_specfiles = set()
for rhs_edge in other.traverse_edges(root=False, cover="edges"):
# Check satisfaction of the dependency only if its when condition can apply
if not rhs_edge.parent.name or rhs_edge.parent.name == self.name:
test_spec = self
elif rhs_edge.parent.name in self:
test_spec = self[rhs_edge.parent.name]
else:
test_spec = None
if test_spec and not test_spec._intersects(
rhs_edge.when, resolve_virtuals=resolve_virtuals
):
continue
# If we are checking for ^mpi we need to verify if there is any edge
if resolve_virtuals and spack.repo.PATH.is_virtual(rhs_edge.spec.name):
# Don't mutate objects in memory that may be referred elsewhere
rhs_edge = rhs_edge.copy()
rhs_edge.update_virtuals(virtuals=(rhs_edge.spec.name,))
if rhs_edge.direct:
# Note: this relies on abstract specs parsed from a string not being deeper than 2
# levels, e.g. in "foo %fee ^bar %baz" we cannot go deeper than "baz" and specify
# its dependencies too.
#
# We also need to account for cases like gcc@<new> %gcc@<old> where the parent
# name is the same as the child name
#
# The same assumptions hold on Spec.constrain, and Spec.intersect
current_node = self
if rhs_edge.parent.name and rhs_edge.parent.name != rhs_edge.spec.name:
try:
current_node = self[rhs_edge.parent.name]
except KeyError:
return False
if current_node.original_spec_format() < 5 or (
# If the current external node has dependencies, it has no annotations
current_node.original_spec_format() >= 5
and current_node.external
and not current_node._dependencies
):
compiler_spec = current_node.annotations.compiler_node_attribute
if compiler_spec is None:
return False
mock_nodes_from_old_specfiles.add(compiler_spec)
# This checks that the single node compiler spec satisfies the request
# of a direct dependency. The check is not perfect, but is a reasonable heuristic.
if not compiler_spec._satisfies(
rhs_edge.spec, resolve_virtuals=resolve_virtuals
):
return False
else:
name = (
None
if resolve_virtuals and spack.repo.PATH.is_virtual(rhs_edge.spec.name)
else rhs_edge.spec.name
)
candidate_edges = current_node.edges_to_dependencies(
name=name, virtuals=rhs_edge.virtuals or None
)
# Select at least the deptypes on the rhs_edge, and conditional edges that
# constrain a bigger portion of the search space (so it's rhs.when <= lhs.when)
candidates = [
lhs_edge.spec
for lhs_edge in candidate_edges
if ((lhs_edge.depflag & rhs_edge.depflag) ^ rhs_edge.depflag) == 0
and rhs_edge.when._satisfies(
lhs_edge.when, resolve_virtuals=resolve_virtuals
)
]
if not candidates or not any(
x._satisfies(rhs_edge.spec, resolve_virtuals=resolve_virtuals)
for x in candidates
):
return False
continue
# Skip edges from a concrete sub-DAG
if rhs_edge.parent.concrete:
continue
if not lhs_edges:
# Construct a map of the link/run subDAG + direct "build" edges,
# keyed by dependency name
for lhs_edge in self.traverse_edges(
root=False, cover="edges", deptype=("link", "run")
):
lhs_edges[lhs_edge.spec.name].add(lhs_edge)
for virtual_name in lhs_edge.virtuals:
lhs_edges[virtual_name].add(lhs_edge)
build_edges = self.edges_to_dependencies(depflag=dt.BUILD)
for lhs_edge in build_edges:
lhs_edges[lhs_edge.spec.name].add(lhs_edge)
for virtual_name in lhs_edge.virtuals:
lhs_edges[virtual_name].add(lhs_edge)
# We don't have edges to this dependency
current_dependency_name = rhs_edge.spec.name
if current_dependency_name and current_dependency_name not in lhs_edges:
return False
if not current_dependency_name:
# Here we have an anonymous spec e.g. ^ dev_path=*
candidate_edges = list(itertools.chain(*lhs_edges.values()))
else:
candidate_edges = [
lhs_edge
for lhs_edge in lhs_edges[current_dependency_name]
if rhs_edge.when._satisfies(lhs_edge.when, resolve_virtuals=resolve_virtuals)
]
if not candidate_edges:
return False
for virtual in rhs_edge.virtuals:
has_virtual = any(virtual in edge.virtuals for edge in candidate_edges)
if not has_virtual:
return False
for lhs_edge in candidate_edges:
if lhs_edge.spec._satisfies(
rhs_edge.spec, deps=False, resolve_virtuals=resolve_virtuals
):
break
else:
return False
return True
@property # type: ignore[misc] # decorated prop not supported in mypy
def patches(self):
"""Return patch objects for any patch sha256 sums on this Spec.
This is for use after concretization to iterate over any patches
associated with this spec.
TODO: this only checks in the package; it doesn't resurrect old
patches from install directories, but it probably should.
"""
if not hasattr(self, "_patches"):
self._patches = []
# translate patch sha256sums to patch objects by consulting the index
if self._patches_assigned():
for sha256 in self.variants["patches"]._patches_in_order_of_appearance:
index = spack.repo.PATH.patch_index
pkg_cls = spack.repo.PATH.get_pkg_class(self.name)
try:
patch = index.patch_for_package(sha256, pkg_cls)
except spack.error.PatchLookupError as e:
raise spack.error.SpecError(
f"{e}. This usually means the patch was modified or removed. "
"To fix this, either reconcretize or use the original package "
"repository"
) from e
self._patches.append(patch)
return self._patches
def _dup(
self,
other: "Spec",
deps: Union[bool, dt.DepTypes, dt.DepFlag] = True,
*,
propagation: Optional[PropagationPolicy] = None,
) -> bool:
"""Copies "other" into self, by overwriting all attributes.
Args:
other: spec to be copied onto ``self``
deps: if True, copies all dependencies; if False, copies none of them.
If a deptype or depflag is given, only matching dependencies are copied.
Returns:
True if ``self`` changed because of the copy operation, False otherwise.
"""
# We don't count dependencies as changes here
changed = True
if hasattr(self, "name"):
changed = (
self.name != other.name
and self.versions != other.versions
and self.architecture != other.architecture
and self.variants != other.variants
and self.concrete != other.concrete
and self.external_path != other.external_path
and self.external_modules != other.external_modules
and self.compiler_flags != other.compiler_flags
and self.abstract_hash != other.abstract_hash
)
self._package = None
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
self.architecture = other.architecture.copy() if other.architecture else None
self.compiler_flags = other.compiler_flags.copy()
self.compiler_flags.spec = self
self.variants = other.variants.copy()
self._build_spec = other._build_spec
# Clear dependencies
self._dependents = _EdgeMap(store_by_child=False)
self._dependencies = _EdgeMap(store_by_child=True)
# FIXME: we manage _patches_in_order_of_appearance specially here
# to keep it from leaking out of spec.py, but we should figure
# out how to handle it more elegantly in the Variant classes.
for k, v in other.variants.items():
patches = getattr(v, "_patches_in_order_of_appearance", None)
if patches:
self.variants[k]._patches_in_order_of_appearance = patches
self.variants.spec = self
self.external_path = other.external_path
self.external_modules = other.external_modules
self.extra_attributes = other.extra_attributes
self.namespace = other.namespace
self.annotations = other.annotations
# If we copy dependencies, preserve DAG structure in the new spec
if deps:
# If caller restricted deptypes to be copied, adjust that here.
# By default, just copy all deptypes
depflag = dt.ALL
if isinstance(deps, (tuple, list, str)):
depflag = dt.canonicalize(deps)
self._dup_deps(other, depflag, propagation=propagation)
self._prefix = other._prefix
self._concrete = other._concrete
self.abstract_hash = other.abstract_hash
if self._concrete:
self._dunder_hash = other._dunder_hash
for h in ht.HASHES:
setattr(self, h.attr, getattr(other, h.attr, None))
else:
self._dunder_hash = None
for h in ht.HASHES:
setattr(self, h.attr, None)
return changed
def _dup_deps(
self, other, depflag: dt.DepFlag, propagation: Optional[PropagationPolicy] = None
):
def spid(spec):
return id(spec)
new_specs = {spid(other): self}
for edge in other.traverse_edges(cover="edges", root=False):
if edge.depflag and not depflag & edge.depflag:
continue
if spid(edge.parent) not in new_specs:
new_specs[spid(edge.parent)] = edge.parent.copy(deps=False)
if spid(edge.spec) not in new_specs:
new_specs[spid(edge.spec)] = edge.spec.copy(deps=False)
edge_propagation = edge.propagation if propagation is None else propagation
new_specs[spid(edge.parent)].add_dependency_edge(
new_specs[spid(edge.spec)],
depflag=edge.depflag,
virtuals=edge.virtuals,
propagation=edge_propagation,
direct=edge.direct,
when=edge.when,
)
def copy(self, deps: Union[bool, dt.DepTypes, dt.DepFlag] = True, **kwargs):
"""Make a copy of this spec.
Args:
deps: Defaults to :data:`True`. If boolean, controls
whether dependencies are copied (copied if :data:`True`). If a
DepTypes or DepFlag is provided, *only* matching dependencies are copied.
kwargs: additional arguments for internal use (passed to ``_dup``).
Returns:
A copy of this spec.
Examples:
Deep copy with dependencies::
spec.copy()
spec.copy(deps=True)
Shallow copy (no dependencies)::
spec.copy(deps=False)
Only build and run dependencies::
deps=("build", "run"):
"""
clone = Spec.__new__(Spec)
clone._dup(self, deps=deps, **kwargs)
return clone
@property
def version(self):
if not self.versions.concrete:
raise spack.error.SpecError("Spec version is not concrete: " + str(self))
return self.versions[0]
def __getitem__(self, name: str):
"""Get a dependency from the spec by its name. This call implicitly
sets a query state in the package being retrieved. The behavior of
packages may be influenced by additional query parameters that are
passed after a colon symbol.
Note that if a virtual package is queried a copy of the Spec is
returned while for non-virtual a reference is returned.
"""
query_parameters: List[str] = name.split(":")
if len(query_parameters) > 2:
raise KeyError("key has more than one ':' symbol. At most one is admitted.")
name, query_parameters = query_parameters[0], query_parameters[1:]
if query_parameters:
# We have extra query parameters, which are comma separated
# values
csv = query_parameters.pop().strip()
query_parameters = re.split(r"\s*,\s*", csv)
# Consider all direct dependencies and transitive runtime dependencies
order = itertools.chain(
self.edges_to_dependencies(depflag=dt.BUILD | dt.TEST),
self.traverse_edges(deptype=dt.LINK | dt.RUN, order="breadth", cover="edges"),
)
try:
edge = next((e for e in order if e.spec.name == name or name in e.virtuals))
except StopIteration as e:
raise KeyError(f"No spec with name {name} in {self}") from e
if self._concrete:
return SpecBuildInterface(
edge.spec, name, query_parameters, _parent=self, is_virtual=name in edge.virtuals
)
return edge.spec
def __contains__(self, spec):
"""True if this spec or some dependency satisfies the spec.
Note: If ``spec`` is anonymous, we ONLY check whether the root
satisfies it, NOT dependencies. This is because most anonymous
specs (e.g., ``@1.2``) don't make sense when applied across an
entire DAG -- we limit them to the root.
"""
spec = self._autospec(spec)
# if anonymous or same name, we only have to look at the root
if not spec.name or spec.name == self.name:
return self.satisfies(spec)
try:
dep = self[spec.name]
except KeyError:
return False
return dep.satisfies(spec)
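# Illustrative sketch: item access and containment on a spec whose DAG contains
# an mpi provider (a hypothetical setup):
#
#     mpi_node = spec["mpi"]   # found by package name or by provided virtual
#     "mpi@3:" in spec         # True if some node in the DAG satisfies the constraint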
def eq_dag(self, other, deptypes=True, vs=None, vo=None):
"""True if the full dependency DAGs of specs are equal."""
if vs is None:
vs = set()
if vo is None:
vo = set()
vs.add(id(self))
vo.add(id(other))
if not self.eq_node(other):
return False
if len(self._dependencies) != len(other._dependencies):
return False
ssorted = [self._dependencies[name] for name in sorted(self._dependencies)]
osorted = [other._dependencies[name] for name in sorted(other._dependencies)]
for s_dspec, o_dspec in zip(
itertools.chain.from_iterable(ssorted), itertools.chain.from_iterable(osorted)
):
if deptypes and s_dspec.depflag != o_dspec.depflag:
return False
s, o = s_dspec.spec, o_dspec.spec
visited_s = id(s) in vs
visited_o = id(o) in vo
# Check for duplicate or non-equal dependencies
if visited_s != visited_o:
return False
# Skip visited nodes
if visited_s or visited_o:
continue
# Recursive check for equality
if not s.eq_dag(o, deptypes, vs, vo):
return False
return True
def _cmp_node(self):
"""Yield comparable elements of just *this node* and not its deps."""
yield self.name
yield self.namespace
yield self.versions
yield self.variants
yield self.compiler_flags
yield self.architecture
yield self.abstract_hash
# this is not present on older specs
yield getattr(self, "_package_hash", None)
def eq_node(self, other):
"""Equality with another spec, not including dependencies."""
return (other is not None) and lang.lazy_eq(self._cmp_node, other._cmp_node)
def _cmp_fast_eq(self, other) -> Optional[bool]:
"""Short-circuit compare with other for equality, for lazy_lexicographic_ordering."""
# If there is ever a breaking change to hash computation, whether accidental or purposeful,
# two specs can be identical modulo DAG hash, depending on what time they were concretized.
# From the perspective of many operations in Spack (database, build cache, etc.) a different
# DAG hash means a different spec. Here we ensure that two otherwise identical specs, one
# serialized before the hash change and one after, are considered different.
if self is other:
return True
if self.concrete and other and other.concrete:
return self.dag_hash() == other.dag_hash()
return None
def _cmp_iter(self):
"""Lazily yield components of self for comparison."""
# Spec comparison in Spack needs to be fast, so there are several cases here for
# performance. The main places we care about this are:
#
# * Abstract specs: there are lots of abstract specs in package.py files,
# which are put into metadata dictionaries and sorted during concretization
# setup. We want comparing abstract specs to be fast.
#
# * Concrete specs: concrete specs are bigger and have lots of nodes and
# edges. Because of the graph complexity, we need a full, linear time
# traversal to compare them -- that's pretty much unavoidable. But they
# also have precomputed cryptographic hashes (dag_hash()), which we can use
# to do fast equality comparison. See _cmp_fast_eq() above for the
# short-circuit logic for hashes.
#
# A full traversal involves constructing data structures, visitor objects, etc.,
# and it can be expensive if we have to do it to compare a bunch of tiny
# abstract specs. Therefore, there are 3 cases below, which avoid calling
# `spack.traverse.traverse_edges()` unless necessary.
#
# WARNING: the cases below need to be consistent, so don't mess with this code
# unless you really know what you're doing. Be sure to keep all three consistent.
#
# All cases lazily yield:
#
# 1. A generator over nodes
# 2. A generator over canonical edges
#
# Canonical edges have consistent ids defined by breadth-first traversal order. That is,
# the root is always 0, dependencies of the root are 1, 2, 3, and so on.
#
# The three cases are:
#
# 1. Spec has no dependencies
# * We can avoid any traversal logic and just yield this node's _cmp_node generator.
#
# 2. Spec has dependencies, but dependencies have no dependencies.
# * We need to sort edges, but we don't need to track visited nodes, which
# can save us the cost of setting up all the tracking data structures
# `spack.traverse` uses.
#
# 3. Spec has dependencies that have dependencies.
# * In this case, the spec is *probably* concrete. Equality comparisons
# will be short-circuited by dag_hash(), but other comparisons will need
# to lazily enumerate components of the spec. The traversal logic is
# unavoidable.
#
# TODO: consider reworking `spack.traverse` to construct fewer data structures
# and objects, as this would make all traversals faster and could eliminate the
# need for the complexity here. It was not clear at the time of writing how
# much optimization was possible in `spack.traverse`.
sorted_l1_edges = None
edge_list = None
node_ids = None
def nodes():
nonlocal sorted_l1_edges
nonlocal edge_list
nonlocal node_ids
# Level 0: root node
yield self._cmp_node # always yield the root (this node)
if not self._dependencies: # done if there are no dependencies
return
# Level 1: direct dependencies
# we can yield these in sorted order without tracking visited nodes
deps_have_deps = False
sorted_l1_edges = self.edges_to_dependencies(depflag=dt.ALL)
if len(sorted_l1_edges) > 1:
sorted_l1_edges = spack.traverse.sort_edges(sorted_l1_edges)
for edge in sorted_l1_edges:
yield edge.spec._cmp_node
if edge.spec._dependencies:
deps_have_deps = True
if not deps_have_deps: # done if level 1 specs have no dependencies
return
# Level 2: dependencies of direct dependencies
# now it's general; we need full traverse() to track visited nodes
l1_specs = [edge.spec for edge in sorted_l1_edges]
# the node_ids dict generates consistent ids based on BFS traversal order
# these are used to identify edges later
node_ids = collections.defaultdict(lambda: len(node_ids))
node_ids[id(self)] # self is 0
for spec in l1_specs:
node_ids[id(spec)] # l1 starts at 1
edge_list = []
for edge in spack.traverse.traverse_edges(
l1_specs, order="breadth", cover="edges", root=False, visited=set([0])
):
# yield each node only once, and generate a consistent id for it the
# first time it's encountered.
if id(edge.spec) not in node_ids:
yield edge.spec._cmp_node
node_ids[id(edge.spec)]
if edge.parent is None: # skip fake edge to root
continue
edge_list.append(
(
node_ids[id(edge.parent)],
node_ids[id(edge.spec)],
edge.depflag,
edge.virtuals,
edge.direct,
edge.when,
)
)
def edges():
# no edges in single-node graph
if not self._dependencies:
return
# level 1 edges all start with zero
for i, edge in enumerate(sorted_l1_edges, start=1):
yield (0, i, edge.depflag, edge.virtuals, edge.direct, edge.when)
# yield remaining edges in the order they were encountered during traversal
if edge_list:
yield from edge_list
yield nodes
yield edges
@property
def namespace_if_anonymous(self):
return self.namespace if not self.name else None
@property
def spack_root(self):
"""Special field for using ``{spack_root}`` in :meth:`format`."""
return spack.paths.spack_root
@property
def spack_install(self):
"""Special field for using ``{spack_install}`` in :meth:`format`."""
from spack.store import STORE
return STORE.layout.root
def _format_default(self) -> str:
"""Fast path for formatting with DEFAULT_FORMAT and no color.
This method manually concatenates the string representation of spec attributes,
avoiding the regex parsing overhead of the general format() method.
"""
parts = []
if self.name:
parts.append(self.name)
if self.versions:
version_str = str(self.versions)
if version_str and version_str != ":": # only include if not full range
parts.append(f"@{version_str}")
compiler_flags_str = str(self.compiler_flags)
if compiler_flags_str:
parts.append(compiler_flags_str)
variants_str = str(self.variants)
if variants_str:
parts.append(variants_str)
if not self.name and self.namespace:
parts.append(f" namespace={self.namespace}")
if self.architecture:
if self.architecture.platform:
parts.append(f" platform={self.architecture.platform}")
if self.architecture.os:
parts.append(f" os={self.architecture.os}")
if self.architecture.target:
parts.append(f" target={self.architecture.target}")
if self.abstract_hash:
parts.append(f"/{self.abstract_hash}")
return "".join(parts).strip()
def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = False) -> str:
r"""Prints out attributes of a spec according to a format string.
Using an ``{attribute}`` format specifier, any field of the spec can be
selected. Those attributes can be recursive. For example,
``s.format({compiler.version})`` will print the version of the compiler.
If the attribute in a format specifier evaluates to ``None``, then the format
specifier will evaluate to the empty string, ``""``.
Commonly used attributes of the Spec for format strings include:
.. code-block:: text
name
version
compiler_flags
compilers
variants
architecture
architecture.platform
architecture.os
architecture.target
prefix
namespace
Some additional special-case properties can be added:
.. code-block:: text
hash[:len] The DAG hash with optional length argument
spack_root The spack root directory
spack_install The spack install directory
The ``^`` sigil can be used to access dependencies by name.
``s.format({^mpi.name})`` will print the name of the MPI implementation in the
spec.
The ``@``, ``%``, and ``/`` sigils can be used to include the sigil with the
printed string. These sigils may only be used with the appropriate attributes,
listed below:
* ``@``: ``{@version}``, ``{@compiler.version}``
* ``%``: ``{%compiler}``, ``{%compiler.name}``
* ``/``: ``{/hash}``, ``{/hash:7}``, etc
The ``@`` sigil may also be used for any other property named ``version``.
Sigils printed with the attribute string are only printed if the attribute
string is non-empty, and are colored according to the color of the attribute.
Variants listed by name naturally print with their sigil. For example,
``spec.format("{variants.debug}")`` prints either ``+debug`` or ``~debug``
depending on the name of the variant. Non-boolean variants print as
``name=value``. To print variant names or values independently, use
``spec.format("{variants.<name>.name}")`` or
``spec.format("{variants.<name>.value}")``.
There are a few attributes on specs that can be specified as key-value pairs
that are *not* variants, e.g.: ``os``, ``arch``, ``architecture``, ``target``,
``namespace``, etc. You can format these with an optional ``key=`` prefix, e.g.
``{namespace=namespace}`` or ``{arch=architecture}``, etc. The ``key=`` prefix
will be colorized along with the value.
When formatting specs, key-value pairs are separated from preceding parts of the
spec by whitespace. To avoid printing extra whitespace when the formatted
attribute is not set, you can add whitespace to the key *inside* the braces of
the format string, e.g.:
.. code-block:: text
{ namespace=namespace}
This evaluates to ``" namespace=builtin"`` if ``namespace`` is set to ``builtin``,
and to ``""`` if ``namespace`` is ``None``.
Spec format strings use ``\`` as the escape character. Use ``\{`` and ``\}`` for
literal braces, and ``\\`` for the literal ``\`` character.
Args:
format_string: string containing the format to be expanded
color: True for colorized result; False for no color; None for auto color.
"""
# Fast path for the common case: default format with no color
if format_string == DEFAULT_FORMAT and color is False:
return self._format_default()
ensure_modern_format_string(format_string)
def safe_color(sigil: str, string: str, color_fmt: Optional[str]) -> str:
# avoid colorizing if there is no color or the string is empty
if (color is False) or not color_fmt or not string:
return sigil + string
# escape and add the sigil here to avoid multiple concatenations
if sigil == "@":
sigil = "@@"
return clr.colorize(f"{color_fmt}{sigil}{clr.cescape(string)}@.", color=color)
def format_attribute(match_object: Match) -> str:
(esc, sig, dep, hash, hash_len, attribute, close_brace, unmatched_close_brace) = (
match_object.groups()
)
if esc:
return esc
elif unmatched_close_brace:
raise SpecFormatStringError(f"Unmatched close brace: '{format_string}'")
elif not close_brace:
raise SpecFormatStringError(f"Missing close brace: '{format_string}'")
current = self if dep is None else self[dep]
# Hash attributes can return early.
# NOTE: we currently treat abstract_hash like an attribute and ignore
# any length associated with it. We may want to change that.
if hash:
if sig and sig != "/":
raise SpecFormatSigilError(sig, "DAG hashes", hash)
try:
length = int(hash_len) if hash_len else None
except ValueError:
raise SpecFormatStringError(f"Invalid hash length: '{hash_len}'")
return safe_color(sig or "", current.dag_hash(length), HASH_COLOR)
if attribute == "":
raise SpecFormatStringError("Format string attributes must be non-empty")
attribute = attribute.lower()
parts = attribute.split(".")
assert parts
# check that the sigil is valid for the attribute.
if not sig:
sig = ""
elif sig == "@" and parts[-1] not in ("versions", "version"):
raise SpecFormatSigilError(sig, "versions", attribute)
elif sig == "%" and attribute not in ("compiler", "compiler.name"):
raise SpecFormatSigilError(sig, "compilers", attribute)
elif sig == "/" and attribute != "abstract_hash":
raise SpecFormatSigilError(sig, "DAG hashes", attribute)
# Iterate over components using getattr to get next element
for idx, part in enumerate(parts):
if not part:
raise SpecFormatStringError("Format string attributes must be non-empty")
elif part.startswith("_"):
raise SpecFormatStringError("Attempted to format private attribute")
elif isinstance(current, VariantMap):
# subscript instead of getattr for variant names
try:
current = current[part]
except KeyError:
raise SpecFormatStringError(f"Variant '{part}' does not exist")
else:
# aliases
if part == "arch":
part = "architecture"
elif part == "version" and not current.versions.concrete:
# version (singular) requires a concrete versions list. Avoid
# pedantic errors by using versions (plural) when not concrete.
# These two are not entirely equivalent for pkg@=1.2.3:
# - version prints '1.2.3'
# - versions prints '=1.2.3'
part = "versions"
try:
current = getattr(current, part)
except AttributeError:
if part == "compiler":
return "none"
elif part == "specfile_version":
return f"v{current.original_spec_format()}"
raise SpecFormatStringError(
f"Attempted to format attribute {attribute}. "
f"Spec {'.'.join(parts[:idx])} has no attribute {part}"
)
if isinstance(current, vn.VersionList) and current == vn.any_version:
# don't print empty version lists
return ""
if callable(current):
raise SpecFormatStringError("Attempted to format callable object")
if current is None:
# not printing anything
return ""
# Set color codes for various attributes
color = None
if "architecture" in parts:
color = ARCHITECTURE_COLOR
elif "variants" in parts or sig.endswith("="):
color = VARIANT_COLOR
elif any(c in parts for c in ("compiler", "compilers", "compiler_flags")):
color = COMPILER_COLOR
elif "version" in parts or "versions" in parts:
color = VERSION_COLOR
# return empty string if the value of the attribute is None.
if current is None:
return ""
# return colored output
return safe_color(sig, str(current), color)
return SPEC_FORMAT_RE.sub(format_attribute, format_string).strip()
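# A minimal usage sketch of the format mini-language documented above; the spec string
# and the outputs shown are hypothetical and only indicative, since exact results depend
# on the spec being formatted:
#   s = Spec("zlib@1.2.13")
#   s.format("{name}-{version}")           # "zlib-1.2.13"
#   s.format("{name}{@version}{/hash:7}")  # the hash part renders only for a concrete spec
#   s.format("{ namespace=namespace}")     # " namespace=builtin", or "" when namespace is unset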
def cformat(self, format_string: str = DEFAULT_FORMAT) -> str:
"""Same as :meth:`format`, but color defaults to auto instead of False."""
return self.format(format_string, color=None)
def format_path(
# self, format_string: str, _path_ctor: Optional[pathlib.PurePath] = None
self,
format_string: str,
_path_ctor: Optional[Callable[[Any], pathlib.PurePath]] = None,
) -> str:
"""Given a ``format_string`` that is intended as a path, generate a string like from
:meth:`format`, but eliminate extra path separators introduced by formatting of Spec
properties.
Path separators explicitly added to the string are preserved, so for example
``{name}/{version}`` would generate a directory based on the Spec's name, and a
subdirectory based on its version; this function guarantees, though, that the resulting
string has only those two path components (i.e. if ``str(self.version)`` would normally
contain a path separator, it will not in this case).
"""
format_component_with_sep = r"\{[^}]*[/\\][^}]*}"
if re.search(format_component_with_sep, format_string):
raise SpecFormatPathError(
f"Invalid path format string: cannot contain {{/...}}\n\t{format_string}"
)
path_ctor = _path_ctor or pathlib.PurePath
format_string_as_path = path_ctor(format_string)
if format_string_as_path.is_absolute() or (
# Paths that begin with a single "\" on windows are relative, but we still
# want to preserve the initial "\\" to be consistent with PureWindowsPath.
# Ensure that this '\' is not passed to polite_filename() so it's not converted to '_'
(os.name == "nt" or path_ctor == pathlib.PureWindowsPath)
and format_string_as_path.parts[0] == "\\"
):
output_path_components = [format_string_as_path.parts[0]]
input_path_components = list(format_string_as_path.parts[1:])
else:
output_path_components = []
input_path_components = list(format_string_as_path.parts)
output_path_components += [
fs.polite_filename(self.format(part)) for part in input_path_components
]
return str(path_ctor(*output_path_components))
def _format_edge_attributes(self, dep: DependencySpec, deptypes=True, virtuals=True):
deptypes_str = (
f"deptypes={','.join(dt.flag_to_tuple(dep.depflag))}"
if deptypes and dep.depflag
else ""
)
when_str = f"when='{(dep.when)}'" if dep.when != Spec() else ""
virtuals_str = f"virtuals={','.join(dep.virtuals)}" if virtuals and dep.virtuals else ""
attrs = " ".join(s for s in (when_str, deptypes_str, virtuals_str) if s)
if attrs:
attrs = f"[{attrs}] "
return attrs
def _format_dependencies(
self,
format_string: str = DEFAULT_FORMAT,
include: Optional[Callable[[DependencySpec], bool]] = None,
deptypes: bool = True,
color: Optional[bool] = False,
_force_direct: bool = False,
):
"""Helper for formatting dependencies on specs.
Arguments:
format_string: format string to use for each dependency
include: predicate to select which dependencies to include
deptypes: whether to format deptypes
color: colorize if True, don't colorize if False, auto-colorize if None
_force_direct: if True, print all dependencies as direct dependencies
(to be removed when we have this metadata on concrete edges)
"""
include = include or (lambda dep: True)
parts = []
if self.concrete:
direct = self.edges_to_dependencies()
transitive: List[DependencySpec] = []
else:
direct, transitive = lang.stable_partition(
self.edges_to_dependencies(), predicate_fn=lambda x: x.direct
)
# helper for direct and transitive loops below
def format_edge(edge: DependencySpec, sigil: str, dep_spec: Optional[Spec] = None) -> str:
dep_spec = dep_spec or edge.spec
dep_format = dep_spec.format(format_string, color=color)
edge_attributes = (
self._format_edge_attributes(edge, deptypes=deptypes, virtuals=False)
if edge.depflag or edge.when != Spec()
else ""
)
virtuals = f"{','.join(edge.virtuals)}=" if edge.virtuals else ""
star = _anonymous_star(edge, dep_format)
return f"{sigil}{edge_attributes}{star}{virtuals}{dep_format}"
# direct dependencies
for edge in sorted(direct, key=lambda x: x.spec.name):
if not include(edge):
continue
# replace legacy compiler names
old_name = edge.spec.name
new_name = spack.aliases.BUILTIN_TO_LEGACY_COMPILER.get(old_name)
try:
# this is ugly but copies can be expensive
sigil = "%"
if new_name:
edge.spec.name = new_name
if edge.propagation == PropagationPolicy.PREFERENCE:
sigil = "%%"
parts.append(format_edge(edge, sigil=sigil, dep_spec=edge.spec))
finally:
edge.spec.name = old_name
if self.concrete:
# Concrete specs should go no further, as the complexity
# below is O(paths)
return " ".join(parts).strip()
# transitive dependencies (with any direct dependencies)
for edge in sorted(transitive, key=lambda x: x.spec.name):
if not include(edge):
continue
sigil = "%" if _force_direct else "^" # hack til direct deps represented better
parts.append(format_edge(edge, sigil, edge.spec))
# also recursively add any direct dependencies of transitive dependencies
if edge.spec._dependencies:
parts.append(
edge.spec._format_dependencies(
format_string=format_string,
include=include,
deptypes=deptypes,
_force_direct=_force_direct,
)
)
return " ".join(parts).strip()
def _long_spec(self, color: Optional[bool] = False) -> str:
"""Helper for :attr:`long_spec` and :attr:`clong_spec`."""
if self.concrete:
return self.tree(format=DISPLAY_FORMAT, color=color)
return f"{self.format(color=color)} {self._format_dependencies(color=color)}".strip()
def _short_spec(self, color: Optional[bool] = False) -> str:
"""Helper for :attr:`short_spec` and :attr:`cshort_spec`."""
return self.format(
"{name}{@version}{variants}"
"{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}"
"{/hash:7}",
color=color,
)
@property
def compilers(self):
# TODO: get rid of the space here and make formatting smarter
return " " + self._format_dependencies(
"{name}{@version}",
include=lambda dep: any(lang in dep.virtuals for lang in ("c", "cxx", "fortran")),
deptypes=False,
_force_direct=True,
)
@property
def long_spec(self):
"""Long string of the spec, including dependencies."""
return self._long_spec(color=False)
@property
def clong_spec(self):
"""Returns an auto-colorized version of :attr:`long_spec`."""
return self._long_spec(color=None)
@property
def short_spec(self):
"""Short string of the spec, with hash and without dependencies."""
return self._short_spec(color=False)
@property
def cshort_spec(self):
"""Returns an auto-colorized version of :attr:`short_spec`."""
return self._short_spec(color=None)
@property
def colored_str(self) -> str:
"""Auto-colorized string representation of this spec."""
return self._str(color=None)
def _str(self, color: Optional[bool] = False) -> str:
"""String representation of this spec.
Args:
color: colorize if True, don't colorize if False, auto-colorize if None
"""
if self._concrete:
return self.format("{name}{@version}{/hash}", color=color)
if not self._dependencies:
return self.format(color=color)
return self._long_spec(color=color)
def __str__(self) -> str:
"""String representation of this spec."""
return self._str(color=False)
def install_status(self) -> InstallStatus:
"""Helper for tree to print DB install status."""
if not self.concrete:
return InstallStatus.absent
if self.external:
return InstallStatus.external
from spack.store import STORE
upstream, record = STORE.db.query_by_spec_hash(self.dag_hash())
if not record:
return InstallStatus.absent
elif upstream and record.installed:
return InstallStatus.upstream
elif record.installed:
return InstallStatus.installed
else:
return InstallStatus.missing
def _installed_explicitly(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
from spack.store import STORE
record = STORE.db.get_record(self)
return record.explicit
except KeyError:
return None
def tree(
self,
*,
color: Optional[bool] = None,
depth: bool = False,
hashes: bool = False,
hashlen: Optional[int] = None,
cover: spack.traverse.CoverType = "nodes",
indent: int = 0,
format: str = DEFAULT_FORMAT,
deptypes: Union[dt.DepTypes, dt.DepFlag] = dt.ALL,
show_types: bool = False,
depth_first: bool = False,
recurse_dependencies: bool = True,
status_fn: Optional[Callable[["Spec"], InstallStatus]] = None,
prefix: Optional[Callable[["Spec"], str]] = None,
key=id,
) -> str:
"""Prints out this spec and its dependencies, tree-formatted with indentation.
See multi-spec ``spack.spec.tree()`` function for details.
Args:
color: if True, always colorize the tree. If False, don't colorize the tree. If None,
use the default from spack.llnl.tty.color
depth: print the depth from the root
hashes: if True, print the hash of each node
hashlen: length of the hash to be printed
cover: either ``"nodes"`` or ``"edges"``
indent: extra indentation for the tree being printed
format: format to be used to print each node
deptypes: dependency types to be represented in the tree
show_types: if True, show the (merged) dependency type of a node
depth_first: if True, traverse the DAG depth first when representing it as a tree
recurse_dependencies: if True, recurse on dependencies
status_fn: optional callable that takes a node as an argument and returns its
installation status
prefix: optional callable that takes a node as an argument and returns its
installation prefix
"""
return tree(
[self],
color=color,
depth=depth,
hashes=hashes,
hashlen=hashlen,
cover=cover,
indent=indent,
format=format,
deptypes=deptypes,
show_types=show_types,
depth_first=depth_first,
recurse_dependencies=recurse_dependencies,
status_fn=status_fn,
prefix=prefix,
key=key,
)
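# A minimal sketch of typical calls (hypothetical spec; output shape depends on the graph
# and on terminal color settings):
#   print(spec.tree())                           # plain tree using DEFAULT_FORMAT
#   print(spec.tree(hashes=True, hashlen=7))     # prefix each node with a short DAG hash
#   print(spec.tree(cover="edges", depth=True))  # show depth and repeat nodes per edge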
def __repr__(self):
return str(self)
@property
def platform(self):
return self.architecture.platform
@property
def os(self):
return self.architecture.os
@property
def target(self):
return self.architecture.target
@property
def build_spec(self):
return self._build_spec or self
@build_spec.setter
def build_spec(self, value):
self._build_spec = value
def trim(self, dep_name):
"""
Remove any package that is or provides ``dep_name`` transitively
from this tree. This can also remove other dependencies if
they are only present because of ``dep_name``.
"""
for spec in list(self.traverse()):
new_dependencies = _EdgeMap() # A new _EdgeMap
for pkg_name, edge_list in spec._dependencies.items():
for edge in edge_list:
if (dep_name not in edge.virtuals) and (not dep_name == edge.spec.name):
new_dependencies.add(edge)
spec._dependencies = new_dependencies
def _virtuals_provided(self, root):
"""Return set of virtuals provided by self in the context of root"""
if root is self:
# Could be using any virtual the package can provide
return set(v.name for v in self.package.virtuals_provided)
hashes = [s.dag_hash() for s in root.traverse()]
in_edges = set(
[edge for edge in self.edges_from_dependents() if edge.parent.dag_hash() in hashes]
)
return set().union(*[edge.virtuals for edge in in_edges])
def _splice_match(self, other, self_root, other_root):
"""Return True if other is a match for self in a splice of other_root into self_root
Other is a splice match for self if it shares a name, or if self is a virtual provider
and other provides a superset of the virtuals provided by self. Virtuals provided are
evaluated in the context of a root spec (self_root for self, other_root for other).
This is a slight oversimplification. Other could be a match for self in the context of
one edge in self_root and not in the context of another edge. This method could be
expanded in the future to account for these cases.
"""
if other.name == self.name:
return True
return bool(
bool(self._virtuals_provided(self_root))
and self._virtuals_provided(self_root) <= other._virtuals_provided(other_root)
)
def _splice_detach_and_add_dependents(self, replacement, context):
"""Helper method for Spec._splice_helper.
replacement is a node to splice in, context is the scope of dependents to consider relevant
to this splice."""
# Update build_spec attributes for all transitive dependents
# before we start changing their dependencies
ancestors_in_context = [
a
for a in self.traverse(root=False, direction="parents")
if a in context.traverse(deptype=dt.LINK | dt.RUN)
]
for ancestor in ancestors_in_context:
# Only set it if it hasn't been spliced before
ancestor._build_spec = ancestor._build_spec or ancestor.copy()
ancestor.clear_caches(ignore=(ht.package_hash.attr,))
for edge in ancestor.edges_to_dependencies(depflag=dt.BUILD):
if edge.depflag & ~dt.BUILD:
edge.depflag &= ~dt.BUILD
else:
ancestor._dependencies[edge.spec.name].remove(edge)
edge.spec._dependents[ancestor.name].remove(edge)
# For each direct dependent in the link/run graph, replace the dependency on
# node with one on replacement
for edge in self.edges_from_dependents():
if edge.parent not in ancestors_in_context:
continue
edge.parent._dependencies.edges[self.name].remove(edge)
self._dependents.edges[edge.parent.name].remove(edge)
edge.parent._add_dependency(replacement, depflag=edge.depflag, virtuals=edge.virtuals)
def _splice_helper(self, replacement):
"""Main loop of a transitive splice.
The while loop around a traversal of self ensures that changes to self from previous
iterations are reflected in the traversal. This avoids evaluating irrelevant nodes
using topological traversal (all incoming edges traversed before any outgoing edge).
If any node will not be in the end result, its parent will be spliced and it will not
ever be considered.
For each node in self, find any analogous node in replacement and swap it in.
We assume all build deps are handled outside of this method.
Arguments:
replacement: The node that will replace any equivalent node in self. Whether a node of
``self`` matches ``replacement`` is evaluated with ``Spec._splice_match`` and
``Spec._virtuals_provided``, using ``self`` and ``replacement`` as the respective roots.
"""
ids = set(id(s) for s in replacement.traverse())
# Sort all possible replacements by name and virtual for easy access later
replacements_by_name = collections.defaultdict(list)
for node in replacement.traverse():
replacements_by_name[node.name].append(node)
virtuals = node._virtuals_provided(root=replacement)
for virtual in virtuals:
replacements_by_name[virtual].append(node)
changed = True
while changed:
changed = False
# Intentionally allowing traversal to change on each iteration
# using topological traversal to ensure we only reach nodes that will
# be in the final result
for node in self.traverse(root=False, order="topo", deptype=dt.ALL & ~dt.BUILD):
# If this node has already been swapped in, don't consider it again
if id(node) in ids:
continue
analogs = replacements_by_name[node.name]
if not analogs:
# If we have to check for matching virtuals, then we need to check that it
# matches all virtuals. Use `_splice_match` to validate possible matches
for virtual in node._virtuals_provided(root=self):
analogs += [
r
for r in replacements_by_name[virtual]
if node._splice_match(r, self_root=self, other_root=replacement)
]
# No match, keep iterating over self
if not analogs:
continue
# If there are multiple analogs, this package must satisfy the constraint
# that a newer version can always replace a lesser version.
analog = max(analogs, key=lambda s: s.version)
# No splice needed here, keep checking
if analog == node:
continue
node._splice_detach_and_add_dependents(analog, context=self)
changed = True
break
def splice(self, other: "Spec", transitive: bool = True) -> "Spec":
"""Returns a new, spliced concrete :class:`Spec` with the ``other`` dependency and,
optionally, its dependencies.
Args:
other: alternate dependency
transitive: include other's dependencies
Returns: a concrete, spliced version of the current :class:`Spec`
When transitive is :data:`True`, use the dependencies from ``other`` to reconcile
conflicting dependencies. When transitive is :data:`False`, use dependencies from self.
For example, suppose we have the following dependency graph:
.. code-block:: text
T
| \\
Z<-H
Spec ``T`` depends on ``H`` and ``Z``, and ``H`` also depends on ``Z``. Now we want to use
a different ``H``, called ``H'``. This function can be used to splice in ``H'`` to
create a new spec, called ``T*``. If ``H'`` was built with ``Z'``, then ``transitive=True``
will ensure ``H'`` and ``T*`` both depend on ``Z'``:
.. code-block:: text
T*
| \\
Z'<-H'
If ``transitive=False``, then ``H'`` and ``T*`` will both depend on
the original ``Z``, resulting in a new ``H'*``:
.. code-block:: text
T*
| \\
Z<-H'*
Provenance of the build is tracked through the :attr:`build_spec` property
of the spliced spec and any correspondingly modified dependency specs.
The build specs are set to that of the original spec, so the original
spec's provenance is preserved unchanged."""
assert self.concrete
assert other.concrete
if self._splice_match(other, self_root=self, other_root=other):
return other.copy()
if not any(
node._splice_match(other, self_root=self, other_root=other)
for node in self.traverse(root=False, deptype=dt.LINK | dt.RUN)
):
other_str = other.format("{name}/{hash:7}")
self_str = self.format("{name}/{hash:7}")
msg = f"Cannot splice {other_str} into {self_str}."
msg += f" Either {self_str} cannot depend on {other_str},"
msg += f" or {other_str} fails to provide a virtual used in {self_str}"
raise SpliceError(msg)
# Copies of all non-build deps, build deps will get added at the end
spec = self.copy(deps=dt.ALL & ~dt.BUILD)
replacement = other.copy(deps=dt.ALL & ~dt.BUILD)
def make_node_pairs(orig_spec, copied_spec):
return list(
zip(
orig_spec.traverse(deptype=dt.ALL & ~dt.BUILD),
copied_spec.traverse(deptype=dt.ALL & ~dt.BUILD),
)
)
def mask_build_deps(in_spec):
for edge in in_spec.traverse_edges(cover="edges"):
edge.depflag &= ~dt.BUILD
if transitive:
# These pairs will allow us to reattach all direct build deps
# We need the list of pairs while the two specs still match
node_pairs = make_node_pairs(self, spec)
# Ignore build deps in the modified spec while doing the splice
# They will be added back in at the end
mask_build_deps(spec)
# Transitively splice any relevant nodes from new into base
# This handles all shared dependencies between self and other
spec._splice_helper(replacement)
else:
# Do the same thing as the transitive splice, but reversed
node_pairs = make_node_pairs(other, replacement)
mask_build_deps(replacement)
replacement._splice_helper(spec)
# Intransitively splice replacement into spec
# This is very simple now that all shared dependencies have been handled
for node in spec.traverse(order="topo", deptype=dt.LINK | dt.RUN):
if node._splice_match(other, self_root=spec, other_root=other):
node._splice_detach_and_add_dependents(replacement, context=spec)
# For nodes that were spliced, modify the build spec to ensure build deps are preserved
# For nodes that were not spliced, replace the build deps on the spec itself
for orig, copy in node_pairs:
if copy._build_spec:
copy._build_spec = orig.build_spec.copy()
else:
for edge in orig.edges_to_dependencies(depflag=dt.BUILD):
copy._add_dependency(edge.spec, depflag=dt.BUILD, virtuals=edge.virtuals)
return spec
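# A minimal sketch of the splice API described in the docstring above; t and h_prime are
# hypothetical, already-concrete specs standing in for T and H':
#   t_star = t.splice(h_prime, transitive=True)   # T* reuses h_prime's dependencies (Z')
#   t_star = t.splice(h_prime, transitive=False)  # T* keeps t's original dependencies (Z)
#   t_star.build_spec                             # provenance of the original, unspliced build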
def clear_caches(self, ignore: Tuple[str, ...] = ()) -> None:
"""
Clears all cached hashes in a Spec, while preserving other properties.
"""
for h in ht.HASHES:
if h.attr not in ignore:
if hasattr(self, h.attr):
setattr(self, h.attr, None)
for attr in ("_dunder_hash", "_prefix"):
if attr not in ignore:
setattr(self, attr, None)
def __hash__(self):
# If the spec is concrete, we leverage the dag hash and just use a 64-bit prefix of it.
# The dag hash has the advantage that it's computed once per concrete spec, and it's saved
# -- so if we read concrete specs we don't need to recompute the whole hash.
if self.concrete:
if not self._dunder_hash:
self._dunder_hash = self.dag_hash_bit_prefix(64)
return self._dunder_hash
# This is the normal hash for lazy_lexicographic_ordering. It's
# slow for large specs because it traverses the whole spec graph,
# so we hope it only runs on abstract specs, which are small.
return hash(lang.tuplify(self._cmp_iter))
def __reduce__(self):
return Spec.from_dict, (self.to_dict(hash=ht.dag_hash),)
def attach_git_version_lookup(self):
# Add a git lookup method for GitVersions
if not self.name:
return
for v in self.versions:
if isinstance(v, vn.GitVersion) and v.std_version is None:
v.attach_lookup(spack.version.git_ref_lookup.GitRefLookup(self.fullname))
def original_spec_format(self) -> int:
"""Returns the spec format originally used for this spec."""
return self.annotations.original_spec_format
def has_virtual_dependency(self, virtual: str) -> bool:
return bool(self.dependencies(virtuals=(virtual,)))
|
Spec
|
python
|
kamyu104__LeetCode-Solutions
|
Python/best-poker-hand.py
|
{
"start": 42,
"end": 509
}
|
class ____(object):
def bestHand(self, ranks, suits):
"""
:type ranks: List[int]
:type suits: List[str]
:rtype: str
"""
LOOKUP = ["", "High Card", "Pair", "Three of a Kind", "Three of a Kind", "Three of a Kind"]
if all(suits[i] == suits[0] for i in xrange(1, len(suits))):
return "Flush"
cnt = [0]*13
for x in ranks:
cnt[x-1] += 1
return LOOKUP[max(cnt)]
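# Worked examples for the lookup above (hypothetical hands):
#   ranks=[13, 2, 3, 1, 9],   suits=["a"]*5                   -> "Flush" (all suits equal)
#   ranks=[4, 4, 2, 4, 4],    suits=["d", "a", "a", "b", "c"] -> max(cnt) == 4 -> "Three of a Kind"
#   ranks=[10, 10, 2, 12, 9], suits=["a", "b", "c", "a", "d"] -> max(cnt) == 2 -> "Pair"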
|
Solution
|
python
|
numba__numba
|
numba/core/typing/asnumbatype.py
|
{
"start": 165,
"end": 6061
}
|
class ____:
"""
A registry for Python types. It stores a lookup table for simple cases
(e.g. ``int``) and a list of functions for more complicated cases (e.g.
generics like ``List[int]``).
Python types are used in Python type annotations, and in instance checks.
Therefore, this registry supports determining the Numba type of Python type
annotations at compile time, along with determining the type of classinfo
arguments to ``isinstance()``.
This registry is not used dynamically on instances at runtime; to check the
type of an object at runtime, use ``numba.typeof``.
"""
def __init__(self):
self.lookup = {
type(example): typeof(example)
for example in [
0,
0.0,
complex(0),
"numba",
True,
None,
]
}
self.functions = [self._builtin_infer, self._numba_type_infer]
def _numba_type_infer(self, py_type):
if isinstance(py_type, types.Type):
return py_type
def _builtin_infer(self, py_type):
if PYVERSION in ((3, 14), ):
# As of 3.14 the typing module has been updated to return a
# different type when calling: `typing.Optional[X]`.
#
# On 3.14:
#
# >>> type(typing.Optional[float])
# <class 'typing.Union'>
#
#
# On 3.13 (and presumably below):
#
# >>> type(typing._UnionGenericAlias)
# <class 'typing._UnionGenericAlias'>
#
#
# The previous implementation of this predicate used
# `_GenericAlias`, which was possible because `_UnionGenericAlias`
# is a subclass of `_GenericAlias`...
#
# >>> issubclass(typing._UnionGenericAlias, typing._GenericAlias)
# True
#
# However, other types, such as `typing.List[float]` remain as
# `typing._GenericAlias`, so that must be kept.
#
if not isinstance(py_type, (py_typing.Union,
py_typing._GenericAlias)):
return
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
# Use of underscore type `_GenericAlias`.
if not isinstance(py_type, py_typing._GenericAlias):
return
else:
raise NotImplementedError(PYVERSION)
if getattr(py_type, "__origin__", None) is py_typing.Union:
if len(py_type.__args__) != 2:
raise errors.TypingError(
"Cannot type Union of more than two types")
(arg_1_py, arg_2_py) = py_type.__args__
if arg_2_py is type(None): # noqa: E721
return types.Optional(self.infer(arg_1_py))
elif arg_1_py is type(None): # noqa: E721
return types.Optional(self.infer(arg_2_py))
else:
raise errors.TypingError(
"Cannot type Union that is not an Optional "
f"(neither {arg_1_py} nor {arg_2_py} is NoneType)")
if getattr(py_type, "__origin__", None) is list:
(element_py,) = py_type.__args__
return types.ListType(self.infer(element_py))
if getattr(py_type, "__origin__", None) is dict:
key_py, value_py = py_type.__args__
return types.DictType(self.infer(key_py), self.infer(value_py))
if getattr(py_type, "__origin__", None) is set:
(element_py,) = py_type.__args__
return types.Set(self.infer(element_py))
if getattr(py_type, "__origin__", None) is tuple:
tys = tuple(map(self.infer, py_type.__args__))
return types.BaseTuple.from_types(tys)
def register(self, func_or_py_type, numba_type=None):
"""
Add support for new Python types (e.g. user-defined JitClasses) to the
registry. For a simple pair of a Python type and a Numba type, this can
be called as a function ``register(py_type, numba_type)``. If more
complex logic is required (e.g. for generic types), ``register`` can be
used as a decorator for a function that takes a Python type as input
and returns a Numba type or ``None``.
"""
if numba_type is not None:
# register used with a specific (py_type, numba_type) pair.
assert isinstance(numba_type, types.Type)
self.lookup[func_or_py_type] = numba_type
else:
# register used as a decorator.
assert inspect.isfunction(func_or_py_type)
self.functions.append(func_or_py_type)
def try_infer(self, py_type):
"""
Try to determine the Numba type of a given Python type. We first
consider the lookup dictionary. If ``py_type`` is not there, we iterate
through the registered functions until one returns a Numba type. If
type inference fails, return ``None``.
"""
result = self.lookup.get(py_type, None)
for func in self.functions:
if result is not None:
break
result = func(py_type)
if result is not None and not isinstance(result, types.Type):
raise errors.TypingError(
f"as_numba_type should return a Numba type, got {result}"
)
return result
def infer(self, py_type):
result = self.try_infer(py_type)
if result is None:
raise errors.TypingError(
f"Cannot infer Numba type of Python type {py_type}"
)
return result
def __call__(self, py_type):
return self.infer(py_type)
as_numba_type = AsNumbaTypeRegistry()
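# A minimal usage sketch of the registry instantiated above (requires numba at runtime;
# integer/float widths follow platform defaults, so the results are indicative):
#   import typing
#   as_numba_type(float)                 # float64, via the example-based lookup table
#   as_numba_type(typing.List[float])    # ListType(float64)
#   as_numba_type(typing.Optional[int])  # Optional(int64) on typical 64-bit platforms
#   @as_numba_type.register              # decorator form: append a custom inference function
#   def infer_my_alias(py_type):         # hypothetical; return a Numba type or None to pass
#       return None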
|
AsNumbaTypeRegistry
|
python
|
dask__dask
|
dask/dataframe/tseries/resample.py
|
{
"start": 6784,
"end": 6979
}
|
class ____(ResampleReduction):
how = "agg"
def _simplify_up(self, parent, dependents):
# Disable optimization in `agg`; function may access other columns
return
|
ResampleAgg
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/test_cli.py
|
{
"start": 42639,
"end": 43932
}
|
class ____(BoringModel):
def __init__(self, learning_rate, step_size=None, **kwargs):
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.step_size = step_size
self.kwargs = kwargs
def test_lightning_cli_save_hyperparameters_untyped_module(cleandir):
config = {
"model": {
"class_path": f"{__name__}.TestModelSaveHparamsUntyped",
"init_args": {"learning_rate": 1e-2},
"dict_kwargs": {"x": 1},
}
}
with mock.patch("sys.argv", ["any.py", f"--config={json.dumps(config)}", "--trainer.max_epochs=1"]):
cli = LightningCLI(BoringModel, run=False, auto_configure_optimizers=False, subclass_mode_model=True)
cli.trainer.fit(cli.model)
assert isinstance(cli.model, TestModelSaveHparamsUntyped)
assert cli.model.hparams["learning_rate"] == 1e-2
assert cli.model.hparams["step_size"] is None
assert cli.model.hparams["x"] == 1
checkpoint_path = next(Path(cli.trainer.log_dir, "checkpoints").glob("*.ckpt"), None)
model = TestModelSaveHparamsUntyped.load_from_checkpoint(checkpoint_path)
assert model.learning_rate == 1e-2
assert model.step_size is None
assert model.kwargs == {"x": 1}
|
TestModelSaveHparamsUntyped
|
python
|
viewflow__viewflow
|
viewflow/workflow/base.py
|
{
"start": 597,
"end": 2013
}
|
class ____:
"""Represents an edge in the Flow graph.
An edge connects two nodes (source and destination) in the flow graph and
can have different types (e.g., `next`, `cond_true`, `cond_false`, `default`).
Attributes:
_src: The source node of the edge.
_dst: The destination node of the edge.
_edge_class: The class/type of the edge.
"""
__slots__ = ("_src", "_dst", "_edge_class", "_label")
def __init__(self, src: str, dst: str, edge_class: str) -> None:
"""
Initializes an Edge instance.
"""
self._src = src
self._dst = dst
self._edge_class = edge_class
@property
def src(self) -> str:
"""Edge source node."""
return self._src
@property
def dst(self) -> str:
"""Edge destination node."""
return self._dst
@property
def edge_class(self) -> str:
"""Type of the edge.
Viewflow uses `next`, `cond_true`, `cond_false` and `default`
edge classes.
Edge class could be used as a hint for edge visualization.
"""
return self._edge_class
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return self.src == other.src and self.dst == other.dst
def __str__(self) -> str:
return "[{}] {} ---> {}".format(self._edge_class, self._src, self._dst)
|
Edge
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/scim/test_scim_user_details.py
|
{
"start": 31911,
"end": 32919
}
|
class ____(unittest.TestCase):
def test_parse_filter_conditions_basic(self) -> None:
fil = parse_filter_conditions('userName eq "user@sentry.io"')
assert fil == "user@sentry.io"
# single quotes too
fil = parse_filter_conditions("userName eq 'user@sentry.io'")
assert fil == "user@sentry.io"
fil = parse_filter_conditions('value eq "23"')
assert fil == 23
fil = parse_filter_conditions('displayName eq "MyTeamName"')
assert fil == "MyTeamName"
def test_parse_filter_conditions_invalids(self) -> None:
with pytest.raises(SCIMFilterError):
parse_filter_conditions("userName invalid USER@sentry.io")
with pytest.raises(SCIMFilterError):
parse_filter_conditions("blablaba eq USER@sentry.io")
def test_parse_filter_conditions_single_quote_in_email(self) -> None:
fil = parse_filter_conditions('userName eq "jos\'h@sentry.io"')
assert fil == "jos'h@sentry.io"
|
SCIMUtilsTests
|
python
|
pytorch__pytorch
|
test/test_jit.py
|
{
"start": 112016,
"end": 114529
}
|
class ____(JitTestCase):
def test_instancing_error(self):
@torch.jit.ignore
class MyScriptClass:
def unscriptable(self):
return "a" + 200
class TestModule(torch.nn.Module):
def forward(self, x):
return MyScriptClass()
with self.assertRaises(torch.jit.frontend.FrontendError) as cm:
torch.jit.script(TestModule())
checker = FileCheck()
checker.check("Cannot instantiate class")
checker.check("def forward")
checker.run(str(cm.exception))
def test_dictionary_as_example_inputs_for_jit_trace(self):
class TestModule_v1(torch.nn.Module):
def forward(self, key2=None, key3=None, key4=None, key5=None, key1=None, key6=None):
return key1 + key2 + key3
class TestModule_v2(torch.nn.Module):
def forward(self, x, y):
return x + y
def test_func(x, y):
return x + y
model_1 = TestModule_v1()
model_2 = TestModule_v2()
value1 = torch.ones(1)
value2 = torch.ones(1)
value3 = torch.ones(1)
example_input_dict = {'key1': value1, 'key2': value2, 'key3': value3}
example_input_dict_func = {'x': value1, 'y': value2}
traced_model_1 = torch.jit.trace(model_1, example_kwarg_inputs=example_input_dict, strict=False)
traced_model_1_m = torch.jit.trace_module(
model_1, {'forward': example_input_dict}, example_inputs_is_kwarg=True, strict=False)
traced_model_2 = torch.jit.trace(model_2, example_kwarg_inputs={'x': torch.rand([2]), 'y': torch.rand([2])})
traced_func = torch.jit.trace(test_func, example_kwarg_inputs=example_input_dict_func, strict=False)
res_1 = traced_model_1(**example_input_dict)
res_1_m = traced_model_1_m(**example_input_dict)
self.assertEqual(res_1, 3 * torch.ones(1))
self.assertEqual(res_1_m, 3 * torch.ones(1))
res_func = traced_func(**example_input_dict_func)
self.assertEqual(res_func, 2 * torch.ones(1))
with self.assertRaisesRegex(RuntimeError, r"forward\(\) is missing value for argument 'x'."):
res_2 = traced_model_2(**{'z': torch.rand([2]), 'y': torch.rand([2])}) # noqa: PIE804
with self.assertRaisesRegex(RuntimeError, r"forward\(\) is missing value for argument 'y'."):
res_2 = traced_model_2(**{'x': torch.rand([2]), 'z': torch.rand([2])}) # noqa: PIE804
|
TestFrontend
|
python
|
ray-project__ray
|
python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_factory.py
|
{
"start": 2098,
"end": 6822
}
|
class ____:
"""Test suite for KVConnectorBackendFactory."""
def test_get_backend_class_success(self):
"""Test successful retrieval of a registered backend class."""
backend_class = KVConnectorBackendFactory.get_backend_class(
"LMCacheConnectorV1"
)
assert backend_class is not None
assert hasattr(backend_class, "setup")
def test_get_backend_class_not_registered_returns_base(self):
"""Test that getting a non-registered backend returns BaseConnectorBackend."""
backend_class = KVConnectorBackendFactory.get_backend_class(
"UnregisteredConnector"
)
assert backend_class == BaseConnectorBackend
assert issubclass(backend_class, BaseConnectorBackend)
def test_create_backend_success(self):
"""Test successful creation of a backend instance."""
llm_config = LLMConfig(
model_loading_config=dict(model_id="test-model"),
engine_kwargs=dict(
kv_transfer_config=dict(
kv_connector="LMCacheConnectorV1",
kv_role="kv_both",
)
),
)
backend = KVConnectorBackendFactory.create_backend(
"LMCacheConnectorV1", llm_config
)
assert isinstance(backend, BaseConnectorBackend)
assert backend.llm_config == llm_config
@pytest.mark.parametrize(
"connector_name",
["LMCacheConnectorV1", "NixlConnector", "MultiConnector"],
)
def test_all_registered_backends_can_be_loaded(self, connector_name):
"""Test that all pre-registered backends can be loaded."""
backend_class = KVConnectorBackendFactory.get_backend_class(connector_name)
assert backend_class is not None
assert issubclass(backend_class, BaseConnectorBackend)
def test_get_backend_class_import_error_handling(self):
"""Test that ImportError during backend loading is handled with clear message."""
# Register a backend with a non-existent module path
with registered_backend("BadBackend", "non.existent.module:NonExistentClass"):
with pytest.raises(
ImportError, match="Failed to load connector backend 'BadBackend'"
):
KVConnectorBackendFactory.get_backend_class("BadBackend")
def test_register_backend_with_class_directly(self):
"""Test registering a backend class directly."""
class CustomBackend(BaseConnectorBackend):
def setup(self):
pass
with registered_backend("CustomBackend", CustomBackend):
assert KVConnectorBackendFactory.is_registered("CustomBackend")
retrieved = KVConnectorBackendFactory.get_backend_class("CustomBackend")
assert retrieved == CustomBackend
def test_register_backend_with_module_path(self):
"""Test registering a backend via module path string."""
# Register using module:class format
with registered_backend(
"LMCacheViaPath",
"ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache:LMCacheConnectorV1Backend",
):
assert KVConnectorBackendFactory.is_registered("LMCacheViaPath")
backend_class = KVConnectorBackendFactory.get_backend_class(
"LMCacheViaPath"
)
assert backend_class is not None
assert issubclass(backend_class, BaseConnectorBackend)
def test_unregistered_connector_with_llm_config_setup(self):
"""Test that unregistered connectors work with LLMConfig.setup_engine_backend()."""
llm_config = LLMConfig(
model_loading_config=dict(model_id="test-model"),
engine_kwargs=dict(
kv_transfer_config=dict(
kv_connector="SharedStorageConnector",
kv_role="kv_both",
)
),
)
# Should not raise an error
llm_config.setup_engine_backend()
@pytest.mark.asyncio
async def test_cross_process_registry_access(self, test_deployment_handle):
"""Test that registrations made in driver are accessible in Ray Serve child processes."""
handle, TestCrossProcessConnector = test_deployment_handle
# Verify it's registered in driver
assert KVConnectorBackendFactory.is_registered("TestCrossProcessConnector")
result = await handle.remote()
# Verify it's the correct class
assert result == TestCrossProcessConnector
assert issubclass(result, BaseConnectorBackend)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
TestKVConnectorBackendFactory
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_image04.py
|
{
"start": 315,
"end": 841
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.bmp")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
huggingface__transformers
|
src/transformers/convert_slow_tokenizer.py
|
{
"start": 55486,
"end": 57917
}
|
class ____(SpmConverter):
handle_byte_fallback = True
def __init__(self, vocab_file=None, *args):
self.vocab_file = vocab_file
requires_backends(self, "protobuf")
Converter.__init__(self, vocab_file)
model_pb2 = import_protobuf()
m = model_pb2.ModelProto()
with open(vocab_file, "rb") as f:
m.ParseFromString(f.read())
self.proto = m
def tokenizer(self, proto):
vocab_scores = self.vocab(proto)
_, merges = self.SpmExtractor(self.vocab_file).extract(vocab_scores)
bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)}
tokenizer = Tokenizer(
BPE(
bpe_vocab,
merges,
unk_token=proto.trainer_spec.unk_piece,
fuse_unk=True,
byte_fallback=self.handle_byte_fallback,
dropout=None,
)
)
# Add user defined symbols and control tokens from sentencepiece model
spm_added_tokens = [
(id, p.piece, p.type == 3 or p.piece in self.special_tokens)
for id, p in enumerate(proto.pieces)
if p.type in [3, 4]
]
tokenizer.add_tokens(
[
AddedToken(token, normalized=False, special=special)
for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0])
]
)
return tokenizer
def bytes_to_unicode():
"""
Returns a mapping between utf-8 bytes and unicode strings. We specifically avoid mapping to
whitespace/control characters that the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
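# A small illustration of the mapping built above; the values follow directly from the
# function's logic (printable latin-1 bytes map to themselves, excluded bytes are shifted
# into the 256+ range so every byte gets a printable stand-in):
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")]   # "A"   (already printable, kept as-is)
#   byte_encoder[ord(" ")]   # "\u0120" ("Ġ"), the familiar GPT-2 space marker
#   len(byte_encoder)        # 256, one entry per possible byte value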
|
ParakeetConverter
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-fauna/unit_tests/test_util.py
|
{
"start": 1354,
"end": 2863
}
|
class ____:
def __init__(self, domain: str, port: int, scheme: str, secret: str, collection=CollectionConfig()):
self.domain = domain
self.port = port
self.scheme = scheme
self.secret = secret
self.collection = collection
@staticmethod
def localhost(collection=CollectionConfig()) -> "FullConfig":
# 9000 is our testing db, that we spawn in database_test.py
return FullConfig(domain="127.0.0.1", port=9000, scheme="http", secret="secret", collection=collection)
def partial_overwrite(obj: dict, new: dict) -> dict:
"""
Recursively replaces the values in obj with the values in new.
"""
for k, v in new.items():
if type(v) is dict:
partial_overwrite(obj[k], v)
else:
obj[k] = v
return obj
def config(extra: dict[str, any]) -> dict[str, any]:
obj = {
"domain": "127.0.0.1",
"port": 8443,
"scheme": "http",
"secret": "secret",
"collection": {
"page_size": 64,
"deletions": {"deletion_mode": "ignore"},
},
}
return partial_overwrite(obj, extra)
def expand_columns_query(ref):
doc = q.var("document")
return q.let(
{
"document": q.get(ref),
},
{
"ref": q.select(["ref", "id"], doc),
"ts": q.select("ts", doc),
"data": q.select("data", doc, {}),
"ttl": q.select("ttl", doc, None),
},
)
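# A small sketch of how the helpers above compose; the override dict is hypothetical and
# the rest follows from the defaults in `config`:
#   config({"collection": {"page_size": 16}})
#   # -> the default dict, except collection.page_size becomes 16; collection.deletions is
#   #    left untouched because partial_overwrite recurses into nested dicts.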
|
FullConfig
|
python
|
ethereum__web3.py
|
web3/types.py
|
{
"start": 4507,
"end": 4628
}
|
class ____(TypedDict):
index: int
validator_index: int
address: ChecksumAddress
amount: Gwei
|
WithdrawalData
|
python
|
gevent__gevent
|
src/greentest/3.10/test_subprocess.py
|
{
"start": 1656,
"end": 2288
}
|
class ____(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
|
BaseTestCase
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/release_thresholds/utils/test_get_new_issue_counts.py
|
{
"start": 718,
"end": 6277
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization()
self.project1 = self.create_project(name="foo", organization=self.org)
self.project2 = self.create_project(name="bar", organization=self.org)
# 2 environments
self.null_environment = Environment.objects.create(
organization_id=self.organization.id, name=""
)
self.canary_environment = Environment.objects.create(
organization_id=self.organization.id, name="canary"
)
# release created for proj1, and proj2
self.release1 = Release.objects.create(version="v1", organization=self.organization)
# add_project get_or_creates a ReleaseProject
self.release1.add_project(self.project1)
self.release1.add_project(self.project2)
# Attaches the release to a particular environment
# project superfluous/deprecated in ReleaseEnvironment
# release1 canary
ReleaseEnvironment.objects.create(
organization_id=self.organization.id,
release_id=self.release1.id,
environment_id=self.canary_environment.id,
)
# Release Project Environments are required to query releases by project
# Even though both environment & project are here, this seems to just attach a release to a project
# You can have multiple ReleaseProjectEnvironment's per release (this attaches multiple projects to the release&env)
# release1 project1 canary
ReleaseProjectEnvironment.objects.create(
release_id=self.release1.id,
project_id=self.project1.id,
environment_id=self.canary_environment.id,
)
self.now = datetime.now(timezone.utc)
self.group1_p1_r1 = Group.objects.create(
project=self.project1,
first_release=self.release1,
first_seen=self.now - timedelta(minutes=30),
)
self.groupenvironment_g1_r1 = GroupEnvironment.objects.create(
group_id=self.group1_p1_r1.id,
environment_id=self.null_environment.id,
first_release=self.release1,
first_seen=self.now - timedelta(minutes=30),
)
self.group2_p1_r1 = Group.objects.create(
project=self.project1,
first_release=self.release1,
first_seen=self.now - timedelta(minutes=30),
)
self.groupenvironment_g2_r1 = GroupEnvironment.objects.create(
group_id=self.group2_p1_r1.id,
environment_id=self.canary_environment.id,
first_release=self.release1,
first_seen=self.now - timedelta(minutes=30),
)
def test_success_fetches_new_issue_counts(self) -> None:
# standard threshold
t1: EnrichedThreshold = {
"id": "1",
"project_id": self.project1.id,
"release": self.release1.version,
"start": self.now - timedelta(hours=1),
"end": self.now,
"date": self.now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_slug": self.project1.slug,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 1,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
# threshold w/ environment
t2: EnrichedThreshold = {
"id": "2",
"project_id": self.project1.id,
"release": self.release1.version,
"start": self.now - timedelta(hours=1),
"end": self.now,
"date": self.now,
"environment": {"name": "canary"},
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_slug": self.project1.slug,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 1,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
# second threshold separate start/end
t3: EnrichedThreshold = {
"id": "3",
"project_id": self.project1.id,
"release": self.release1.version,
"start": self.now,
"end": self.now + timedelta(hours=1),
"date": self.now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_slug": self.project1.slug,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 1,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
thresholds: list[EnrichedThreshold] = [t1, t2, t3]
new_issue_counts = get_new_issue_counts(organization_id=self.org.id, thresholds=thresholds)
assert new_issue_counts[str(t1["id"])] == 1
assert new_issue_counts[str(t2["id"])] == 1
assert new_issue_counts.get(str(t3["id"]), None) is None
|
GetNewIssueCountTest
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/stripe/provider.py
|
{
"start": 583,
"end": 1021
}
|
class ____(OAuth2Provider):
id = "stripe"
name = "Stripe"
account_class = StripeAccount
oauth2_adapter_class = StripeOAuth2Adapter
def extract_uid(self, data):
return data["id"]
def extract_common_fields(self, data):
return dict(name=data.get("display_name"), email=data.get("email"))
def get_default_scope(self):
return ["read_only"]
provider_classes = [StripeProvider]
|
StripeProvider
|
python
|
pandas-dev__pandas
|
pandas/tests/extension/base/ops.py
|
{
"start": 9145,
"end": 10631
}
|
class ____(BaseOpsUtil):
def test_invert(self, data):
ser = pd.Series(data, name="name")
try:
[~x for x in data]
except TypeError:
# scalars don't support invert -> we don't expect the vectorized
# operation to succeed
with pytest.raises(TypeError):
~ser
with pytest.raises(TypeError):
~data
else:
# Note we do not reuse the pointwise result to construct expected
# because python semantics for negating bools are weird see GH#54569
result = ~ser
expected = pd.Series(~data, name="name")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
# the dunder __pos__ works if and only if np.positive works,
# same for __neg__/np.negative and __abs__/np.abs
attr = {np.positive: "__pos__", np.negative: "__neg__", np.abs: "__abs__"}[
ufunc
]
exc = None
try:
result = getattr(data, attr)()
except Exception as err:
exc = err
# if __pos__ raised, then so should the ufunc
with pytest.raises((type(exc), TypeError)):
ufunc(data)
else:
alt = ufunc(data)
tm.assert_extension_array_equal(result, alt)
|
BaseUnaryOpsTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/design-an-expression-tree-with-evaluate-function.py
|
{
"start": 84,
"end": 231
}
|
class ____:
__metaclass__ = ABCMeta
# define your fields here
@abstractmethod
def evaluate(self):
pass
import operator
|
Node
|