| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
pytorch__pytorch
|
test/dynamo/test_repros.py
|
{"start": 24347, "end": 26402}
|
class ____:
attn_layers = ["local", "lsh", "local", "lsh", "local", "lsh"]
lsh_attn_chunk_length = 64
local_attn_chunk_length = 64
def _get_min_chunk_len(config):
"""from hf_Reformer"""
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}:
return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
def _stable_argsort(vector, dim):
"""from hf_Reformer"""
# this function scales the vector so that torch.argsort is stable.
# torch.argsort is not stable on its own
scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)
scale_offset = scale_offset.expand(vector.shape)
scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])
return torch.argsort(scaled_vector, dim=dim)
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(buckets):
"""from hf_Reformer"""
# no gradients are needed
with torch.no_grad():
# hash-based sort
sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
# create simple indices to scatter to, to have undo sort
indices = (
torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
.view(1, 1, -1)
.expand(sorted_bucket_idx.shape)
)
# get undo sort
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return sorted_bucket_idx, undo_sorted_bucket_idx
|
DummyConfig
|
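The `_stable_argsort` helper in the row above makes `torch.argsort` deterministic by folding each element's position into its value before sorting. A minimal sketch of the same trick, assuming PyTorch is installed and a 3-D tensor sorted along its last dimension (shapes and values here are illustrative only):

```python
import torch

def stable_argsort(vector: torch.Tensor, dim: int = -1) -> torch.Tensor:
    # Scale values by the size of the sorted dimension and add each
    # element's index, so ties fall back to original position.
    n = vector.shape[dim]
    offsets = torch.arange(n, device=vector.device).view(1, 1, -1).expand(vector.shape)
    return torch.argsort(n * vector + (offsets % n), dim=dim)

# The two 2s keep their original order: index 1 sorts before index 3.
buckets = torch.tensor([[[3, 2, 1, 2]]])
print(stable_argsort(buckets))  # tensor([[[2, 1, 3, 0]]])
```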
python
|
pytorch__pytorch
|
torch/_export/serde/schema.py
|
{"start": 3315, "end": 3796}
|
class ____(_Union):
as_name: Annotated[str, 10]
as_float: Annotated[float, 20]
# In most cases we will use the "as_name" field to store arguments which are
# SymBools.
# The "as_bool" field is used in the case where we have a list containing a mix
# of SymBool and bools (ex. [True, i0, ...]). We will serialize this type of list to
# be List[SymboolArgument] and map the SymBools to the "as_name" field, and bools
# to the "as_bool" field.
@_union_dataclass
|
SymFloatArgument
|
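The row above relies on `typing.Annotated` to attach integer field ids to a tagged-union schema. A small, self-contained sketch of reading those ids back with the standard library (the class and ids below are illustrative, not torch's real serde types):

```python
from dataclasses import dataclass
from typing import Annotated, get_args, get_type_hints

@dataclass
class SymFloatLike:
    # Integer metadata plays the role of the serialization field ids above.
    as_name: Annotated[str, 10]
    as_float: Annotated[float, 20]

for field, hint in get_type_hints(SymFloatLike, include_extras=True).items():
    base, *metadata = get_args(hint)
    print(field, base.__name__, metadata[0])
# as_name str 10
# as_float float 20
```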
python
|
django__django
|
django/db/models/sql/where.py
|
{"start": 466, "end": 11689}
|
class ____(tree.Node):
"""
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with as_sql() and either
relabeled_clone() method or relabel_aliases() and clone() methods and
contains_aggregate attribute.
"""
default = AND
resolved = False
conditional = True
def split_having_qualify(self, negated=False, must_group_by=False):
"""
Return three possibly None nodes: one for those parts of self that
should be included in the WHERE clause, one for those parts of self
that must be included in the HAVING clause, and one for those parts
that refer to window functions.
"""
if not self.contains_aggregate and not self.contains_over_clause:
return self, None, None
in_negated = negated ^ self.negated
# Whether or not children must be connected in the same filtering
# clause (WHERE > HAVING > QUALIFY) to maintain logical semantics.
must_remain_connected = (
(in_negated and self.connector == AND)
or (not in_negated and self.connector == OR)
or self.connector == XOR
)
if (
must_remain_connected
and self.contains_aggregate
and not self.contains_over_clause
):
# It's much cheaper to short-circuit and stash everything in the
# HAVING clause than split children if possible.
return None, self, None
where_parts = []
having_parts = []
qualify_parts = []
for c in self.children:
if hasattr(c, "split_having_qualify"):
where_part, having_part, qualify_part = c.split_having_qualify(
in_negated, must_group_by
)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
if qualify_part is not None:
qualify_parts.append(qualify_part)
elif c.contains_over_clause:
qualify_parts.append(c)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
if must_remain_connected and qualify_parts:
# Disjunctive heterogeneous predicates can be pushed down to
# qualify as long as no conditional aggregation is involved.
if not where_parts or (where_parts and not must_group_by):
return None, None, self
elif where_parts:
# In theory this should only be enforced when dealing with
# where_parts containing predicates against multi-valued
# relationships that could affect aggregation results but this
# is complex to infer properly.
raise NotImplementedError(
"Heterogeneous disjunctive predicates against window functions are "
"not implemented when performing conditional aggregation."
)
where_node = (
self.create(where_parts, self.connector, self.negated)
if where_parts
else None
)
having_node = (
self.create(having_parts, self.connector, self.negated)
if having_parts
else None
)
qualify_node = (
self.create(qualify_parts, self.connector, self.negated)
if qualify_parts
else None
)
return where_node, having_node, qualify_node
def as_sql(self, compiler, connection):
"""
Return the SQL version of the where clause and the value to be
substituted in. Return '', [] if this node matches everything,
None, [] if this node is empty, and raise EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
if self.connector == XOR and not connection.features.supports_logical_xor:
# Convert if the database doesn't support XOR:
# a XOR b XOR c XOR ...
# to:
# (a OR b OR c OR ...) AND MOD(a + b + c + ..., 2) == 1
# The result of an n-ary XOR is true when an odd number of operands
# are true.
lhs = self.__class__(self.children, OR)
rhs_sum = reduce(
operator.add,
(Case(When(c, then=1), default=0) for c in self.children),
)
if len(self.children) > 2:
rhs_sum = Mod(rhs_sum, 2)
rhs = Exact(1, rhs_sum)
return self.__class__([lhs, rhs], AND, self.negated).as_sql(
compiler, connection
)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
except FullResultSet:
full_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check if this node matches nothing or everything.
# First check the amount of full nodes and empty nodes
# to make this node empty/full.
# Now, check if this node is full/empty using the
# counts.
if empty_needed == 0:
if self.negated:
raise FullResultSet
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
raise FullResultSet
conn = " %s " % self.connector
sql_string = conn.join(result)
if not sql_string:
raise FullResultSet
if self.negated:
# Some backends (Oracle at least) need parentheses around the inner
# SQL in the negated case, even if the inner SQL contains just a
# single expression.
sql_string = "NOT (%s)" % sql_string
elif len(result) > 1 or self.resolved:
sql_string = "(%s)" % sql_string
return sql_string, result_params
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
if not change_map:
return self
for pos, child in enumerate(self.children):
if hasattr(child, "relabel_aliases"):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, "relabeled_clone"):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, "clone"):
child = child.clone()
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
def replace_expressions(self, replacements):
if not replacements:
return self
if replacement := replacements.get(self):
return replacement
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
clone.children.append(child.replace_expressions(replacements))
return clone
def get_refs(self):
refs = set()
for child in self.children:
refs |= child.get_refs()
return refs
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@classmethod
def _contains_over_clause(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_over_clause(c) for c in obj.children)
return obj.contains_over_clause
@cached_property
def contains_over_clause(self):
return self._contains_over_clause(self)
@property
def is_summary(self):
return any(child.is_summary for child in self.children)
@staticmethod
def _resolve_leaf(expr, query, *args, **kwargs):
if hasattr(expr, "resolve_expression"):
expr = expr.resolve_expression(query, *args, **kwargs)
return expr
@classmethod
def _resolve_node(cls, node, query, *args, **kwargs):
if hasattr(node, "children"):
for child in node.children:
cls._resolve_node(child, query, *args, **kwargs)
if hasattr(node, "lhs"):
node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs)
if hasattr(node, "rhs"):
node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs)
def resolve_expression(self, *args, **kwargs):
clone = self.clone()
clone._resolve_node(clone, *args, **kwargs)
clone.resolved = True
return clone
@cached_property
def output_field(self):
from django.db.models import BooleanField
return BooleanField()
@property
def _output_field_or_none(self):
return self.output_field
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def leaves(self):
for child in self.children:
if isinstance(child, WhereNode):
yield from child.leaves()
else:
yield child
|
WhereNode
|
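The XOR fallback in `as_sql` above rewrites `a XOR b XOR c ...` as `(a OR b OR ...) AND MOD(a + b + ..., 2) = 1`, i.e. at least one operand is true and an odd number of operands are true. A quick pure-Python check of that parity identity, independent of Django:

```python
from itertools import product

def chained_xor(values):
    # n-ary XOR is true when an odd number of operands are true.
    result = False
    for v in values:
        result ^= v
    return result

def rewritten(values):
    # The SQL fallback: at least one true operand AND an odd count of them.
    return any(values) and sum(values) % 2 == 1

for combo in product([False, True], repeat=3):
    assert chained_xor(combo) == rewritten(combo)
print("parity rewrite matches chained XOR for every 3-operand combination")
```

The `Mod` wrapper is only applied for more than two operands because a two-term sum of 0/1 cases can already equal 1 only when exactly one operand is true.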
python
|
graphql-python__graphene
|
graphene/relay/id_type.py
|
{"start": 539, "end": 1315}
|
class ____(BaseGlobalIDType):
"""
Default global ID type: base64 encoded version of "<node type name>: <node id>".
"""
graphene_type = ID
@classmethod
def resolve_global_id(cls, info, global_id):
try:
_type, _id = from_global_id(global_id)
if not _type:
raise ValueError("Invalid Global ID")
return _type, _id
except Exception as e:
raise Exception(
f'Unable to parse global ID "{global_id}". '
'Make sure it is a base64 encoded string in the format: "TypeName:id". '
f"Exception message: {e}"
)
@classmethod
def to_global_id(cls, _type, _id):
return to_global_id(_type, _id)
|
DefaultGlobalIDType
|
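The default global ID in the row above is just base64 of `"<type name>:<id>"`. A minimal sketch of that format with the standard library (the helper names are illustrative, not graphene's own `to_global_id`/`from_global_id`):

```python
import base64

def encode_global_id(type_name: str, local_id: str) -> str:
    # "<node type name>:<node id>" as base64 text.
    return base64.b64encode(f"{type_name}:{local_id}".encode()).decode()

def decode_global_id(global_id: str) -> tuple[str, str]:
    decoded = base64.b64decode(global_id).decode()
    type_name, _, local_id = decoded.partition(":")
    if not type_name:
        raise ValueError("Invalid Global ID")
    return type_name, local_id

gid = encode_global_id("Ship", "42")
print(gid)                    # U2hpcDo0Mg==
print(decode_global_id(gid))  # ('Ship', '42')
```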
python
|
apache__airflow
|
providers/databricks/src/airflow/providers/databricks/triggers/databricks.py
|
{"start": 1100, "end": 4550}
|
class ____(BaseTrigger):
"""
The trigger handles the logic of async communication with the Databricks API.
:param run_id: id of the run
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param polling_period_seconds: Controls the rate of the poll for the result of this run.
By default, the trigger will poll every 30 seconds.
:param retry_limit: The number of times to retry the connection in case of service outages.
:param retry_delay: The number of seconds to wait between retries.
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param run_page_url: The run page url.
"""
def __init__(
self,
run_id: int,
databricks_conn_id: str,
polling_period_seconds: int = 30,
retry_limit: int = 3,
retry_delay: int = 10,
retry_args: dict[Any, Any] | None = None,
run_page_url: str | None = None,
repair_run: bool = False,
caller: str = "DatabricksExecutionTrigger",
) -> None:
super().__init__()
self.run_id = run_id
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.retry_limit = retry_limit
self.retry_delay = retry_delay
self.retry_args = retry_args
self.run_page_url = run_page_url
self.repair_run = repair_run
self.hook = DatabricksHook(
databricks_conn_id,
retry_limit=self.retry_limit,
retry_delay=self.retry_delay,
retry_args=retry_args,
caller=caller,
)
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.databricks.triggers.databricks.DatabricksExecutionTrigger",
{
"run_id": self.run_id,
"databricks_conn_id": self.databricks_conn_id,
"polling_period_seconds": self.polling_period_seconds,
"retry_limit": self.retry_limit,
"retry_delay": self.retry_delay,
"retry_args": self.retry_args,
"run_page_url": self.run_page_url,
"repair_run": self.repair_run,
},
)
async def run(self):
async with self.hook:
while True:
run_state = await self.hook.a_get_run_state(self.run_id)
if not run_state.is_terminal:
self.log.info(
"run-id %s in run state %s. sleeping for %s seconds",
self.run_id,
run_state,
self.polling_period_seconds,
)
await asyncio.sleep(self.polling_period_seconds)
continue
run_info = await self.hook.a_get_run(self.run_id)
failed_tasks = await extract_failed_task_errors_async(self.hook, run_info, run_state)
yield TriggerEvent(
{
"run_id": self.run_id,
"run_page_url": self.run_page_url,
"run_state": run_state.to_json(),
"repair_run": self.repair_run,
"errors": failed_tasks,
}
)
return
|
DatabricksExecutionTrigger
|
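The trigger above is essentially an async polling loop: sleep while the run is non-terminal, then yield one event and stop. A stripped-down sketch of that shape with plain `asyncio` (the fake `get_run_state` below stands in for the Databricks hook):

```python
import asyncio
from itertools import count

_polls = count(1)

async def get_run_state(run_id: int) -> str:
    # Stand-in for the hook's a_get_run_state: terminal on the third poll.
    return "TERMINATED" if next(_polls) >= 3 else "RUNNING"

async def poll_until_terminal(run_id: int, interval: float = 0.05):
    # Same shape as the trigger's run(): sleep while non-terminal,
    # then yield a single event and stop.
    while True:
        state = await get_run_state(run_id)
        if state != "RUNNING":
            yield {"run_id": run_id, "run_state": state}
            return
        await asyncio.sleep(interval)

async def main():
    async for event in poll_until_terminal(run_id=1):
        print(event)  # {'run_id': 1, 'run_state': 'TERMINATED'}

asyncio.run(main())
```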
python
|
keras-team__keras
|
keras/src/trainers/trainer_test.py
|
{"start": 2631, "end": 3210}
|
class ____(Trainer, layers.Layer):
def __init__(self, units):
layers.Layer.__init__(self)
Trainer.__init__(self)
self.dense_1 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
self.dense_2 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
def call(self, x):
return {
"y_one": self.dense_1(x["x_one"]),
"y_two": self.dense_2(x["x_two"]),
}
|
StructModel
|
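The test model above maps a dict of inputs to a dict of outputs. A public-API analogue using a functional `keras.Model` instead of the test's internal `Trainer` mixin, assuming Keras 3 and NumPy are available:

```python
import numpy as np
import keras

x_one = keras.Input(shape=(3,), name="x_one")
x_two = keras.Input(shape=(3,), name="x_two")
outputs = {
    "y_one": keras.layers.Dense(2, use_bias=False)(x_one),
    "y_two": keras.layers.Dense(2, use_bias=False)(x_two),
}
model = keras.Model({"x_one": x_one, "x_two": x_two}, outputs)
model.compile(optimizer="sgd", loss={"y_one": "mse", "y_two": "mse"})

data_x = {"x_one": np.ones((8, 3)), "x_two": np.ones((8, 3))}
data_y = {"y_one": np.zeros((8, 2)), "y_two": np.zeros((8, 2))}
model.fit(data_x, data_y, epochs=1, verbose=0)
preds = model.predict(data_x, verbose=0)
print({name: p.shape for name, p in preds.items()})
# {'y_one': (8, 2), 'y_two': (8, 2)}
```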
python
|
Textualize__textual
|
src/textual/drivers/win32.py
|
{"start": 3149, "end": 6208}
|
class ____(Structure):
"""https://docs.microsoft.com/en-us/windows/console/input-record-str"""
_fields_ = [("EventType", wintypes.WORD), ("Event", InputEvent)]
def set_console_mode(file: IO, mode: int) -> bool:
"""Set the console mode for a given file (stdout or stdin).
Args:
file: A file like object.
mode: New mode.
Returns:
True on success, otherwise False.
"""
windows_filehandle = msvcrt.get_osfhandle(file.fileno()) # type: ignore
success = KERNEL32.SetConsoleMode(windows_filehandle, mode)
return success
def get_console_mode(file: IO) -> int:
"""Get the console mode for a given file (stdout or stdin)
Args:
file: A file-like object.
Returns:
The current console mode.
"""
windows_filehandle = msvcrt.get_osfhandle(file.fileno()) # type: ignore
mode = wintypes.DWORD()
KERNEL32.GetConsoleMode(windows_filehandle, ctypes.byref(mode))
return mode.value
def enable_application_mode() -> Callable[[], None]:
"""Enable application mode.
Returns:
A callable that will restore terminal to previous state.
"""
terminal_in = sys.__stdin__
terminal_out = sys.__stdout__
current_console_mode_in = get_console_mode(terminal_in)
current_console_mode_out = get_console_mode(terminal_out)
def restore() -> None:
"""Restore console mode to previous settings"""
set_console_mode(terminal_in, current_console_mode_in)
set_console_mode(terminal_out, current_console_mode_out)
set_console_mode(
terminal_out, current_console_mode_out | ENABLE_VIRTUAL_TERMINAL_PROCESSING
)
set_console_mode(terminal_in, ENABLE_VIRTUAL_TERMINAL_INPUT)
return restore
def wait_for_handles(handles: List[HANDLE], timeout: int = -1) -> Optional[HANDLE]:
"""
Waits for multiple handles. (Similar to 'select') Returns the handle which is ready.
Returns `None` on timeout.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms687025(v=vs.85).aspx
Note that handles should be a list of `HANDLE` objects, not integers. See
this comment in the patch by @quark-zju for the reason why:
''' Make sure HANDLE on Windows has a correct size
Previously, the type of various HANDLEs are native Python integer
types. The ctypes library will treat them as 4-byte integer when used
in function arguments. On 64-bit Windows, HANDLE is 8-byte and usually
a small integer. Depending on whether the extra 4 bytes are zero-ed out
or not, things can happen to work, or break. '''
This function returns either `None` or one of the given `HANDLE` objects.
(The return value can be tested with the `is` operator.)
"""
arrtype = HANDLE * len(handles)
handle_array = arrtype(*handles)
ret: int = KERNEL32.WaitForMultipleObjects(
len(handle_array), handle_array, BOOL(False), DWORD(timeout)
)
if ret == WAIT_TIMEOUT:
return None
else:
return handles[ret]
|
INPUT_RECORD
|
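The helpers above toggle Windows console flags through ctypes. A Windows-only sketch of the same round-trip on stdout (the `msvcrt` import itself requires Windows; the flag value matches the Win32 constant):

```python
import ctypes
import msvcrt
import sys
from ctypes import wintypes

KERNEL32 = ctypes.WinDLL("kernel32", use_last_error=True)
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004

def get_mode(file) -> int:
    handle = msvcrt.get_osfhandle(file.fileno())
    mode = wintypes.DWORD()
    KERNEL32.GetConsoleMode(handle, ctypes.byref(mode))
    return mode.value

def set_mode(file, mode: int) -> bool:
    handle = msvcrt.get_osfhandle(file.fileno())
    return bool(KERNEL32.SetConsoleMode(handle, mode))

if sys.platform == "win32":
    previous = get_mode(sys.__stdout__)
    # Enable VT processing so ANSI escapes are interpreted, then restore.
    set_mode(sys.__stdout__, previous | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    print("\x1b[1mbold via ANSI\x1b[0m")
    set_mode(sys.__stdout__, previous)
```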
python
|
getsentry__sentry
|
src/sentry/analytics/events/sentry_app_installation_token_deleted.py
|
{"start": 94, "end": 306}
|
class ____(analytics.Event):
user_id: int
organization_id: int
sentry_app_installation_id: int
sentry_app: str
analytics.register(SentryAppInstallationTokenDeleted)
|
SentryAppInstallationTokenDeleted
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/exceptions.py
|
{"start": 871, "end": 999}
|
class ____(AirflowException):
"""Raised when exception happens during Pod Mutation Hook execution."""
|
PodMutationHookException
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/suite/test_reflection.py
|
{"start": 19446, "end": 90758}
|
class ____(ComparesTables, OneConnectionTablesTest):
run_inserts = run_deletes = None
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.define_reflected_tables(metadata, None)
if testing.requires.schemas.enabled:
cls.define_reflected_tables(metadata, testing.config.test_schema)
@classmethod
def define_reflected_tables(cls, metadata, schema):
if schema:
schema_prefix = schema + "."
else:
schema_prefix = ""
if testing.requires.self_referential_foreign_keys.enabled:
parent_id_args = (
ForeignKey(
"%susers.user_id" % schema_prefix, name="user_id_fk"
),
)
else:
parent_id_args = ()
users = Table(
"users",
metadata,
Column("user_id", sa.INT, primary_key=True),
Column("test1", sa.CHAR(5), nullable=False),
Column("test2", sa.Float(), nullable=False),
Column("parent_user_id", sa.Integer, *parent_id_args),
sa.CheckConstraint(
"test2 > 0",
name="zz_test2_gt_zero",
comment="users check constraint",
),
sa.CheckConstraint("test2 <= 1000"),
schema=schema,
test_needs_fk=True,
)
Table(
"dingalings",
metadata,
Column("dingaling_id", sa.Integer, primary_key=True),
Column(
"address_id",
sa.Integer,
ForeignKey(
"%semail_addresses.address_id" % schema_prefix,
name="zz_email_add_id_fg",
comment="di fk comment",
),
),
Column(
"id_user",
sa.Integer,
ForeignKey("%susers.user_id" % schema_prefix),
),
Column("data", sa.String(30), unique=True),
sa.CheckConstraint(
"address_id > 0 AND address_id < 1000",
name="address_id_gt_zero",
),
sa.UniqueConstraint(
"address_id",
"dingaling_id",
name="zz_dingalings_multiple",
comment="di unique comment",
),
schema=schema,
test_needs_fk=True,
)
Table(
"email_addresses",
metadata,
Column("address_id", sa.Integer),
Column("remote_user_id", sa.Integer, ForeignKey(users.c.user_id)),
Column("email_address", sa.String(20), index=True),
sa.PrimaryKeyConstraint(
"address_id", name="email_ad_pk", comment="ea pk comment"
),
schema=schema,
test_needs_fk=True,
)
Table(
"comment_test",
metadata,
Column("id", sa.Integer, primary_key=True, comment="id comment"),
Column("data", sa.String(20), comment="data % comment"),
Column(
"d2",
sa.String(20),
comment=r"""Comment types type speedily ' " \ '' Fun!""",
),
Column("d3", sa.String(42), comment="Comment\nwith\rescapes"),
schema=schema,
comment=r"""the test % ' " \ table comment""",
)
Table(
"no_constraints",
metadata,
Column("data", sa.String(20)),
schema=schema,
comment="no\nconstraints\rhas\fescaped\vcomment",
)
if testing.requires.cross_schema_fk_reflection.enabled:
if schema is None:
Table(
"local_table",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(20)),
Column(
"remote_id",
ForeignKey(
"%s.remote_table_2.id" % testing.config.test_schema
),
),
test_needs_fk=True,
schema=config.db.dialect.default_schema_name,
)
else:
Table(
"remote_table",
metadata,
Column("id", sa.Integer, primary_key=True),
Column(
"local_id",
ForeignKey(
"%s.local_table.id"
% config.db.dialect.default_schema_name
),
),
Column("data", sa.String(20)),
schema=schema,
test_needs_fk=True,
)
Table(
"remote_table_2",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(20)),
schema=schema,
test_needs_fk=True,
)
if testing.requires.index_reflection.enabled:
Index("users_t_idx", users.c.test1, users.c.test2, unique=True)
Index(
"users_all_idx", users.c.user_id, users.c.test2, users.c.test1
)
if not schema:
# test_needs_fk is at the moment to force MySQL InnoDB
noncol_idx_test_nopk = Table(
"noncol_idx_test_nopk",
metadata,
Column("q", sa.String(5)),
test_needs_fk=True,
)
noncol_idx_test_pk = Table(
"noncol_idx_test_pk",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("q", sa.String(5)),
test_needs_fk=True,
)
if (
testing.requires.indexes_with_ascdesc.enabled
and testing.requires.reflect_indexes_with_ascdesc.enabled
):
Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc())
Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc())
if testing.requires.view_column_reflection.enabled:
cls.define_views(metadata, schema)
if not schema and testing.requires.temp_table_reflection.enabled:
cls.define_temp_tables(metadata)
@classmethod
def temp_table_name(cls):
return get_temp_table_name(
config, config.db, f"user_tmp_{config.ident}"
)
@classmethod
def define_temp_tables(cls, metadata):
kw = temp_table_keyword_args(config, config.db)
table_name = cls.temp_table_name()
user_tmp = Table(
table_name,
metadata,
Column("id", sa.INT, primary_key=True),
Column("name", sa.VARCHAR(50)),
Column("foo", sa.INT),
# disambiguate temp table unique constraint names. this is
# pretty arbitrary for a generic dialect however we are doing
# it to suit SQL Server which will produce name conflicts for
# unique constraints created against temp tables in different
# databases.
# https://www.arbinada.com/en/node/1645
sa.UniqueConstraint("name", name=f"user_tmp_uq_{config.ident}"),
sa.Index("user_tmp_ix", "foo"),
**kw,
)
if (
testing.requires.view_reflection.enabled
and testing.requires.temporary_views.enabled
):
event.listen(
user_tmp,
"after_create",
DDL(
"create temporary view user_tmp_v as "
"select * from user_tmp_%s" % config.ident
),
)
event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v"))
@classmethod
def define_views(cls, metadata, schema):
if testing.requires.materialized_views.enabled:
materialized = {"dingalings"}
else:
materialized = set()
for table_name in ("users", "email_addresses", "dingalings"):
fullname = table_name
if schema:
fullname = f"{schema}.{table_name}"
view_name = fullname + "_v"
prefix = "MATERIALIZED " if table_name in materialized else ""
query = (
f"CREATE {prefix}VIEW {view_name} AS SELECT * FROM {fullname}"
)
event.listen(metadata, "after_create", DDL(query))
if table_name in materialized:
index_name = "mat_index"
if schema and testing.against("oracle"):
index_name = f"{schema}.{index_name}"
idx = f"CREATE INDEX {index_name} ON {view_name}(data)"
event.listen(metadata, "after_create", DDL(idx))
event.listen(
metadata, "before_drop", DDL(f"DROP {prefix}VIEW {view_name}")
)
def _resolve_kind(self, kind, tables, views, materialized):
res = {}
if ObjectKind.TABLE in kind:
res.update(tables)
if ObjectKind.VIEW in kind:
res.update(views)
if ObjectKind.MATERIALIZED_VIEW in kind:
res.update(materialized)
return res
def _resolve_views(self, views, materialized):
if not testing.requires.view_column_reflection.enabled:
materialized.clear()
views.clear()
elif not testing.requires.materialized_views.enabled:
views.update(materialized)
materialized.clear()
def _resolve_names(self, schema, scope, filter_names, values):
scope_filter = lambda _: True # noqa: E731
if scope is ObjectScope.DEFAULT:
scope_filter = lambda k: "tmp" not in k[1] # noqa: E731
if scope is ObjectScope.TEMPORARY:
scope_filter = lambda k: "tmp" in k[1] # noqa: E731
removed = {
None: {"remote_table", "remote_table_2"},
testing.config.test_schema: {
"local_table",
"noncol_idx_test_nopk",
"noncol_idx_test_pk",
"user_tmp_v",
self.temp_table_name(),
},
}
if not testing.requires.cross_schema_fk_reflection.enabled:
removed[None].add("local_table")
removed[testing.config.test_schema].update(
["remote_table", "remote_table_2"]
)
if not testing.requires.index_reflection.enabled:
removed[None].update(
["noncol_idx_test_nopk", "noncol_idx_test_pk"]
)
if (
not testing.requires.temp_table_reflection.enabled
or not testing.requires.temp_table_names.enabled
):
removed[None].update(["user_tmp_v", self.temp_table_name()])
if not testing.requires.temporary_views.enabled:
removed[None].update(["user_tmp_v"])
res = {
k: v
for k, v in values.items()
if scope_filter(k)
and k[1] not in removed[schema]
and (not filter_names or k[1] in filter_names)
}
return res
def exp_options(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
materialized = {(schema, "dingalings_v"): mock.ANY}
views = {
(schema, "email_addresses_v"): mock.ANY,
(schema, "users_v"): mock.ANY,
(schema, "user_tmp_v"): mock.ANY,
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): mock.ANY,
(schema, "dingalings"): mock.ANY,
(schema, "email_addresses"): mock.ANY,
(schema, "comment_test"): mock.ANY,
(schema, "no_constraints"): mock.ANY,
(schema, "local_table"): mock.ANY,
(schema, "remote_table"): mock.ANY,
(schema, "remote_table_2"): mock.ANY,
(schema, "noncol_idx_test_nopk"): mock.ANY,
(schema, "noncol_idx_test_pk"): mock.ANY,
(schema, self.temp_table_name()): mock.ANY,
}
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
def exp_comments(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
empty = {"text": None}
materialized = {(schema, "dingalings_v"): empty}
views = {
(schema, "email_addresses_v"): empty,
(schema, "users_v"): empty,
(schema, "user_tmp_v"): empty,
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): empty,
(schema, "dingalings"): empty,
(schema, "email_addresses"): empty,
(schema, "comment_test"): {
"text": r"""the test % ' " \ table comment"""
},
(schema, "no_constraints"): {
"text": "no\nconstraints\rhas\fescaped\vcomment"
},
(schema, "local_table"): empty,
(schema, "remote_table"): empty,
(schema, "remote_table_2"): empty,
(schema, "noncol_idx_test_nopk"): empty,
(schema, "noncol_idx_test_pk"): empty,
(schema, self.temp_table_name()): empty,
}
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
def exp_columns(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
def col(
name, auto=False, default=mock.ANY, comment=None, nullable=True
):
res = {
"name": name,
"autoincrement": auto,
"type": mock.ANY,
"default": default,
"comment": comment,
"nullable": nullable,
}
if auto == "omit":
res.pop("autoincrement")
return res
def pk(name, **kw):
kw = {"auto": True, "default": mock.ANY, "nullable": False, **kw}
return col(name, **kw)
materialized = {
(schema, "dingalings_v"): [
col("dingaling_id", auto="omit", nullable=mock.ANY),
col("address_id"),
col("id_user"),
col("data"),
]
}
views = {
(schema, "email_addresses_v"): [
col("address_id", auto="omit", nullable=mock.ANY),
col("remote_user_id"),
col("email_address"),
],
(schema, "users_v"): [
col("user_id", auto="omit", nullable=mock.ANY),
col("test1", nullable=mock.ANY),
col("test2", nullable=mock.ANY),
col("parent_user_id"),
],
(schema, "user_tmp_v"): [
col("id", auto="omit", nullable=mock.ANY),
col("name"),
col("foo"),
],
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): [
pk("user_id"),
col("test1", nullable=False),
col("test2", nullable=False),
col("parent_user_id"),
],
(schema, "dingalings"): [
pk("dingaling_id"),
col("address_id"),
col("id_user"),
col("data"),
],
(schema, "email_addresses"): [
pk("address_id"),
col("remote_user_id"),
col("email_address"),
],
(schema, "comment_test"): [
pk("id", comment="id comment"),
col("data", comment="data % comment"),
col(
"d2",
comment=r"""Comment types type speedily ' " \ '' Fun!""",
),
col("d3", comment="Comment\nwith\rescapes"),
],
(schema, "no_constraints"): [col("data")],
(schema, "local_table"): [pk("id"), col("data"), col("remote_id")],
(schema, "remote_table"): [pk("id"), col("local_id"), col("data")],
(schema, "remote_table_2"): [pk("id"), col("data")],
(schema, "noncol_idx_test_nopk"): [col("q")],
(schema, "noncol_idx_test_pk"): [pk("id"), col("q")],
(schema, self.temp_table_name()): [
pk("id"),
col("name"),
col("foo"),
],
}
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
@property
def _required_column_keys(self):
return {"name", "type", "nullable", "default"}
def exp_pks(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
def pk(*cols, name=mock.ANY, comment=None):
return {
"constrained_columns": list(cols),
"name": name,
"comment": comment,
}
empty = pk(name=None)
if testing.requires.materialized_views_reflect_pk.enabled:
materialized = {(schema, "dingalings_v"): pk("dingaling_id")}
else:
materialized = {(schema, "dingalings_v"): empty}
views = {
(schema, "email_addresses_v"): empty,
(schema, "users_v"): empty,
(schema, "user_tmp_v"): empty,
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): pk("user_id"),
(schema, "dingalings"): pk("dingaling_id"),
(schema, "email_addresses"): pk(
"address_id", name="email_ad_pk", comment="ea pk comment"
),
(schema, "comment_test"): pk("id"),
(schema, "no_constraints"): empty,
(schema, "local_table"): pk("id"),
(schema, "remote_table"): pk("id"),
(schema, "remote_table_2"): pk("id"),
(schema, "noncol_idx_test_nopk"): empty,
(schema, "noncol_idx_test_pk"): pk("id"),
(schema, self.temp_table_name()): pk("id"),
}
if not testing.requires.reflects_pk_names.enabled:
for val in tables.values():
if val["name"] is not None:
val["name"] = mock.ANY
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
@property
def _required_pk_keys(self):
return {"name", "constrained_columns"}
def exp_fks(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
class tt:
def __eq__(self, other):
return (
other is None
or config.db.dialect.default_schema_name == other
)
def fk(
cols,
ref_col,
ref_table,
ref_schema=schema,
name=mock.ANY,
comment=None,
):
return {
"constrained_columns": cols,
"referred_columns": ref_col,
"name": name,
"options": mock.ANY,
"referred_schema": (
ref_schema if ref_schema is not None else tt()
),
"referred_table": ref_table,
"comment": comment,
}
materialized = {(schema, "dingalings_v"): []}
views = {
(schema, "email_addresses_v"): [],
(schema, "users_v"): [],
(schema, "user_tmp_v"): [],
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): [
fk(["parent_user_id"], ["user_id"], "users", name="user_id_fk")
],
(schema, "dingalings"): [
fk(["id_user"], ["user_id"], "users"),
fk(
["address_id"],
["address_id"],
"email_addresses",
name="zz_email_add_id_fg",
comment="di fk comment",
),
],
(schema, "email_addresses"): [
fk(["remote_user_id"], ["user_id"], "users")
],
(schema, "comment_test"): [],
(schema, "no_constraints"): [],
(schema, "local_table"): [
fk(
["remote_id"],
["id"],
"remote_table_2",
ref_schema=config.test_schema,
)
],
(schema, "remote_table"): [
fk(["local_id"], ["id"], "local_table", ref_schema=None)
],
(schema, "remote_table_2"): [],
(schema, "noncol_idx_test_nopk"): [],
(schema, "noncol_idx_test_pk"): [],
(schema, self.temp_table_name()): [],
}
if not testing.requires.self_referential_foreign_keys.enabled:
tables[(schema, "users")].clear()
if not testing.requires.named_constraints.enabled:
for vals in tables.values():
for val in vals:
if val["name"] is not mock.ANY:
val["name"] = mock.ANY
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
@property
def _required_fk_keys(self):
return {
"name",
"constrained_columns",
"referred_schema",
"referred_table",
"referred_columns",
}
def exp_indexes(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
def idx(
*cols,
name,
unique=False,
column_sorting=None,
duplicates=False,
fk=False,
):
fk_req = testing.requires.foreign_keys_reflect_as_index
dup_req = testing.requires.unique_constraints_reflect_as_index
sorting_expression = (
testing.requires.reflect_indexes_with_ascdesc_as_expression
)
if (fk and not fk_req.enabled) or (
duplicates and not dup_req.enabled
):
return ()
res = {
"unique": unique,
"column_names": list(cols),
"name": name,
"dialect_options": mock.ANY,
"include_columns": [],
}
if column_sorting:
res["column_sorting"] = column_sorting
if sorting_expression.enabled:
res["expressions"] = orig = res["column_names"]
res["column_names"] = [
None if c in column_sorting else c for c in orig
]
if duplicates:
res["duplicates_constraint"] = name
return [res]
materialized = {(schema, "dingalings_v"): []}
views = {
(schema, "email_addresses_v"): [],
(schema, "users_v"): [],
(schema, "user_tmp_v"): [],
}
self._resolve_views(views, materialized)
if materialized:
materialized[(schema, "dingalings_v")].extend(
idx("data", name="mat_index")
)
tables = {
(schema, "users"): [
*idx("parent_user_id", name="user_id_fk", fk=True),
*idx("user_id", "test2", "test1", name="users_all_idx"),
*idx("test1", "test2", name="users_t_idx", unique=True),
],
(schema, "dingalings"): [
*idx("data", name=mock.ANY, unique=True, duplicates=True),
*idx("id_user", name=mock.ANY, fk=True),
*idx(
"address_id",
"dingaling_id",
name="zz_dingalings_multiple",
unique=True,
duplicates=True,
),
],
(schema, "email_addresses"): [
*idx("email_address", name=mock.ANY),
*idx("remote_user_id", name=mock.ANY, fk=True),
],
(schema, "comment_test"): [],
(schema, "no_constraints"): [],
(schema, "local_table"): [
*idx("remote_id", name=mock.ANY, fk=True)
],
(schema, "remote_table"): [
*idx("local_id", name=mock.ANY, fk=True)
],
(schema, "remote_table_2"): [],
(schema, "noncol_idx_test_nopk"): [
*idx(
"q",
name="noncol_idx_nopk",
column_sorting={"q": ("desc",)},
)
],
(schema, "noncol_idx_test_pk"): [
*idx(
"q", name="noncol_idx_pk", column_sorting={"q": ("desc",)}
)
],
(schema, self.temp_table_name()): [
*idx("foo", name="user_tmp_ix"),
*idx(
"name",
name=f"user_tmp_uq_{config.ident}",
duplicates=True,
unique=True,
),
],
}
if (
not testing.requires.indexes_with_ascdesc.enabled
or not testing.requires.reflect_indexes_with_ascdesc.enabled
):
tables[(schema, "noncol_idx_test_nopk")].clear()
tables[(schema, "noncol_idx_test_pk")].clear()
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
@property
def _required_index_keys(self):
return {"name", "column_names", "unique"}
def exp_ucs(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
all_=False,
):
def uc(
*cols, name, duplicates_index=None, is_index=False, comment=None
):
req = testing.requires.unique_index_reflect_as_unique_constraints
if is_index and not req.enabled:
return ()
res = {
"column_names": list(cols),
"name": name,
"comment": comment,
}
if duplicates_index:
res["duplicates_index"] = duplicates_index
return [res]
materialized = {(schema, "dingalings_v"): []}
views = {
(schema, "email_addresses_v"): [],
(schema, "users_v"): [],
(schema, "user_tmp_v"): [],
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): [
*uc(
"test1",
"test2",
name="users_t_idx",
duplicates_index="users_t_idx",
is_index=True,
)
],
(schema, "dingalings"): [
*uc("data", name=mock.ANY, duplicates_index=mock.ANY),
*uc(
"address_id",
"dingaling_id",
name="zz_dingalings_multiple",
duplicates_index="zz_dingalings_multiple",
comment="di unique comment",
),
],
(schema, "email_addresses"): [],
(schema, "comment_test"): [],
(schema, "no_constraints"): [],
(schema, "local_table"): [],
(schema, "remote_table"): [],
(schema, "remote_table_2"): [],
(schema, "noncol_idx_test_nopk"): [],
(schema, "noncol_idx_test_pk"): [],
(schema, self.temp_table_name()): [
*uc("name", name=f"user_tmp_uq_{config.ident}")
],
}
if all_:
return {**materialized, **views, **tables}
else:
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
@property
def _required_unique_cst_keys(self):
return {"name", "column_names"}
def exp_ccs(
self,
schema=None,
scope=ObjectScope.ANY,
kind=ObjectKind.ANY,
filter_names=None,
):
class tt(str):
def __eq__(self, other):
res = (
other.lower()
.replace("(", "")
.replace(")", "")
.replace("`", "")
)
return self in res
def cc(text, name, comment=None):
return {"sqltext": tt(text), "name": name, "comment": comment}
# print({1: "test2 > (0)::double precision"} == {1: tt("test2 > 0")})
# assert 0
materialized = {(schema, "dingalings_v"): []}
views = {
(schema, "email_addresses_v"): [],
(schema, "users_v"): [],
(schema, "user_tmp_v"): [],
}
self._resolve_views(views, materialized)
tables = {
(schema, "users"): [
cc("test2 <= 1000", mock.ANY),
cc(
"test2 > 0",
"zz_test2_gt_zero",
comment="users check constraint",
),
],
(schema, "dingalings"): [
cc(
"address_id > 0 and address_id < 1000",
name="address_id_gt_zero",
),
],
(schema, "email_addresses"): [],
(schema, "comment_test"): [],
(schema, "no_constraints"): [],
(schema, "local_table"): [],
(schema, "remote_table"): [],
(schema, "remote_table_2"): [],
(schema, "noncol_idx_test_nopk"): [],
(schema, "noncol_idx_test_pk"): [],
(schema, self.temp_table_name()): [],
}
res = self._resolve_kind(kind, tables, views, materialized)
res = self._resolve_names(schema, scope, filter_names, res)
return res
@property
def _required_cc_keys(self):
return {"name", "sqltext"}
@testing.requires.schema_reflection
def test_get_schema_names(self, connection):
insp = inspect(connection)
is_true(testing.config.test_schema in insp.get_schema_names())
@testing.requires.schema_reflection
def test_has_schema(self, connection):
insp = inspect(connection)
is_true(insp.has_schema(testing.config.test_schema))
is_false(insp.has_schema("sa_fake_schema_foo"))
@testing.requires.schema_reflection
def test_get_schema_names_w_translate_map(self, connection):
"""test #7300"""
connection = connection.execution_options(
schema_translate_map={
"foo": "bar",
BLANK_SCHEMA: testing.config.test_schema,
}
)
insp = inspect(connection)
is_true(testing.config.test_schema in insp.get_schema_names())
@testing.requires.schema_reflection
def test_has_schema_w_translate_map(self, connection):
connection = connection.execution_options(
schema_translate_map={
"foo": "bar",
BLANK_SCHEMA: testing.config.test_schema,
}
)
insp = inspect(connection)
is_true(insp.has_schema(testing.config.test_schema))
is_false(insp.has_schema("sa_fake_schema_foo"))
@testing.requires.schema_reflection
@testing.requires.schema_create_delete
def test_schema_cache(self, connection):
insp = inspect(connection)
is_false("foo_bar" in insp.get_schema_names())
is_false(insp.has_schema("foo_bar"))
connection.execute(DDL("CREATE SCHEMA foo_bar"))
try:
is_false("foo_bar" in insp.get_schema_names())
is_false(insp.has_schema("foo_bar"))
insp.clear_cache()
is_true("foo_bar" in insp.get_schema_names())
is_true(insp.has_schema("foo_bar"))
finally:
connection.execute(DDL("DROP SCHEMA foo_bar"))
@testing.requires.schema_reflection
def test_dialect_initialize(self):
engine = engines.testing_engine()
inspect(engine)
assert hasattr(engine.dialect, "default_schema_name")
@testing.requires.schema_reflection
def test_get_default_schema_name(self, connection):
insp = inspect(connection)
eq_(insp.default_schema_name, connection.dialect.default_schema_name)
@testing.combinations(
None,
("foreign_key", testing.requires.foreign_key_constraint_reflection),
argnames="order_by",
)
@testing.combinations(
(True, testing.requires.schemas), False, argnames="use_schema"
)
def test_get_table_names(self, connection, order_by, use_schema):
if use_schema:
schema = config.test_schema
else:
schema = None
_ignore_tables = {
"comment_test",
"noncol_idx_test_pk",
"noncol_idx_test_nopk",
"local_table",
"remote_table",
"remote_table_2",
"no_constraints",
}
insp = inspect(connection)
if order_by:
tables = [
rec[0]
for rec in insp.get_sorted_table_and_fkc_names(schema)
if rec[0]
]
else:
tables = insp.get_table_names(schema)
table_names = [t for t in tables if t not in _ignore_tables]
if order_by == "foreign_key":
answer = ["users", "email_addresses", "dingalings"]
eq_(table_names, answer)
else:
answer = ["dingalings", "email_addresses", "users"]
eq_(sorted(table_names), answer)
@testing.combinations(
(True, testing.requires.schemas), False, argnames="use_schema"
)
def test_get_view_names(self, connection, use_schema):
insp = inspect(connection)
if use_schema:
schema = config.test_schema
else:
schema = None
table_names = insp.get_view_names(schema)
if testing.requires.materialized_views.enabled:
eq_(sorted(table_names), ["email_addresses_v", "users_v"])
eq_(insp.get_materialized_view_names(schema), ["dingalings_v"])
else:
answer = ["dingalings_v", "email_addresses_v", "users_v"]
eq_(sorted(table_names), answer)
@testing.requires.temp_table_names
def test_get_temp_table_names(self, connection):
insp = inspect(connection)
temp_table_names = insp.get_temp_table_names()
eq_(sorted(temp_table_names), [f"user_tmp_{config.ident}"])
@testing.requires.view_reflection
@testing.requires.temporary_views
def test_get_temp_view_names(self, connection):
insp = inspect(connection)
temp_table_names = insp.get_temp_view_names()
eq_(sorted(temp_table_names), ["user_tmp_v"])
@testing.requires.comment_reflection
def test_get_comments(self, connection):
self._test_get_comments(connection)
@testing.requires.comment_reflection
@testing.requires.schemas
def test_get_comments_with_schema(self, connection):
self._test_get_comments(connection, testing.config.test_schema)
def _test_get_comments(self, connection, schema=None):
insp = inspect(connection)
exp = self.exp_comments(schema=schema)
eq_(
insp.get_table_comment("comment_test", schema=schema),
exp[(schema, "comment_test")],
)
eq_(
insp.get_table_comment("users", schema=schema),
exp[(schema, "users")],
)
eq_(
insp.get_table_comment("comment_test", schema=schema),
exp[(schema, "comment_test")],
)
no_cst = self.tables.no_constraints.name
eq_(
insp.get_table_comment(no_cst, schema=schema),
exp[(schema, no_cst)],
)
@testing.combinations(
(False, False),
(False, True, testing.requires.schemas),
(True, False, testing.requires.view_reflection),
(
True,
True,
testing.requires.schemas + testing.requires.view_reflection,
),
argnames="use_views,use_schema",
)
def test_get_columns(self, connection, use_views, use_schema):
if use_schema:
schema = config.test_schema
else:
schema = None
users, addresses = (self.tables.users, self.tables.email_addresses)
if use_views:
table_names = ["users_v", "email_addresses_v", "dingalings_v"]
else:
table_names = ["users", "email_addresses"]
insp = inspect(connection)
for table_name, table in zip(table_names, (users, addresses)):
schema_name = schema
cols = insp.get_columns(table_name, schema=schema_name)
is_true(len(cols) > 0, len(cols))
# should be in order
for i, col in enumerate(table.columns):
eq_(col.name, cols[i]["name"])
ctype = cols[i]["type"].__class__
ctype_def = col.type
if isinstance(ctype_def, sa.types.TypeEngine):
ctype_def = ctype_def.__class__
# Oracle returns Date for DateTime.
if testing.against("oracle") and ctype_def in (
sql_types.Date,
sql_types.DateTime,
):
ctype_def = sql_types.Date
# assert that the desired type and return type share
# a base within one of the generic types.
is_true(
len(
set(ctype.__mro__)
.intersection(ctype_def.__mro__)
.intersection(
[
sql_types.Integer,
sql_types.Numeric,
sql_types.Float,
sql_types.DateTime,
sql_types.Date,
sql_types.Time,
sql_types.String,
sql_types._Binary,
]
)
)
> 0,
"%s(%s), %s(%s)"
% (col.name, col.type, cols[i]["name"], ctype),
)
if not col.primary_key:
assert cols[i]["default"] is None
# The case of a table with no column
# is tested below in TableNoColumnsTest
@testing.requires.temp_table_reflection
def test_reflect_table_temp_table(self, connection):
table_name = self.temp_table_name()
user_tmp = self.tables[table_name]
reflected_user_tmp = Table(
table_name, MetaData(), autoload_with=connection
)
self.assert_tables_equal(
user_tmp, reflected_user_tmp, strict_constraints=False
)
@testing.requires.temp_table_reflection
def test_get_temp_table_columns(self, connection):
table_name = self.temp_table_name()
user_tmp = self.tables[table_name]
insp = inspect(connection)
cols = insp.get_columns(table_name)
is_true(len(cols) > 0, len(cols))
for i, col in enumerate(user_tmp.columns):
eq_(col.name, cols[i]["name"])
@testing.requires.temp_table_reflection
@testing.requires.view_column_reflection
@testing.requires.temporary_views
def test_get_temp_view_columns(self, connection):
insp = inspect(connection)
cols = insp.get_columns("user_tmp_v")
eq_([col["name"] for col in cols], ["id", "name", "foo"])
@testing.combinations(
(False,), (True, testing.requires.schemas), argnames="use_schema"
)
@testing.requires.primary_key_constraint_reflection
def test_get_pk_constraint(self, connection, use_schema):
if use_schema:
schema = testing.config.test_schema
else:
schema = None
users, addresses = self.tables.users, self.tables.email_addresses
insp = inspect(connection)
exp = self.exp_pks(schema=schema)
users_cons = insp.get_pk_constraint(users.name, schema=schema)
self._check_list(
[users_cons], [exp[(schema, users.name)]], self._required_pk_keys
)
addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
exp_cols = exp[(schema, addresses.name)]["constrained_columns"]
eq_(addr_cons["constrained_columns"], exp_cols)
with testing.requires.reflects_pk_names.fail_if():
eq_(addr_cons["name"], "email_ad_pk")
no_cst = self.tables.no_constraints.name
self._check_list(
[insp.get_pk_constraint(no_cst, schema=schema)],
[exp[(schema, no_cst)]],
self._required_pk_keys,
)
@testing.combinations(
"PK_test_table",
"pk_test_table",
"mixedCasePK",
"pk.with.dots",
argnames="pk_name",
)
@testing.requires.primary_key_constraint_reflection
@testing.requires.reflects_pk_names
def test_get_pk_constraint_quoted_name(
self, connection, metadata, pk_name
):
"""Test that primary key constraint names with various casing are
properly reflected."""
Table(
"test_table",
metadata,
Column("id", Integer),
Column("data", String(50)),
sa.PrimaryKeyConstraint("id", name=pk_name),
)
metadata.create_all(connection)
insp = inspect(connection)
pk_cons = insp.get_pk_constraint("test_table")
eq_(pk_cons["name"], pk_name)
eq_(pk_cons["constrained_columns"], ["id"])
@testing.combinations(
(False,), (True, testing.requires.schemas), argnames="use_schema"
)
@testing.requires.foreign_key_constraint_reflection
def test_get_foreign_keys(self, connection, use_schema):
if use_schema:
schema = config.test_schema
else:
schema = None
users, addresses = (self.tables.users, self.tables.email_addresses)
insp = inspect(connection)
expected_schema = schema
# users
if testing.requires.self_referential_foreign_keys.enabled:
users_fkeys = insp.get_foreign_keys(users.name, schema=schema)
fkey1 = users_fkeys[0]
with testing.requires.named_constraints.fail_if():
eq_(fkey1["name"], "user_id_fk")
eq_(fkey1["referred_schema"], expected_schema)
eq_(fkey1["referred_table"], users.name)
eq_(fkey1["referred_columns"], ["user_id"])
eq_(fkey1["constrained_columns"], ["parent_user_id"])
# addresses
addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema)
fkey1 = addr_fkeys[0]
with testing.requires.implicitly_named_constraints.fail_if():
is_true(fkey1["name"] is not None)
eq_(fkey1["referred_schema"], expected_schema)
eq_(fkey1["referred_table"], users.name)
eq_(fkey1["referred_columns"], ["user_id"])
eq_(fkey1["constrained_columns"], ["remote_user_id"])
no_cst = self.tables.no_constraints.name
eq_(insp.get_foreign_keys(no_cst, schema=schema), [])
@testing.combinations(
"FK_users_id",
"fk_users_id",
"mixedCaseName",
"fk.with.dots",
argnames="fk_name",
)
@testing.requires.foreign_key_constraint_reflection
def test_get_foreign_keys_quoted_name(self, connection, metadata, fk_name):
"""Test that foreign key constraint names with various casing are
properly reflected."""
Table(
"users_ref",
metadata,
Column("user_id", Integer, primary_key=True),
test_needs_fk=True,
)
Table(
"user_orders",
metadata,
Column("order_id", Integer, primary_key=True),
Column("user_id", Integer),
sa.ForeignKeyConstraint(
["user_id"],
["users_ref.user_id"],
name=fk_name,
),
test_needs_fk=True,
)
metadata.create_all(connection)
insp = inspect(connection)
fkeys = insp.get_foreign_keys("user_orders")
eq_(len(fkeys), 1)
fkey = fkeys[0]
with testing.requires.named_constraints.fail_if():
eq_(fkey["name"], fk_name)
eq_(fkey["referred_table"], "users_ref")
eq_(fkey["referred_columns"], ["user_id"])
eq_(fkey["constrained_columns"], ["user_id"])
@testing.requires.cross_schema_fk_reflection
@testing.requires.schemas
def test_get_inter_schema_foreign_keys(self, connection):
local_table, remote_table, remote_table_2 = self.tables(
"%s.local_table" % connection.dialect.default_schema_name,
"%s.remote_table" % testing.config.test_schema,
"%s.remote_table_2" % testing.config.test_schema,
)
insp = inspect(connection)
local_fkeys = insp.get_foreign_keys(local_table.name)
eq_(len(local_fkeys), 1)
fkey1 = local_fkeys[0]
eq_(fkey1["referred_schema"], testing.config.test_schema)
eq_(fkey1["referred_table"], remote_table_2.name)
eq_(fkey1["referred_columns"], ["id"])
eq_(fkey1["constrained_columns"], ["remote_id"])
remote_fkeys = insp.get_foreign_keys(
remote_table.name, schema=testing.config.test_schema
)
eq_(len(remote_fkeys), 1)
fkey2 = remote_fkeys[0]
is_true(
fkey2["referred_schema"]
in (
None,
connection.dialect.default_schema_name,
)
)
eq_(fkey2["referred_table"], local_table.name)
eq_(fkey2["referred_columns"], ["id"])
eq_(fkey2["constrained_columns"], ["local_id"])
@testing.combinations(
(False,), (True, testing.requires.schemas), argnames="use_schema"
)
@testing.requires.index_reflection
def test_get_indexes(self, connection, use_schema):
if use_schema:
schema = config.test_schema
else:
schema = None
# The database may decide to create indexes for foreign keys, etc.
# so there may be more indexes than expected.
insp = inspect(connection)
indexes = insp.get_indexes("users", schema=schema)
exp = self.exp_indexes(schema=schema)
self._check_list(
indexes, exp[(schema, "users")], self._required_index_keys
)
no_cst = self.tables.no_constraints.name
self._check_list(
insp.get_indexes(no_cst, schema=schema),
exp[(schema, no_cst)],
self._required_index_keys,
)
@testing.combinations(
("noncol_idx_test_nopk", "noncol_idx_nopk"),
("noncol_idx_test_pk", "noncol_idx_pk"),
argnames="tname,ixname",
)
@testing.requires.index_reflection
@testing.requires.indexes_with_ascdesc
@testing.requires.reflect_indexes_with_ascdesc
def test_get_noncol_index(self, connection, tname, ixname):
insp = inspect(connection)
indexes = insp.get_indexes(tname)
# reflecting an index that has "x DESC" in it as the column.
# the DB may or may not give us "x", but make sure we get the index
# back, it has a name, it's connected to the table.
expected_indexes = self.exp_indexes()[(None, tname)]
self._check_list(indexes, expected_indexes, self._required_index_keys)
t = Table(tname, MetaData(), autoload_with=connection)
eq_(len(t.indexes), 1)
is_(list(t.indexes)[0].table, t)
eq_(list(t.indexes)[0].name, ixname)
@testing.combinations(
"IX_test_data",
"ix_test_data",
"mixedCaseIndex",
"ix.with.dots",
argnames="idx_name",
)
@testing.requires.index_reflection
def test_get_indexes_quoted_name(self, connection, metadata, idx_name):
"""Test that index names with various casing are properly reflected."""
t = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
Index(idx_name, t.c.data)
metadata.create_all(connection)
insp = inspect(connection)
indexes = insp.get_indexes("test_table")
index_names = [idx["name"] for idx in indexes]
assert idx_name in index_names, f"Expected {idx_name} in {index_names}"
# Find the specific index
matching_idx = [idx for idx in indexes if idx["name"] == idx_name]
eq_(len(matching_idx), 1)
eq_(matching_idx[0]["column_names"], ["data"])
@testing.requires.temp_table_reflection
@testing.requires.unique_constraint_reflection
def test_get_temp_table_unique_constraints(self, connection):
insp = inspect(connection)
name = self.temp_table_name()
reflected = insp.get_unique_constraints(name)
exp = self.exp_ucs(all_=True)[(None, name)]
self._check_list(reflected, exp, self._required_index_keys)
@testing.requires.temp_table_reflect_indexes
def test_get_temp_table_indexes(self, connection):
insp = inspect(connection)
table_name = self.temp_table_name()
indexes = insp.get_indexes(table_name)
for ind in indexes:
ind.pop("dialect_options", None)
expected = [
{"unique": False, "column_names": ["foo"], "name": "user_tmp_ix"}
]
if testing.requires.index_reflects_included_columns.enabled:
expected[0]["include_columns"] = []
eq_(
[idx for idx in indexes if idx["name"] == "user_tmp_ix"],
expected,
)
@testing.combinations(
(True, testing.requires.schemas), (False,), argnames="use_schema"
)
@testing.requires.unique_constraint_reflection
def test_get_unique_constraints(self, metadata, connection, use_schema):
# SQLite dialect needs to parse the names of the constraints
# separately from what it gets from PRAGMA index_list(), and
# then matches them up. so same set of column_names in two
# constraints will confuse it. Perhaps we should no longer
# bother with index_list() here since we have the whole
# CREATE TABLE?
if use_schema:
schema = config.test_schema
else:
schema = None
uniques = sorted(
[
{"name": "unique_a", "column_names": ["a"]},
{"name": "unique_a_b_c", "column_names": ["a", "b", "c"]},
{"name": "unique_c_a_b", "column_names": ["c", "a", "b"]},
{"name": "unique_asc_key", "column_names": ["asc", "key"]},
{"name": "i.have.dots", "column_names": ["b"]},
{"name": "i have spaces", "column_names": ["c"]},
],
key=operator.itemgetter("name"),
)
table = Table(
"testtbl",
metadata,
Column("a", sa.String(20)),
Column("b", sa.String(30)),
Column("c", sa.Integer),
# reserved identifiers
Column("asc", sa.String(30)),
Column("key", sa.String(30)),
schema=schema,
)
for uc in uniques:
table.append_constraint(
sa.UniqueConstraint(*uc["column_names"], name=uc["name"])
)
table.create(connection)
insp = inspect(connection)
reflected = sorted(
insp.get_unique_constraints("testtbl", schema=schema),
key=operator.itemgetter("name"),
)
names_that_duplicate_index = set()
eq_(len(uniques), len(reflected))
for orig, refl in zip(uniques, reflected):
# Different dialects handle duplicate index and constraints
# differently, so ignore this flag
dupe = refl.pop("duplicates_index", None)
if dupe:
names_that_duplicate_index.add(dupe)
eq_(refl.pop("comment", None), None)
# ignore dialect_options
refl.pop("dialect_options", None)
eq_(orig, refl)
reflected_metadata = MetaData()
reflected = Table(
"testtbl",
reflected_metadata,
autoload_with=connection,
schema=schema,
)
# test "deduplicates for index" logic. MySQL and Oracle
# "unique constraints" are actually unique indexes (with possible
# exception of a unique that is a dupe of another one in the case
        # of Oracle). make sure they aren't duplicated.
idx_names = {idx.name for idx in reflected.indexes}
uq_names = {
uq.name
for uq in reflected.constraints
if isinstance(uq, sa.UniqueConstraint)
}.difference(["unique_c_a_b"])
assert not idx_names.intersection(uq_names)
if names_that_duplicate_index:
eq_(names_that_duplicate_index, idx_names)
eq_(uq_names, set())
no_cst = self.tables.no_constraints.name
eq_(insp.get_unique_constraints(no_cst, schema=schema), [])
@testing.combinations(
"UQ_email",
"uq_email",
"mixedCaseUQ",
"uq.with.dots",
argnames="uq_name",
)
@testing.requires.unique_constraint_reflection
def test_get_unique_constraints_quoted_name(
self, connection, metadata, uq_name
):
"""Test that unique constraint names with various casing are
properly reflected."""
Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("email", String(50)),
sa.UniqueConstraint("email", name=uq_name),
)
metadata.create_all(connection)
insp = inspect(connection)
uq_cons = insp.get_unique_constraints("test_table")
eq_(len(uq_cons), 1)
eq_(uq_cons[0]["name"], uq_name)
eq_(uq_cons[0]["column_names"], ["email"])
@testing.requires.view_reflection
@testing.combinations(
(False,), (True, testing.requires.schemas), argnames="use_schema"
)
def test_get_view_definition(self, connection, use_schema):
if use_schema:
schema = config.test_schema
else:
schema = None
insp = inspect(connection)
for view in ["users_v", "email_addresses_v", "dingalings_v"]:
v = insp.get_view_definition(view, schema=schema)
is_true(bool(v))
@testing.requires.view_reflection
def test_get_view_definition_does_not_exist(self, connection):
insp = inspect(connection)
with expect_raises(NoSuchTableError):
insp.get_view_definition("view_does_not_exist")
with expect_raises(NoSuchTableError):
insp.get_view_definition("users") # a table
@testing.requires.table_reflection
def test_autoincrement_col(self, connection):
"""test that 'autoincrement' is reflected according to sqla's policy.
        Don't mark this test as unsupported for any backend!
(technically it fails with MySQL InnoDB since "id" comes before "id2")
A backend is better off not returning "autoincrement" at all,
instead of potentially returning "False" for an auto-incrementing
primary key column.
"""
insp = inspect(connection)
for tname, cname in [
("users", "user_id"),
("email_addresses", "address_id"),
("dingalings", "dingaling_id"),
]:
cols = insp.get_columns(tname)
id_ = {c["name"]: c for c in cols}[cname]
assert id_.get("autoincrement", True)
@testing.combinations(
(True, testing.requires.schemas), (False,), argnames="use_schema"
)
def test_get_table_options(self, use_schema):
insp = inspect(config.db)
schema = config.test_schema if use_schema else None
if testing.requires.reflect_table_options.enabled:
res = insp.get_table_options("users", schema=schema)
is_true(isinstance(res, dict))
# NOTE: can't really create a table with no option
res = insp.get_table_options("no_constraints", schema=schema)
is_true(isinstance(res, dict))
else:
with expect_raises(NotImplementedError):
insp.get_table_options("users", schema=schema)
@testing.combinations((True, testing.requires.schemas), False)
def test_multi_get_table_options(self, use_schema):
insp = inspect(config.db)
if testing.requires.reflect_table_options.enabled:
schema = config.test_schema if use_schema else None
res = insp.get_multi_table_options(schema=schema)
exp = {
(schema, table): insp.get_table_options(table, schema=schema)
for table in insp.get_table_names(schema=schema)
}
eq_(res, exp)
else:
with expect_raises(NotImplementedError):
insp.get_multi_table_options()
@testing.fixture
def get_multi_exp(self, connection):
def provide_fixture(
schema, scope, kind, use_filter, single_reflect_fn, exp_method
):
insp = inspect(connection)
# call the reflection function at least once to avoid
# "Unexpected success" errors if the result is actually empty
# and NotImplementedError is not raised
single_reflect_fn(insp, "email_addresses")
kw = {"scope": scope, "kind": kind}
if schema:
schema = schema()
filter_names = []
if ObjectKind.TABLE in kind:
filter_names.extend(
["comment_test", "users", "does-not-exist"]
)
if ObjectKind.VIEW in kind:
filter_names.extend(["email_addresses_v", "does-not-exist"])
if ObjectKind.MATERIALIZED_VIEW in kind:
filter_names.extend(["dingalings_v", "does-not-exist"])
if schema:
kw["schema"] = schema
if use_filter:
kw["filter_names"] = filter_names
exp = exp_method(
schema=schema,
scope=scope,
kind=kind,
filter_names=kw.get("filter_names"),
)
kws = [kw]
if scope == ObjectScope.DEFAULT:
nkw = kw.copy()
nkw.pop("scope")
kws.append(nkw)
if kind == ObjectKind.TABLE:
nkw = kw.copy()
nkw.pop("kind")
kws.append(nkw)
return inspect(connection), kws, exp
return provide_fixture
@testing.requires.reflect_table_options
@_multi_combination
def test_multi_get_table_options_tables(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_table_options,
self.exp_options,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_table_options(**kw)
eq_(result, exp)
@testing.requires.comment_reflection
@_multi_combination
def test_get_multi_table_comment(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_table_comment,
self.exp_comments,
)
for kw in kws:
insp.clear_cache()
eq_(insp.get_multi_table_comment(**kw), exp)
def _check_expressions(self, result, exp, err_msg):
def _clean(text: str):
return re.sub(r"['\" ]", "", text).lower()
if isinstance(exp, dict):
eq_({_clean(e): v for e, v in result.items()}, exp, err_msg)
else:
eq_([_clean(e) for e in result], exp, err_msg)
def _check_list(self, result, exp, req_keys=None, msg=None):
if req_keys is None:
eq_(result, exp, msg)
else:
eq_(len(result), len(exp), msg)
for r, e in zip(result, exp):
for k in set(r) | set(e):
if k in req_keys or (k in r and k in e):
err_msg = f"{msg} - {k} - {r}"
if k in ("expressions", "column_sorting"):
self._check_expressions(r[k], e[k], err_msg)
else:
eq_(r[k], e[k], err_msg)
def _check_table_dict(self, result, exp, req_keys=None, make_lists=False):
eq_(set(result.keys()), set(exp.keys()))
for k in result:
r, e = result[k], exp[k]
if make_lists:
r, e = [r], [e]
self._check_list(r, e, req_keys, k)
@_multi_combination
def test_get_multi_columns(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_columns,
self.exp_columns,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_columns(**kw)
self._check_table_dict(result, exp, self._required_column_keys)
@testing.requires.primary_key_constraint_reflection
@_multi_combination
def test_get_multi_pk_constraint(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_pk_constraint,
self.exp_pks,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_pk_constraint(**kw)
self._check_table_dict(
result, exp, self._required_pk_keys, make_lists=True
)
def _adjust_sort(self, result, expected, key):
if not testing.requires.implicitly_named_constraints.enabled:
for obj in [result, expected]:
for val in obj.values():
if len(val) > 1 and any(
v.get("name") in (None, mock.ANY) for v in val
):
val.sort(key=key)
@testing.requires.foreign_key_constraint_reflection
@_multi_combination
def test_get_multi_foreign_keys(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_foreign_keys,
self.exp_fks,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_foreign_keys(**kw)
self._adjust_sort(
result, exp, lambda d: tuple(d["constrained_columns"])
)
self._check_table_dict(result, exp, self._required_fk_keys)
@testing.requires.index_reflection
@_multi_combination
def test_get_multi_indexes(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_indexes,
self.exp_indexes,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_indexes(**kw)
self._check_table_dict(result, exp, self._required_index_keys)
@testing.requires.unique_constraint_reflection
@_multi_combination
def test_get_multi_unique_constraints(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_unique_constraints,
self.exp_ucs,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_unique_constraints(**kw)
self._adjust_sort(result, exp, lambda d: tuple(d["column_names"]))
self._check_table_dict(result, exp, self._required_unique_cst_keys)
@testing.requires.check_constraint_reflection
@_multi_combination
def test_get_multi_check_constraints(
self, get_multi_exp, schema, scope, kind, use_filter
):
insp, kws, exp = get_multi_exp(
schema,
scope,
kind,
use_filter,
Inspector.get_check_constraints,
self.exp_ccs,
)
for kw in kws:
insp.clear_cache()
result = insp.get_multi_check_constraints(**kw)
self._adjust_sort(result, exp, lambda d: tuple(d["sqltext"]))
self._check_table_dict(result, exp, self._required_cc_keys)
@testing.combinations(
("get_table_options", testing.requires.reflect_table_options),
"get_columns",
(
"get_pk_constraint",
testing.requires.primary_key_constraint_reflection,
),
(
"get_foreign_keys",
testing.requires.foreign_key_constraint_reflection,
),
("get_indexes", testing.requires.index_reflection),
(
"get_unique_constraints",
testing.requires.unique_constraint_reflection,
),
(
"get_check_constraints",
testing.requires.check_constraint_reflection,
),
("get_table_comment", testing.requires.comment_reflection),
argnames="method",
)
def test_not_existing_table(self, method, connection):
insp = inspect(connection)
meth = getattr(insp, method)
with expect_raises(NoSuchTableError):
meth("table_does_not_exists")
def test_unreflectable(self, connection):
mc = Inspector.get_multi_columns
def patched(*a, **k):
ur = k.setdefault("unreflectable", {})
ur[(None, "some_table")] = UnreflectableTableError("err")
return mc(*a, **k)
with mock.patch.object(Inspector, "get_multi_columns", patched):
with expect_raises_message(UnreflectableTableError, "err"):
inspect(connection).reflect_table(
Table("some_table", MetaData()), None
)
@testing.combinations(True, False, argnames="use_schema")
@testing.combinations(
(True, testing.requires.views), False, argnames="views"
)
def test_metadata(self, connection, use_schema, views):
m = MetaData()
schema = config.test_schema if use_schema else None
m.reflect(connection, schema=schema, views=views, resolve_fks=False)
insp = inspect(connection)
tables = insp.get_table_names(schema)
if views:
tables += insp.get_view_names(schema)
try:
tables += insp.get_materialized_view_names(schema)
except NotImplementedError:
pass
if schema:
tables = [f"{schema}.{t}" for t in tables]
eq_(sorted(m.tables), sorted(tables))
@testing.requires.comment_reflection
def test_comments_unicode(self, connection, metadata):
Table(
"unicode_comments",
metadata,
Column("unicode", Integer, comment="é試蛇ẟΩ"),
Column("emoji", Integer, comment="☁️✨"),
comment="試蛇ẟΩ✨",
)
metadata.create_all(connection)
insp = inspect(connection)
tc = insp.get_table_comment("unicode_comments")
eq_(tc, {"text": "試蛇ẟΩ✨"})
cols = insp.get_columns("unicode_comments")
value = {c["name"]: c["comment"] for c in cols}
exp = {"unicode": "é試蛇ẟΩ", "emoji": "☁️✨"}
eq_(value, exp)
@testing.requires.comment_reflection_full_unicode
def test_comments_unicode_full(self, connection, metadata):
Table(
"unicode_comments",
metadata,
Column("emoji", Integer, comment="🐍🧙🝝🧙♂️🧙♀️"),
comment="🎩🁰🝑🤷♀️🤷♂️",
)
metadata.create_all(connection)
insp = inspect(connection)
tc = insp.get_table_comment("unicode_comments")
eq_(tc, {"text": "🎩🁰🝑🤷♀️🤷♂️"})
c = insp.get_columns("unicode_comments")[0]
eq_({c["name"]: c["comment"]}, {"emoji": "🐍🧙🝝🧙♂️🧙♀️"})
@testing.requires.column_collation_reflection
@testing.requires.order_by_collation
def test_column_collation_reflection(self, connection, metadata):
collation = testing.requires.get_order_by_collation(config)
Table(
"t",
metadata,
Column("collated", sa.String(collation=collation)),
Column("not_collated", sa.String()),
)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(t2.c.collated.type.collation, collation)
is_none(t2.c.not_collated.type.collation)
insp = inspect(connection)
collated, not_collated = insp.get_columns("t")
eq_(collated["type"].collation, collation)
is_none(not_collated["type"].collation)
|
ComponentReflectionTest
|
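As a quick illustration of the Inspector API the suite above exercises, here is a minimal sketch (not part of the record; assumes SQLAlchemy with its bundled SQLite support, and the table/constraint names are made up):
```python
# Minimal sketch of the runtime-inspection calls tested above; names are illustrative.
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
metadata = sa.MetaData()
sa.Table(
    "testtbl",
    metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("a", sa.String(20)),
    sa.Column("b", sa.String(30)),
    sa.UniqueConstraint("a", name="unique_a"),
    sa.Index("ix_b", "b"),
)
metadata.create_all(engine)

insp = sa.inspect(engine)
print(insp.get_indexes("testtbl"))             # e.g. [{'name': 'ix_b', 'column_names': ['b'], ...}]
print(insp.get_unique_constraints("testtbl"))  # e.g. [{'name': 'unique_a', 'column_names': ['a'], ...}]
```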
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/utils/object_identity.py
|
{
"start": 3135,
"end": 4092
}
|
class ____(collections.abc.MutableMapping):
"""A mutable mapping data structure which compares using "is".
This is necessary because we have trackable objects (_ListWrapper) which
have behavior identical to built-in Python lists (including being unhashable
and comparing based on the equality of their contents by default).
"""
__slots__ = ["_storage"]
def __init__(self):
self._storage = {}
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __getitem__(self, key):
return self._storage[self._wrap_key(key)]
def __setitem__(self, key, value):
self._storage[self._wrap_key(key)] = value
def __delitem__(self, key):
del self._storage[self._wrap_key(key)]
def __len__(self):
return len(self._storage)
def __iter__(self):
for key in self._storage:
yield key.unwrapped
def __repr__(self):
return "ObjectIdentityDictionary(%s)" % repr(self._storage)
|
ObjectIdentityDictionary
|
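A small sketch of the identity-keyed behavior described in the docstring above, assuming a TensorFlow version that still ships this private module (import path taken from the record's path field):
```python
# Two equal-but-distinct (and unhashable) lists get separate entries,
# because keys are compared by identity rather than equality.
from tensorflow.python.keras.utils.object_identity import ObjectIdentityDictionary

a, b = [], []                 # a == b, but a is not b; a plain dict cannot hash them at all
d = ObjectIdentityDictionary()
d[a] = "first"
d[b] = "second"
print(len(d))                 # 2
print(d[a], d[b])             # first second
```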
python
|
django__django
|
tests/settings_tests/tests.py
|
{
"start": 3478,
"end": 4394
}
|
class ____(ClassDecoratedTestCaseSuper):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.foo = getattr(settings, "TEST", "BUG")
def test_override(self):
self.assertEqual(settings.TEST, "override")
def test_setupclass_override(self):
"""Settings are overridden within setUpClass (#21281)."""
self.assertEqual(self.foo, "override")
@override_settings(TEST="override2")
def test_method_override(self):
self.assertEqual(settings.TEST, "override2")
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
super().test_max_recursion_error()
@modify_settings(ITEMS={"append": "mother"})
@override_settings(ITEMS=["father"], TEST="override-parent")
|
ClassDecoratedTestCase
|
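A hedged sketch of the settings decorators exercised above, assuming a configured Django project (the setting names `ITEMS` and `TEST` are arbitrary, as in the original test):
```python
# Sketch only: running this requires DJANGO_SETTINGS_MODULE to point at a valid settings module.
from django.conf import settings
from django.test import TestCase, modify_settings, override_settings


@modify_settings(ITEMS={"append": "c"})
@override_settings(ITEMS=["a", "b"], TEST="x")
class SettingsOverrideSketch(TestCase):
    def test_values(self):
        # override_settings replaces the value, modify_settings then appends to it
        self.assertEqual(settings.ITEMS, ["a", "b", "c"])
        self.assertEqual(settings.TEST, "x")
```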
python
|
huggingface__transformers
|
tests/models/glm4v_moe/test_modeling_glm4v_moe.py
|
{
"start": 11234,
"end": 17740
}
|
class ____(unittest.TestCase):
model = None
@classmethod
def get_model(cls):
if cls.model is None:
cls.model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V", dtype="auto", device_map="auto"
)
return cls.model
@classmethod
def tearDownClass(cls):
if hasattr(cls, "model"):
del cls.model
cleanup(torch_device, gc_collect=True)
def setUp(self):
cleanup(torch_device, gc_collect=True)
self.processor = AutoProcessor.from_pretrained(
"zai-org/GLM-4.5V", size={"shortest_edge": 10800, "longest_edge": 10800}
)
self.message = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
self.message2 = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
self.message_wo_image = [
{"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
]
question = "Describe this video."
video_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"
self.video_messages = [
{
"role": "user",
"content": [
{
"type": "video",
"video": video_url,
},
{"type": "text", "text": question},
],
}
]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_small_model_integration_test(self):
inputs = self.processor.apply_chat_template(
self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
)
expected_input_ids = [151331, 151333, 151336, 198, 151339, 151363, 151363, 151363, 151363, 151363, 151363, 151340, 3838, 3093, 315, 5562, 374] # fmt: skip
assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
expected_pixel_slice = torch.tensor(
[
[-0.1134, -0.4492, -0.8580],
[-0.6244, -1.1645, -0.7120],
[-0.3324, -0.7996, -0.7120],
[0.2077, 0.2223, 0.4121],
[0.4413, 0.1931, 0.4559],
[0.5873, 0.3099, 0.4851],
],
dtype=torch.float32,
device="cpu",
)
torch.testing.assert_close(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=1e-4, rtol=1e-4)
def test_small_model_integration_test_batch(self):
model = self.get_model()
batch_messages = [self.message, self.message2, self.message_wo_image]
inputs = self.processor.apply_chat_template(
batch_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=10)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's try to figure out",
"\nWhat kind of dog is this?\n<think>Got it, let's see. The user",
'\nWho are you?\n<think>The user is asking "Who are you?"'
] # fmt: skip
decoded = self.processor.batch_decode(output, skip_special_tokens=True)
decoded = [x.replace("<|image|>", "") for x in decoded]
self.assertEqual(
decoded,
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_with_video(self):
processor = AutoProcessor.from_pretrained("zai-org/GLM-4.5V", max_image_size={"longest_edge": 50176})
model = self.get_model()
batch_messages = [self.video_messages]
inputs = processor.apply_chat_template(
batch_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=3)
EXPECTED_DECODED_TEXT = ["\n012345Describe this video.\n<think>Got it"] # fmt: skip
decoded = processor.batch_decode(output, skip_special_tokens=True)
decoded = [x.replace("<|image|>", "") for x in decoded]
self.assertEqual(
decoded,
EXPECTED_DECODED_TEXT,
)
@run_first
@require_flash_attn
@require_torch_gpu
def test_small_model_integration_test_batch_flashatt2(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
batch_messages = [self.message, self.message2, self.message_wo_image]
inputs = self.processor.apply_chat_template(
batch_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=3)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it",
"\nWhat kind of dog is this?\n<think>Got it",
"\nWho are you?\n<think>The user",
] # fmt: skip
decoded = self.processor.batch_decode(output, skip_special_tokens=True)
decoded = [x.replace("<|image|>", "") for x in decoded]
self.assertEqual(
decoded,
EXPECTED_DECODED_TEXT,
)
|
Glm4vMoeIntegrationTest
|
python
|
mkdocs__mkdocs
|
mkdocs/utils/__init__.py
|
{
"start": 10963,
"end": 11543
}
|
class ____(logging.NullHandler):
"""Counts all logged messages >= level."""
def __init__(self, **kwargs) -> None:
self.counts: dict[int, int] = defaultdict(int)
super().__init__(**kwargs)
def handle(self, record):
rv = self.filter(record)
if rv:
# Use levelno for keys so they can be sorted later
self.counts[record.levelno] += 1
return rv
def get_counts(self) -> list[tuple[str, int]]:
return [(logging.getLevelName(k), v) for k, v in sorted(self.counts.items(), reverse=True)]
|
CountHandler
|
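A usage sketch for the counting handler above, wiring it into the standard `logging` module (assumes mkdocs is installed so the module in the record's path is importable):
```python
import logging

from mkdocs.utils import CountHandler

handler = CountHandler()
handler.setLevel(logging.WARNING)   # only records at WARNING or above reach the handler
log = logging.getLogger("demo")
log.setLevel(logging.INFO)
log.addHandler(handler)

log.warning("something odd")
log.error("something broken")
log.info("not counted (below the handler's level)")

print(handler.get_counts())         # [('ERROR', 1), ('WARNING', 1)]
```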
python
|
getsentry__sentry
|
src/sentry/integrations/slack/analytics.py
|
{
"start": 294,
"end": 513
}
|
class ____(analytics.Event):
organization_id: int
status: str
resolve_type: str | None = None
user_id: int | None = None
@analytics.eventclass("integrations.slack.notification_sent")
|
SlackIntegrationStatus
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/components_tests/code_locations/component_component_deps_custom_component/defs/depends_on_my_python_defs/custom_component.py
|
{
"start": 130,
"end": 587
}
|
class ____(dg.Component):
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
assets_from_my_python_defs = context.component_tree.build_defs(
MY_PYTHON_DEFS_COMPONENT_PATH
).resolve_all_asset_keys()
@dg.asset(deps=assets_from_my_python_defs)
def downstream_of_all_my_python_defs():
pass
return dg.Definitions(assets=[downstream_of_all_my_python_defs])
|
MyCustomComponent
|
python
|
pydata__xarray
|
xarray/core/groupby.py
|
{
"start": 5528,
"end": 8539
}
|
class ____(Generic[T_Xarray]):
"""Class for keeping track of grouped dimensions without coordinates.
Should not be user visible.
"""
__slots__ = ("coords", "dataarray", "name", "size")
def __init__(self, obj: T_Xarray, name: Hashable, coords) -> None:
self.name = name
self.coords = coords
self.size = obj.sizes[name]
@property
def dims(self) -> tuple[Hashable]:
return (self.name,)
@property
def ndim(self) -> Literal[1]:
return 1
@property
def values(self) -> range:
return range(self.size)
@property
def data(self) -> np.ndarray:
return np.arange(self.size, dtype=int)
def __array__(
self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
) -> np.ndarray:
if copy is False:
raise NotImplementedError(f"An array copy is necessary, got {copy = }.")
return np.arange(self.size)
@property
def shape(self) -> tuple[int, ...]:
return (self.size,)
@property
def attrs(self) -> dict:
return {}
def __getitem__(self, key):
if isinstance(key, tuple):
(key,) = key
return self.values[key]
def to_index(self) -> pd.Index:
# could be pd.RangeIndex?
return pd.Index(np.arange(self.size))
def copy(self, deep: bool = True, data: Any = None):
raise NotImplementedError
def to_dataarray(self) -> DataArray:
from xarray.core.dataarray import DataArray
return DataArray(
data=self.data, dims=(self.name,), coords=self.coords, name=self.name
)
def to_array(self) -> DataArray:
"""Deprecated version of to_dataarray."""
return self.to_dataarray()
T_Group = Union["T_DataArray", _DummyGroup]
def _ensure_1d(
group: T_Group, obj: T_DataWithCoords
) -> tuple[
T_Group,
T_DataWithCoords,
Hashable | None,
list[Hashable],
]:
# 1D cases: do nothing
if isinstance(group, _DummyGroup) or group.ndim == 1:
return group, obj, None, []
from xarray.core.dataarray import DataArray
if isinstance(group, DataArray):
for dim in set(group.dims) - set(obj.dims):
obj = obj.expand_dims(dim)
# try to stack the dims of the group into a single dim
orig_dims = group.dims
stacked_dim = "stacked_" + "_".join(map(str, orig_dims))
# these dimensions get created by the stack operation
inserted_dims = [dim for dim in group.dims if dim not in group.coords]
# `newgroup` construction is optimized so we don't create an index unnecessarily,
# or stack any non-dim coords unnecessarily
newgroup = DataArray(group.variable.stack({stacked_dim: orig_dims}))
newobj = obj.stack({stacked_dim: orig_dims})
return newgroup, newobj, stacked_dim, inserted_dims
raise TypeError(f"group must be DataArray or _DummyGroup, got {type(group)!r}.")
@dataclass
|
_DummyGroup
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_cluster.py
|
{
"start": 952,
"end": 7952
}
|
class ____(AwsBaseHook):
"""
Interact with Amazon Redshift.
This is a thin wrapper around
:external+boto3:py:class:`boto3.client("redshift") <Redshift.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
template_fields: Sequence[str] = ("cluster_identifier",)
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "redshift"
super().__init__(*args, **kwargs)
def create_cluster(
self,
cluster_identifier: str,
node_type: str,
master_username: str,
master_user_password: str,
params: dict[str, Any],
) -> dict[str, Any]:
"""
Create a new cluster with the specified parameters.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.create_cluster`
:param cluster_identifier: A unique identifier for the cluster.
:param node_type: The node type to be provisioned for the cluster. Refer
https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-node-type-info
for the list of available node types.
:param master_username: The username associated with the admin user account
for the cluster that is being created.
:param master_user_password: password associated with the admin user account
for the cluster that is being created.
:param params: Remaining AWS Create cluster API params.
"""
response = self.conn.create_cluster(
ClusterIdentifier=cluster_identifier,
NodeType=node_type,
MasterUsername=master_username,
MasterUserPassword=master_user_password,
**params,
)
return response
# TODO: Wrap create_cluster_snapshot
def cluster_status(self, cluster_identifier: str) -> str | None:
"""
Get status of a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.describe_clusters`
:param cluster_identifier: unique identifier of a cluster
"""
try:
response = self.conn.describe_clusters(ClusterIdentifier=cluster_identifier)["Clusters"]
return response[0]["ClusterStatus"] if response else None
except self.conn.exceptions.ClusterNotFoundFault:
return "cluster_not_found"
async def cluster_status_async(self, cluster_identifier: str) -> str | None:
async with await self.get_async_conn() as client:
response = await client.describe_clusters(ClusterIdentifier=cluster_identifier)
return response["Clusters"][0]["ClusterStatus"] if response else None
def delete_cluster(
self,
cluster_identifier: str,
skip_final_cluster_snapshot: bool = True,
final_cluster_snapshot_identifier: str | None = None,
):
"""
Delete a cluster and optionally create a snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.delete_cluster`
:param cluster_identifier: unique identifier of a cluster
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:param final_cluster_snapshot_identifier: name of final cluster snapshot
"""
final_cluster_snapshot_identifier = final_cluster_snapshot_identifier or ""
response = self.conn.delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier,
)
return response["Cluster"] if response["Cluster"] else None
def describe_cluster_snapshots(self, cluster_identifier: str) -> list[str] | None:
"""
List snapshots for a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.describe_cluster_snapshots`
:param cluster_identifier: unique identifier of a cluster
"""
response = self.conn.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
if "Snapshots" not in response:
return None
snapshots = response["Snapshots"]
snapshots = [snapshot for snapshot in snapshots if snapshot["Status"]]
snapshots.sort(key=lambda x: x["SnapshotCreateTime"], reverse=True)
return snapshots
def restore_from_cluster_snapshot(self, cluster_identifier: str, snapshot_identifier: str) -> dict | None:
"""
Restore a cluster from its snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.restore_from_cluster_snapshot`
:param cluster_identifier: unique identifier of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
"""
response = self.conn.restore_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier, SnapshotIdentifier=snapshot_identifier
)
return response["Cluster"] if response["Cluster"] else None
def create_cluster_snapshot(
self,
snapshot_identifier: str,
cluster_identifier: str,
retention_period: int = -1,
tags: list[Any] | None = None,
) -> dict | None:
"""
Create a snapshot of a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.create_cluster_snapshot`
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:param cluster_identifier: unique identifier of a cluster
:param retention_period: The number of days that a manual snapshot is retained.
If the value is -1, the manual snapshot is retained indefinitely.
:param tags: A list of tag instances
"""
if tags is None:
tags = []
response = self.conn.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
ManualSnapshotRetentionPeriod=retention_period,
Tags=tags,
)
return response["Snapshot"] if response["Snapshot"] else None
def get_cluster_snapshot_status(self, snapshot_identifier: str):
"""
Get Redshift cluster snapshot status.
If cluster snapshot not found, *None* is returned.
:param snapshot_identifier: A unique identifier for the snapshot that you are requesting
"""
try:
response = self.conn.describe_cluster_snapshots(
SnapshotIdentifier=snapshot_identifier,
)
snapshot = response.get("Snapshots")[0]
snapshot_status: str = snapshot.get("Status")
return snapshot_status
except self.conn.exceptions.ClusterSnapshotNotFoundFault:
return None
|
RedshiftHook
|
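A hedged sketch of using the hook above from an Airflow task, assuming `apache-airflow-providers-amazon` is installed and an AWS connection named `aws_default` is configured (the cluster and snapshot identifiers are made up):
```python
# Sketch only: requires valid AWS credentials behind the "aws_default" connection.
from airflow.providers.amazon.aws.hooks.redshift_cluster import RedshiftHook

hook = RedshiftHook(aws_conn_id="aws_default")

status = hook.cluster_status("my-redshift-cluster")
print(status)  # e.g. "available", or "cluster_not_found" if the identifier is unknown

snapshot = hook.create_cluster_snapshot(
    snapshot_identifier="my-snapshot",
    cluster_identifier="my-redshift-cluster",
    retention_period=7,   # keep the manual snapshot for 7 days
)
```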
python
|
doocs__leetcode
|
solution/3300-3399/3346.Maximum Frequency of an Element After Performing Operations I/Solution.py
|
{
"start": 0,
"end": 437
}
|
class ____:
def maxFrequency(self, nums: List[int], k: int, numOperations: int) -> int:
cnt = defaultdict(int)
d = defaultdict(int)
for x in nums:
cnt[x] += 1
d[x] += 0
d[x - k] += 1
d[x + k + 1] -= 1
ans = s = 0
for x, t in sorted(d.items()):
s += t
ans = max(ans, min(s, cnt[x] + numOperations))
return ans
|
Solution
|
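A worked call for the solution above (the class name `Solution` comes from the record's target field and is assumed to be defined in scope, with `defaultdict` and `List` imported as the snippet requires). The difference array `d` marks, for every value `x`, the candidate targets in `[x - k, x + k]`; the sweep then caps each candidate by `cnt[x] + numOperations`, since elements already equal to the target need no operation:
```python
# LeetCode 3346, example 1: nums = [1, 4, 5], k = 1, numOperations = 2.
# 4 can be shifted to 5 (or 5 to 4), so a frequency of 2 is achievable.
print(Solution().maxFrequency([1, 4, 5], 1, 2))  # 2
```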
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-salesforce/source_salesforce/streams.py
|
{
"start": 37395,
"end": 38475
}
|
class ____(BatchedSubStream, BulkSalesforceStream):
def stream_slices(
self, sync_mode: SyncMode, cursor_field: Optional[List[str]] = None, stream_state: Optional[Mapping[str, Any]] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
self._instantiate_declarative_stream(
BulkParentStreamStreamSlicer(
super(BulkSalesforceSubStream, self), sync_mode, cursor_field, stream_state, PARENT_SALESFORCE_OBJECTS[self.name]["field"]
),
has_bulk_parent=True,
)
yield from self._bulk_job_stream.stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)
@BulkSalesforceStream.transformer.registerCustomTransform
def transform_empty_string_to_none(instance: Any, schema: Any):
"""
        BULK API returns a `csv` file, where all values are initially of string type.
        This custom transformer replaces empty string values with `None`.
"""
if isinstance(instance, str) and not instance.strip():
instance = None
return instance
|
BulkSalesforceSubStream
|
python
|
pytorch__pytorch
|
torch/nn/modules/conv.py
|
{
"start": 48291,
"end": 57353
}
|
class ____(_ConvTransposeNd):
__doc__ = (
r"""Applies a 3D transposed convolution operator over an input image composed of several input
planes.
The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
and sums over the outputs from all input feature planes.
This module can be seen as the gradient of Conv3d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sizes of the input. This is set so that
when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
are initialized with same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
{cudnn_reproducibility_note}
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(**reproducibility_notes, **convolution_notes)
+ r"""
Shape:
- Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or
:math:`(C_{out}, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
.. math::
W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
\times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels)
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
>>> input = torch.randn(20, 16, 10, 50, 100)
>>> output = m(input)
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
)
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
output_padding: _size_3_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_3_t = 1,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
output_padding = _triple(output_padding)
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
True,
output_padding,
groups,
bias,
padding_mode,
**factory_kwargs,
)
def forward(self, input: Tensor, output_size: Optional[list[int]] = None) -> Tensor:
if self.padding_mode != "zeros":
raise ValueError(
"Only `zeros` padding mode is supported for ConvTranspose3d"
)
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 3
output_padding = self._output_padding(
input,
output_size,
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.kernel_size, # type: ignore[arg-type]
num_spatial_dims,
self.dilation, # type: ignore[arg-type]
)
return F.conv_transpose3d(
input,
self.weight,
self.bias,
self.stride,
self.padding,
output_padding,
self.groups,
self.dilation,
)
# TODO: Deprecate and remove the following alias `_ConvTransposeMixin`.
#
# `_ConvTransposeMixin` was a mixin that was removed. It is meant to be used
# with `_ConvNd` to construct actual module classes that implements conv
# transpose ops:
#
# class MyConvTranspose(_ConvNd, _ConvTransposeMixin):
# ...
#
# In PyTorch, it has been replaced by `_ConvTransposeNd`, which is a proper
# subclass of `_ConvNd`. However, some user code in the wild still (incorrectly)
# use the internal class `_ConvTransposeMixin`. Hence, we provide this alias
# for BC, because it is cheap and easy for us to do so, even though that
# `_ConvTransposeNd` is really not a mixin anymore (but multiple inheritance as
# above would still work).
|
ConvTranspose3d
|
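A short check of the output-shape formula quoted in the docstring above, reusing its own example values (assumes PyTorch is installed):
```python
# D_out = (10 - 1)*2 - 2*0 + 1*(3 - 1) + 0 + 1 = 21; likewise H_out = 101, W_out = 201.
import torch
import torch.nn as nn

m = nn.ConvTranspose3d(16, 33, 3, stride=2)
x = torch.randn(20, 16, 10, 50, 100)
print(m(x).shape)  # torch.Size([20, 33, 21, 101, 201])
```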
python
|
pandas-dev__pandas
|
pandas/tests/arrays/test_datetimes.py
|
{
"start": 416,
"end": 9130
}
|
class ____:
@pytest.fixture(params=["s", "ms", "us"])
def unit(self, request):
"""Fixture returning parametrized time units"""
return request.param
@pytest.fixture
def dtype(self, unit, tz_naive_fixture):
tz = tz_naive_fixture
if tz is None:
return np.dtype(f"datetime64[{unit}]")
else:
return DatetimeTZDtype(unit=unit, tz=tz)
@pytest.fixture
def dta_dti(self, unit, dtype):
tz = getattr(dtype, "tz", None)
dti = pd.date_range("2016-01-01", periods=55, freq="D", tz=tz, unit="ns")
if tz is None:
arr = np.asarray(dti).astype(f"M8[{unit}]")
else:
arr = np.asarray(dti.tz_convert("UTC").tz_localize(None)).astype(
f"M8[{unit}]"
)
dta = DatetimeArray._simple_new(arr, dtype=dtype)
return dta, dti
@pytest.fixture
def dta(self, dta_dti):
dta, dti = dta_dti
return dta
def test_non_nano(self, unit, dtype):
arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
dta = DatetimeArray._simple_new(arr, dtype=dtype)
assert dta.dtype == dtype
assert dta[0].unit == unit
assert tz_compare(dta.tz, dta[0].tz)
assert (dta[0] == dta[:1]).all()
@pytest.mark.parametrize(
"field", DatetimeArray._field_ops + DatetimeArray._bool_ops
)
def test_fields(self, unit, field, dtype, dta_dti):
dta, dti = dta_dti
assert (dti == dta).all()
res = getattr(dta, field)
expected = getattr(dti._data, field)
tm.assert_numpy_array_equal(res, expected)
def test_normalize(self, unit):
dti = pd.date_range("2016-01-01 06:00:00", periods=55, freq="D")
arr = np.asarray(dti).astype(f"M8[{unit}]")
dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
assert not dta.is_normalized
# TODO: simplify once we can just .astype to other unit
exp = np.asarray(dti.normalize()).astype(f"M8[{unit}]")
expected = DatetimeArray._simple_new(exp, dtype=exp.dtype)
res = dta.normalize()
tm.assert_extension_array_equal(res, expected)
def test_normalize_overflow_raises(self):
# GH#60583
ts = pd.Timestamp.min
dta = DatetimeArray._from_sequence([ts], dtype="M8[ns]")
msg = "Cannot normalize Timestamp without integer overflow"
with pytest.raises(ValueError, match=msg):
dta.normalize()
def test_simple_new_requires_match(self, unit):
arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
dtype = DatetimeTZDtype(unit, "UTC")
dta = DatetimeArray._simple_new(arr, dtype=dtype)
assert dta.dtype == dtype
wrong = DatetimeTZDtype("ns", "UTC")
with pytest.raises(AssertionError, match="^$"):
DatetimeArray._simple_new(arr, dtype=wrong)
def test_std_non_nano(self, unit):
dti = pd.date_range("2016-01-01", periods=55, freq="D", unit="ns")
arr = np.asarray(dti).astype(f"M8[{unit}]")
dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
# we should match the nano-reso std, but floored to our reso.
res = dta.std()
assert res._creso == dta._creso
assert res == dti.std().floor(unit)
@pytest.mark.filterwarnings("ignore:Converting to PeriodArray.*:UserWarning")
def test_to_period(self, dta_dti):
dta, dti = dta_dti
result = dta.to_period("D")
expected = dti._data.to_period("D")
tm.assert_extension_array_equal(result, expected)
def test_iter(self, dta):
res = next(iter(dta))
expected = dta[0]
assert type(res) is pd.Timestamp
assert res._value == expected._value
assert res._creso == expected._creso
assert res == expected
def test_astype_object(self, dta):
result = dta.astype(object)
assert all(x._creso == dta._creso for x in result)
assert all(x == y for x, y in zip(result, dta, strict=True))
def test_to_pydatetime(self, dta_dti):
dta, dti = dta_dti
result = dta.to_pydatetime()
expected = dti.to_pydatetime()
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("meth", ["time", "timetz", "date"])
def test_time_date(self, dta_dti, meth):
dta, dti = dta_dti
result = getattr(dta, meth)
expected = getattr(dti, meth)
tm.assert_numpy_array_equal(result, expected)
def test_format_native_types(self, unit, dtype, dta_dti):
# In this case we should get the same formatted values with our nano
# version dti._data as we do with the non-nano dta
dta, dti = dta_dti
res = dta._format_native_types()
exp = dti._data._format_native_types()
tm.assert_numpy_array_equal(res, exp)
def test_repr(self, dta_dti, unit):
dta, dti = dta_dti
assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}")
# TODO: tests with td64
def test_compare_mismatched_resolutions(self, comparison_op):
# comparison that numpy gets wrong bc of silent overflows
op = comparison_op
iinfo = np.iinfo(np.int64)
vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64)
# Construct so that arr2[1] < arr[1] < arr[2] < arr2[2]
arr = np.array(vals).view("M8[ns]")
arr2 = arr.view("M8[s]")
left = DatetimeArray._simple_new(arr, dtype=arr.dtype)
right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype)
if comparison_op is operator.eq:
expected = np.array([False, False, False])
elif comparison_op is operator.ne:
expected = np.array([True, True, True])
elif comparison_op in [operator.lt, operator.le]:
expected = np.array([False, False, True])
else:
expected = np.array([False, True, False])
result = op(left, right)
tm.assert_numpy_array_equal(result, expected)
result = op(left[1], right)
tm.assert_numpy_array_equal(result, expected)
if op not in [operator.eq, operator.ne]:
# check that numpy still gets this wrong; if it is fixed we may be
# able to remove compare_mismatched_resolutions
np_res = op(left._ndarray, right._ndarray)
tm.assert_numpy_array_equal(np_res[1:], ~expected[1:])
def test_add_mismatched_reso_doesnt_downcast(self):
# https://github.com/pandas-dev/pandas/pull/48748#issuecomment-1260181008
td = pd.Timedelta(microseconds=1)
dti = pd.date_range("2016-01-01", periods=3) - td
dta = dti._data.as_unit("us")
res = dta + td.as_unit("us")
# even though the result is an even number of days
# (so we _could_ downcast to unit="s"), we do not.
assert res.unit == "us"
@pytest.mark.parametrize(
"scalar",
[
timedelta(hours=2),
pd.Timedelta(hours=2),
np.timedelta64(2, "h"),
np.timedelta64(2 * 3600 * 1000, "ms"),
pd.offsets.Minute(120),
pd.offsets.Hour(2),
],
)
def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar):
dta, dti = dta_dti
td = pd.Timedelta(scalar)
exp_unit = tm.get_finest_unit(dta.unit, td.unit)
expected = (dti + td)._data.as_unit(exp_unit)
result = dta + scalar
tm.assert_extension_array_equal(result, expected)
result = scalar + dta
tm.assert_extension_array_equal(result, expected)
expected = (dti - td)._data.as_unit(exp_unit)
result = dta - scalar
tm.assert_extension_array_equal(result, expected)
def test_sub_datetimelike_scalar_mismatch(self):
dti = pd.date_range("2016-01-01", periods=3)
dta = dti._data.as_unit("us")
ts = dta[0].as_unit("s")
result = dta - ts
expected = (dti - dti[0])._data.as_unit("us")
assert result.dtype == "m8[us]"
tm.assert_extension_array_equal(result, expected)
def test_sub_datetime64_reso_mismatch(self):
dti = pd.date_range("2016-01-01", periods=3)
left = dti._data.as_unit("s")
right = left.as_unit("ms")
result = left - right
exp_values = np.array([0, 0, 0], dtype="m8[ms]")
expected = TimedeltaArray._simple_new(
exp_values,
dtype=exp_values.dtype,
)
tm.assert_extension_array_equal(result, expected)
result2 = right - left
tm.assert_extension_array_equal(result2, expected)
|
TestNonNano
|
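The tests above reach non-nanosecond datetime resolutions through private constructors; a small sketch of the same idea through public APIs (assumes pandas 2.x, where second/milli/microsecond units are supported):
```python
import numpy as np
import pandas as pd

arr = np.array(["2016-01-01", "2016-01-02"], dtype="M8[s]")
ser = pd.Series(arr)                       # unit is preserved, not upcast to nanoseconds
print(ser.dtype)                           # datetime64[s]
print(ser.astype("datetime64[ms]").dtype)  # datetime64[ms]
```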
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/utils/cdxgen.py
|
{
"start": 12682,
"end": 13021
}
|
class ____:
python_version: str | None
target_path: Path
@abstractmethod
def produce(self, output: Output | None, port: int, github_token: str | None) -> tuple[int, str]:
raise NotImplementedError
@abstractmethod
def get_job_name(self) -> str:
raise NotImplementedError
@dataclass
|
SbomApplicationJob
|
python
|
ray-project__ray
|
rllib/utils/tests/test_check_multi_agent.py
|
{
"start": 113,
"end": 2638
}
|
class ____(unittest.TestCase):
def test_multi_agent_invalid_args(self):
self.assertRaisesRegex(
TypeError,
"got an unexpected keyword argument 'wrong_key'",
lambda: (
PPOConfig().multi_agent(
policies={"p0"}, policies_to_train=["p0"], wrong_key=1
)
),
)
def test_multi_agent_bad_policy_ids(self):
self.assertRaisesRegex(
ValueError,
"PolicyID `1` not valid!",
lambda: (
PPOConfig().multi_agent(
policies={1, "good_id"},
policy_mapping_fn=lambda agent_id, episode, worker, **kw: "good_id",
)
),
)
def test_multi_agent_invalid_sub_values(self):
self.assertRaisesRegex(
ValueError,
"config.multi_agent\\(count_steps_by=..\\) must be one of",
lambda: (PPOConfig().multi_agent(count_steps_by="invalid_value")),
)
def test_multi_agent_invalid_override_configs(self):
self.assertRaisesRegex(
KeyError,
"Invalid property name invdli for config class PPOConfig",
lambda: (
PPOConfig().multi_agent(
policies={
"p0": PolicySpec(config=PPOConfig.overrides(invdli=42.0)),
}
)
),
)
self.assertRaisesRegex(
KeyError,
"Invalid property name invdli for config class PPOConfig",
lambda: (
PPOConfig().multi_agent(
policies={
"p0": PolicySpec(config=PPOConfig.overrides(invdli=42.0)),
}
)
),
)
def test_setting_multiagent_key_in_config_should_fail(self):
config = PPOConfig().multi_agent(
policies={
"pol1": (None, None, None, None),
"pol2": (None, None, None, PPOConfig.overrides(lr=0.001)),
}
)
def set_ma(config):
# not ok: cannot set "multiagent" key in AlgorithmConfig anymore.
config["multiagent"] = {"policies": {"pol1", "pol2"}}
self.assertRaisesRegex(
AttributeError,
"Cannot set `multiagent` key in an AlgorithmConfig!",
lambda: set_ma(config),
)
if __name__ == "__main__":
import pytest
pytest.main()
|
TestCheckMultiAgent
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/langchain/agents/middleware/file_search.py
|
{
"start": 2119,
"end": 12773
}
|
class ____(AgentMiddleware):
"""Provides Glob and Grep search over filesystem files.
This middleware adds two tools that search through local filesystem:
- Glob: Fast file pattern matching by file path
- Grep: Fast content search using ripgrep or Python fallback
Example:
```python
from langchain.agents import create_agent
from langchain.agents.middleware import (
FilesystemFileSearchMiddleware,
)
agent = create_agent(
model=model,
tools=[], # Add tools as needed
middleware=[
FilesystemFileSearchMiddleware(root_path="/workspace"),
],
)
```
"""
def __init__(
self,
*,
root_path: str,
use_ripgrep: bool = True,
max_file_size_mb: int = 10,
) -> None:
"""Initialize the search middleware.
Args:
root_path: Root directory to search.
use_ripgrep: Whether to use `ripgrep` for search.
Falls back to Python if `ripgrep` unavailable.
max_file_size_mb: Maximum file size to search in MB.
"""
self.root_path = Path(root_path).resolve()
self.use_ripgrep = use_ripgrep
self.max_file_size_bytes = max_file_size_mb * 1024 * 1024
# Create tool instances as closures that capture self
@tool
def glob_search(pattern: str, path: str = "/") -> str:
"""Fast file pattern matching tool that works with any codebase size.
Supports glob patterns like `**/*.js` or `src/**/*.ts`.
Returns matching file paths sorted by modification time.
Use this tool when you need to find files by name patterns.
Args:
pattern: The glob pattern to match files against.
path: The directory to search in. If not specified, searches from root.
Returns:
Newline-separated list of matching file paths, sorted by modification
time (most recently modified first). Returns `'No files found'` if no
matches.
"""
try:
base_full = self._validate_and_resolve_path(path)
except ValueError:
return "No files found"
if not base_full.exists() or not base_full.is_dir():
return "No files found"
# Use pathlib glob
matching: list[tuple[str, str]] = []
for match in base_full.glob(pattern):
if match.is_file():
# Convert to virtual path
virtual_path = "/" + str(match.relative_to(self.root_path))
stat = match.stat()
modified_at = datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat()
matching.append((virtual_path, modified_at))
if not matching:
return "No files found"
file_paths = [p for p, _ in matching]
return "\n".join(file_paths)
@tool
def grep_search(
pattern: str,
path: str = "/",
include: str | None = None,
output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
) -> str:
"""Fast content search tool that works with any codebase size.
Searches file contents using regular expressions. Supports full regex
syntax and filters files by pattern with the include parameter.
Args:
pattern: The regular expression pattern to search for in file contents.
path: The directory to search in. If not specified, searches from root.
include: File pattern to filter (e.g., `'*.js'`, `'*.{ts,tsx}'`).
output_mode: Output format:
- `'files_with_matches'`: Only file paths containing matches
- `'content'`: Matching lines with `file:line:content` format
- `'count'`: Count of matches per file
Returns:
Search results formatted according to `output_mode`.
Returns `'No matches found'` if no results.
"""
# Compile regex pattern (for validation)
try:
re.compile(pattern)
except re.error as e:
return f"Invalid regex pattern: {e}"
if include and not _is_valid_include_pattern(include):
return "Invalid include pattern"
# Try ripgrep first if enabled
results = None
if self.use_ripgrep:
with suppress(
FileNotFoundError,
subprocess.CalledProcessError,
subprocess.TimeoutExpired,
):
results = self._ripgrep_search(pattern, path, include)
# Python fallback if ripgrep failed or is disabled
if results is None:
results = self._python_search(pattern, path, include)
if not results:
return "No matches found"
# Format output based on mode
return self._format_grep_results(results, output_mode)
self.glob_search = glob_search
self.grep_search = grep_search
self.tools = [glob_search, grep_search]
def _validate_and_resolve_path(self, path: str) -> Path:
"""Validate and resolve a virtual path to filesystem path."""
# Normalize path
if not path.startswith("/"):
path = "/" + path
# Check for path traversal
if ".." in path or "~" in path:
msg = "Path traversal not allowed"
raise ValueError(msg)
# Convert virtual path to filesystem path
relative = path.lstrip("/")
full_path = (self.root_path / relative).resolve()
# Ensure path is within root
try:
full_path.relative_to(self.root_path)
except ValueError:
msg = f"Path outside root directory: {path}"
raise ValueError(msg) from None
return full_path
def _ripgrep_search(
self, pattern: str, base_path: str, include: str | None
) -> dict[str, list[tuple[int, str]]]:
"""Search using ripgrep subprocess."""
try:
base_full = self._validate_and_resolve_path(base_path)
except ValueError:
return {}
if not base_full.exists():
return {}
# Build ripgrep command
cmd = ["rg", "--json"]
if include:
# Convert glob pattern to ripgrep glob
cmd.extend(["--glob", include])
cmd.extend(["--", pattern, str(base_full)])
try:
result = subprocess.run( # noqa: S603
cmd,
capture_output=True,
text=True,
timeout=30,
check=False,
)
except (subprocess.TimeoutExpired, FileNotFoundError):
# Fallback to Python search if ripgrep unavailable or times out
return self._python_search(pattern, base_path, include)
# Parse ripgrep JSON output
results: dict[str, list[tuple[int, str]]] = {}
for line in result.stdout.splitlines():
try:
data = json.loads(line)
if data["type"] == "match":
path = data["data"]["path"]["text"]
# Convert to virtual path
virtual_path = "/" + str(Path(path).relative_to(self.root_path))
line_num = data["data"]["line_number"]
line_text = data["data"]["lines"]["text"].rstrip("\n")
if virtual_path not in results:
results[virtual_path] = []
results[virtual_path].append((line_num, line_text))
except (json.JSONDecodeError, KeyError):
continue
return results
def _python_search(
self, pattern: str, base_path: str, include: str | None
) -> dict[str, list[tuple[int, str]]]:
"""Search using Python regex (fallback)."""
try:
base_full = self._validate_and_resolve_path(base_path)
except ValueError:
return {}
if not base_full.exists():
return {}
regex = re.compile(pattern)
results: dict[str, list[tuple[int, str]]] = {}
# Walk directory tree
for file_path in base_full.rglob("*"):
if not file_path.is_file():
continue
# Check include filter
if include and not _match_include_pattern(file_path.name, include):
continue
# Skip files that are too large
if file_path.stat().st_size > self.max_file_size_bytes:
continue
try:
content = file_path.read_text()
except (UnicodeDecodeError, PermissionError):
continue
# Search content
for line_num, line in enumerate(content.splitlines(), 1):
if regex.search(line):
virtual_path = "/" + str(file_path.relative_to(self.root_path))
if virtual_path not in results:
results[virtual_path] = []
results[virtual_path].append((line_num, line))
return results
def _format_grep_results(
self,
results: dict[str, list[tuple[int, str]]],
output_mode: str,
) -> str:
"""Format grep results based on output mode."""
if output_mode == "files_with_matches":
# Just return file paths
return "\n".join(sorted(results.keys()))
if output_mode == "content":
# Return file:line:content format
lines = []
for file_path in sorted(results.keys()):
for line_num, line in results[file_path]:
lines.append(f"{file_path}:{line_num}:{line}")
return "\n".join(lines)
if output_mode == "count":
# Return file:count format
lines = []
for file_path in sorted(results.keys()):
count = len(results[file_path])
lines.append(f"{file_path}:{count}")
return "\n".join(lines)
# Default to files_with_matches
return "\n".join(sorted(results.keys()))
__all__ = [
"FilesystemFileSearchMiddleware",
]
|
FilesystemFileSearchMiddleware
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/lists.py
|
{
"start": 61353,
"end": 63806
}
|
class ____(IteratorVariable):
_nonvar_fields = {
"index",
*IteratorVariable._nonvar_fields,
}
def __init__(
self, items: list[VariableTracker], index: int = 0, **kwargs: Any
) -> None:
super().__init__(**kwargs)
assert isinstance(items, list)
# Removing this check as it slows things down too much
# https://github.com/pytorch/pytorch/pull/87533#issuecomment-1287574492
# assert all(isinstance(x, VariableTracker) for x in items)
self.items = items
self.index = index
self.is_exhausted = False
def __repr__(self) -> str:
return f"{self.__class__.__name__}(length={len(self.items)}, index={repr(self.index)})"
def next_variable(self, tx: "InstructionTranslator") -> VariableTracker:
assert self.is_mutable()
old_index = self.index
if old_index >= len(self.items) or self.is_exhausted:
self.is_exhausted = True
raise_observed_exception(StopIteration, tx)
tx.output.side_effects.mutation(self)
self.index += 1
return self.items[old_index]
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
return variables.ConstantVariable.create(hasattr(iter([]), name))
def python_type(self) -> type:
return type(iter([]))
def as_python_constant(self) -> Any:
if self.index > 0:
raise NotImplementedError
return iter([x.as_python_constant() for x in self.items])
def has_unpack_var_sequence(self, tx: "InstructionTranslator") -> bool:
return True
def unpack_var_sequence(self, tx: "InstructionTranslator") -> list[VariableTracker]:
if self.is_exhausted:
return []
self.is_exhausted = True
return list(self.items[self.index :])
def force_unpack_var_sequence(
self, tx: "InstructionTranslator"
) -> list[VariableTracker]:
return self.unpack_var_sequence(tx)
def reconstruct(self, codegen: "PyCodegen") -> None:
if not self.is_exhausted:
remaining_items = self.items[self.index :]
else:
remaining_items = []
codegen.foreach(remaining_items)
codegen.extend_output(
[
create_build_tuple(len(remaining_items)),
create_instruction("GET_ITER"),
]
)
|
ListIteratorVariable
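As a rough plain-Python analogy for the exhaust-on-unpack iterator modelled above (this is not Dynamo's VariableTracker machinery, just the index/exhausted bookkeeping in isolation):

class SimpleListIterator:
    def __init__(self, items):
        self.items = list(items)
        self.index = 0
        self.exhausted = False

    def __iter__(self):
        return self

    def __next__(self):
        if self.exhausted or self.index >= len(self.items):
            self.exhausted = True
            raise StopIteration
        value = self.items[self.index]
        self.index += 1
        return value

    def unpack_remaining(self):
        # Like unpack_var_sequence above: hand back what is left, once.
        if self.exhausted:
            return []
        self.exhausted = True
        return self.items[self.index:]

it = SimpleListIterator([1, 2, 3])
print(next(it))               # 1
print(it.unpack_remaining())  # [2, 3]
print(it.unpack_remaining())  # []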
|
python
|
walkccc__LeetCode
|
solutions/1032. Stream of Characters/1032.py
|
{
"start": 108,
"end": 707
}
|
class ____:
def __init__(self, words: list[str]):
self.root = TrieNode()
self.letters = []
for word in words:
self._insert(word)
def query(self, letter: str) -> bool:
self.letters.append(letter)
node = self.root
for c in reversed(self.letters):
if c not in node.children:
return False
node = node.children[c]
if node.isWord:
return True
return False
def _insert(self, word: str) -> None:
node = self.root
for c in reversed(word):
node = node.children.setdefault(c, TrieNode())
node.isWord = True
|
StreamChecker
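A quick usage check of the reversed-suffix trie above (assuming the masked class is StreamChecker, as the target column states, and that TrieNode is the minimal node sketched here):

class TrieNode:
    def __init__(self):
        self.children = {}
        self.isWord = False

checker = StreamChecker(["cd", "f", "kl"])
# Stream 'a'..'l'; only 'd', 'f', and 'l' complete one of the stored words.
expected = {"d": True, "f": True, "l": True}
for ch in "abcdefghijkl":
    assert checker.query(ch) == expected.get(ch, False)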
|
python
|
imageio__imageio
|
imageio/plugins/lytro.py
|
{
"start": 18393,
"end": 25309
}
|
class ____(LytroFormat):
"""This is the Lytro Illum LFP format.
    The lfp is an image and metadata container format as used by the
    Lytro F01 light field camera.
The format will read the specified lfp file.
This format does not support writing.
Parameters for reading
----------------------
meta_only : bool
Whether to only read the metadata.
include_thumbnail : bool
Whether to include an image thumbnail in the metadata.
"""
def _can_read(self, request):
# Check if mode and extensions are supported by the format
if request.extension in (".lfp",):
return True
# -- reader
class Reader(Format.Reader):
def _open(self, meta_only=False):
self._file = self.request.get_file()
self._data = None
self._chunks = {}
self.metadata = {}
self._content = None
self._meta_only = meta_only
self._find_header()
self._find_meta()
self._find_chunks()
try:
# Get sha1 dict and check if it is in dictionary of data chunks
chunk_dict = self._content["picture"]["frameArray"][0]["frame"]
if (
chunk_dict["metadataRef"] in self._chunks
and chunk_dict["imageRef"] in self._chunks
and chunk_dict["privateMetadataRef"] in self._chunks
):
if not self._meta_only:
# Read raw image data byte buffer
data_pos, size = self._chunks[chunk_dict["imageRef"]]
self._file.seek(data_pos, 0)
self.raw_image_data = self._file.read(size)
# Read meta data
data_pos, size = self._chunks[chunk_dict["metadataRef"]]
self._file.seek(data_pos, 0)
metadata = self._file.read(size)
# Add metadata to meta data dict
self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))
# Read private metadata
data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
self._file.seek(data_pos, 0)
serial_numbers = self._file.read(size)
self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
# Add private metadata to meta data dict
self.metadata["privateMetadata"] = self.serial_numbers
except KeyError:
raise RuntimeError("The specified file is not a valid LFP file.")
def _close(self):
# Close the reader.
# Note that the request object will close self._file
del self._data
def _get_length(self):
# Return the number of images. Can be np.inf
return 1
def _find_header(self):
"""
            Checks that the file has the correct header and skips it.
"""
file_header = b"\x89LFP\x0d\x0a\x1a\x0a\x00\x00\x00\x01"
# Read and check header of file
header = self._file.read(HEADER_LENGTH)
if header != file_header:
raise RuntimeError("The LFP file header is invalid.")
# Read first bytes to skip header
self._file.read(SIZE_LENGTH)
def _find_chunks(self):
"""
Gets start position and size of data chunks in file.
"""
chunk_header = b"\x89LFC\x0d\x0a\x1a\x0a\x00\x00\x00\x00"
for i in range(0, DATA_CHUNKS_F01):
data_pos, size, sha1 = self._get_chunk(chunk_header)
self._chunks[sha1] = (data_pos, size)
def _find_meta(self):
"""
            Gets a data chunk that contains information about the content
            of other data chunks.
"""
meta_header = b"\x89LFM\x0d\x0a\x1a\x0a\x00\x00\x00\x00"
data_pos, size, sha1 = self._get_chunk(meta_header)
# Get content
self._file.seek(data_pos, 0)
data = self._file.read(size)
self._content = json.loads(data.decode("ASCII"))
data = self._file.read(5) # Skip 5
def _get_chunk(self, header):
"""
Checks if chunk has correct header and skips it.
Finds start position and length of next chunk and reads
sha1-string that identifies the following data chunk.
Parameters
----------
header : bytes
Byte string that identifies start of chunk.
Returns
-------
data_pos : int
Start position of data chunk in file.
size : int
Size of data chunk.
sha1 : str
Sha1 value of chunk.
"""
# Read and check header of chunk
header_chunk = self._file.read(HEADER_LENGTH)
if header_chunk != header:
raise RuntimeError("The LFP chunk header is invalid.")
data_pos = None
sha1 = None
# Read size
size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
if size > 0:
# Read sha1
sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
# Skip fixed null chars
self._file.read(PADDING_LENGTH)
# Find start of data and skip data
data_pos = self._file.tell()
self._file.seek(size, 1)
# Skip extra null chars
ch = self._file.read(1)
while ch == b"\0":
ch = self._file.read(1)
self._file.seek(-1, 1)
return data_pos, size, sha1
def _get_data(self, index):
# Return the data and meta data for the given index
if index not in [0, None]:
raise IndexError("Lytro lfp file contains only one dataset")
if not self._meta_only:
# Read bytes from string and convert to uint16
raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(
np.uint16
)
im = LytroF01RawFormat.rearrange_bits(raw)
else:
im = np.array([])
# Return array and dummy meta data
return im, self.metadata
def _get_meta_data(self, index):
# Get the meta data for the given index. If index is None,
# it returns the global meta data.
if index not in [0, None]:
raise IndexError("Lytro meta data file contains only one dataset")
return self.metadata
|
LytroLfpFormat
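A hedged sketch of the length-prefixed chunk layout the reader above walks: a 12-byte magic, a 4-byte big-endian size, a sha1 reference, fixed padding, then the payload. The 45/35 byte lengths below are illustrative assumptions, not imageio's constants.

import io
import struct

MAGIC = b"\x89LFC\x0d\x0a\x1a\x0a\x00\x00\x00\x00"   # 12-byte chunk header
SIZE_LEN = 4                                          # big-endian int32 size

def read_chunk(fh):
    # Returns (sha1, payload) for one chunk, mirroring the read pattern above.
    if fh.read(len(MAGIC)) != MAGIC:
        raise RuntimeError("bad chunk header")
    (size,) = struct.unpack(">i", fh.read(SIZE_LEN))
    sha1 = fh.read(45).decode("ascii")    # assumed sha1 reference length
    fh.read(35)                           # assumed fixed null padding
    return sha1, fh.read(size)

buf = io.BytesIO(MAGIC + struct.pack(">i", 3) + b"s" * 45 + b"\0" * 35 + b"abc")
print(read_chunk(buf))   # ('s' * 45, b'abc')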
|
python
|
spack__spack
|
lib/spack/spack/database.py
|
{
"start": 10464,
"end": 13355
}
|
class ____:
"""Manages acquiring and releasing read or write locks on concrete specs."""
def __init__(self, lock_path: Union[str, pathlib.Path], default_timeout: Optional[float]):
self.lock_path = pathlib.Path(lock_path)
self.default_timeout = default_timeout
# Maps (spec.dag_hash(), spec.name) to the corresponding lock object
self.locks: Dict[Tuple[str, str], lk.Lock] = {}
def lock(self, spec: "spack.spec.Spec", timeout: Optional[float] = None) -> lk.Lock:
"""Returns a lock on a concrete spec.
The lock is a byte range lock on the nth byte of a file.
The lock file is ``self.lock_path``.
        n is the sys.maxsize-bit prefix of the DAG hash. This makes the likelihood of
        collision very low AND gives us readers-writer lock semantics with just a
        single lockfile, so no cleanup is required.
"""
assert spec.concrete, "cannot lock a non-concrete spec"
timeout = timeout or self.default_timeout
key = self._lock_key(spec)
if key not in self.locks:
self.locks[key] = self.raw_lock(spec, timeout=timeout)
else:
self.locks[key].default_timeout = timeout
return self.locks[key]
def raw_lock(self, spec: "spack.spec.Spec", timeout: Optional[float] = None) -> lk.Lock:
"""Returns a raw lock for a Spec, but doesn't keep track of it."""
return lk.Lock(
str(self.lock_path),
start=spec.dag_hash_bit_prefix(bit_length(sys.maxsize)),
length=1,
default_timeout=timeout,
desc=spec.name,
)
def has_lock(self, spec: "spack.spec.Spec") -> bool:
"""Returns True if the spec is already managed by this spec locker"""
return self._lock_key(spec) in self.locks
def _lock_key(self, spec: "spack.spec.Spec") -> Tuple[str, str]:
return (spec.dag_hash(), spec.name)
@contextlib.contextmanager
def write_lock(self, spec: "spack.spec.Spec") -> Generator["SpecLocker", None, None]:
lock = self.lock(spec)
lock.acquire_write()
try:
yield self
except lk.LockError:
# This addresses the case where a nested lock attempt fails inside
# of this context manager
raise
except (Exception, KeyboardInterrupt):
lock.release_write()
raise
else:
lock.release_write()
def clear(self, spec: "spack.spec.Spec") -> Tuple[bool, Optional[lk.Lock]]:
key = self._lock_key(spec)
lock = self.locks.pop(key, None)
return bool(lock), lock
def clear_all(self, clear_fn: Optional[Callable[[lk.Lock], Any]] = None) -> None:
if clear_fn is not None:
for lock in self.locks.values():
clear_fn(lock)
self.locks.clear()
|
SpecLocker
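A rough, POSIX-only sketch of the one-byte-per-spec locking idea described in the docstring above, using fcntl byte-range locks on a shared lock file (the 32-bit prefix and the lock-file path are assumptions for the sketch, not Spack's actual parameters):

import fcntl

def shared_lock_on_hash(lock_file_handle, dag_hash: str):
    # Lock a single byte whose offset is derived from a prefix of the hash,
    # so different specs contend only when their prefixes collide.
    offset = int(dag_hash[:8], 16)          # assumed 32-bit prefix
    fcntl.lockf(lock_file_handle, fcntl.LOCK_SH, 1, offset)

def release(lock_file_handle, dag_hash: str):
    offset = int(dag_hash[:8], 16)
    fcntl.lockf(lock_file_handle, fcntl.LOCK_UN, 1, offset)

# POSIX only; the path is just an example location for the sketch.
with open("/tmp/prefix_lock", "a+b") as fh:
    shared_lock_on_hash(fh, "deadbeefcafe")
    release(fh, "deadbeefcafe")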
|
python
|
run-llama__llama_index
|
llama-index-core/tests/output_parsers/test_pydantic.py
|
{
"start": 250,
"end": 311
}
|
class ____(BaseModel):
test_attr: str
foo: int
|
AttrDict
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/consts.py
|
{
"start": 2937,
"end": 3555
}
|
class ____(Enum):
"""Enum to characterize the current context state, values are used for external representation on GitHub commit checks."""
INITIALIZED = {"github_state": "pending", "description": "Pipelines are being initialized..."}
RUNNING = {"github_state": "pending", "description": "Pipelines are running..."}
ERROR = {"github_state": "error", "description": "Something went wrong while running the Pipelines."}
SUCCESSFUL = {"github_state": "success", "description": "All Pipelines ran successfully."}
FAILURE = {"github_state": "failure", "description": "Pipeline failed."}
|
ContextState
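For reference, the pattern above (Enum members whose values are small dicts) is consumed like this; a generic sketch, not the pipelines code itself:

from enum import Enum

class State(Enum):
    RUNNING = {"github_state": "pending", "description": "Pipelines are running..."}
    SUCCESSFUL = {"github_state": "success", "description": "All Pipelines ran successfully."}

def to_commit_status(state):
    # Each member's dict value carries the external GitHub representation.
    return state.value["github_state"], state.value["description"]

print(to_commit_status(State.RUNNING))   # ('pending', 'Pipelines are running...')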
|
python
|
lxml__lxml
|
src/lxml/tests/test_elementpath.py
|
{
"start": 15107,
"end": 15536
}
|
class ____(EtreeElementPathTestCase):
import xml.etree.ElementTree as etree
import xml.etree.ElementPath as _elementpath
test_cache = unittest.skip("lxml-only")(EtreeElementPathTestCase.test_cache)
test_tokenizer = unittest.skip("lxml-only")(EtreeElementPathTestCase.test_tokenizer)
test_tokenizer_index = unittest.skip("lxml-only")(EtreeElementPathTestCase.test_tokenizer_index)
|
ElementTreeElementPathTestCase
|
python
|
keras-team__keras
|
keras/src/dtype_policies/dtype_policy.py
|
{
"start": 7828,
"end": 9352
}
|
class ____(DTypePolicy):
def __init__(self, mode, source_name=None):
# Use the global dtype policy if `source_name` is not specified
if source_name is None:
source_name = dtype_policy().name
name = f"{mode}_from_{source_name}"
self._compute_dtype, self._variable_dtype = self._parse_name(
source_name
)
self._check_quantization_mode(mode, self._compute_dtype)
self._name = name
self._source_name = source_name
self._quantization_mode = mode
def __eq__(self, other):
if super().__eq__(other) is False:
return False
return (
self._quantization_mode == other._quantization_mode
and self._source_name == other._source_name
)
def get_config(self):
return {
"mode": self._quantization_mode,
"source_name": self._source_name,
}
def _check_quantization_mode(self, mode, compute_dtype):
if mode not in QUANTIZATION_MODES:
raise ValueError(
"Invalid quantization mode. "
f"Expected one of {QUANTIZATION_MODES}. "
f"Received: mode={mode}"
)
if compute_dtype == "float16" and mode == "int8":
raise ValueError(
f"Quantization mode='{mode}' doesn't work well with "
"compute_dtype='float16'."
)
@keras_export("keras.dtype_policies.QuantizedFloat8DTypePolicy")
|
QuantizedDTypePolicy
|
python
|
instagram__MonkeyType
|
demo/test_inbox.py
|
{
"start": 272,
"end": 8655
}
|
class ____(models.RepoInterface):
def __init__(self, *objs: object) -> None:
self.objs = objs
def get_feed_entries_by_ids(
self, ids: Collection[models.FeedEntryId]
) -> Dict[models.FeedEntryId, Optional[models.FeedEntry]]:
found = {
f.id: f
for f in self.objs
if isinstance(f, models.FeedEntry) and f.id in ids
}
return {id: found.get(id) for id in ids}
def get_feed_entries_for_user_id(
self, user_id: models.UserId
) -> List[models.FeedEntry]:
return [
o
for o in self.objs
if isinstance(o, models.FeedEntry) and o.user_id == user_id
]
def get_users_by_ids(
self, ids: Collection[models.UserId]
) -> Dict[models.UserId, Optional[models.User]]:
found = {
u.id: u for u in self.objs if isinstance(u, models.User) and u.id in ids
}
return {id: found.get(id) for id in ids}
def get_inbox_events_for_user_id(
self, user_id: models.UserId
) -> List[models.InboxEvent]:
return [
o
for o in self.objs
if isinstance(o, models.InboxEvent) and o.user_id == user_id
]
last_auto_id = 0
def make_user(**kwargs):
global last_auto_id
last_auto_id += 1
defaults = {"id": models.UserId(last_auto_id), "name": "Test User", "following": []}
defaults.update(kwargs)
return models.User(**defaults)
def now():
if sys.platform != 'win32':
return datetime.now()
    # Workaround for Windows where two close calls to datetime.now() return
    # exactly the same datetime
return datetime.now() + timedelta(microseconds=last_auto_id)
def make_feedentry(**kwargs):
global last_auto_id
last_auto_id += 1
defaults = {
"id": models.FeedEntryId(last_auto_id),
"caption": "Test FeedEntry",
"published": now(),
}
defaults.update(kwargs)
return models.FeedEntry(**defaults)
def make_commented(**kwargs):
global last_auto_id
last_auto_id += 1
defaults = {
"id": models.InboxEventId(last_auto_id),
"comment_text": "Test comment",
"published": now(),
}
defaults.update(kwargs)
return models.CommentedEvent(**defaults)
def make_liked(**kwargs):
global last_auto_id
last_auto_id += 1
defaults = {"id": models.InboxEventId(last_auto_id), "published": now()}
defaults.update(kwargs)
return models.LikedEvent(**defaults)
def make_followed(**kwargs):
global last_auto_id
last_auto_id += 1
defaults = {"id": models.InboxEventId(last_auto_id), "published": now()}
defaults.update(kwargs)
return models.FollowedEvent(**defaults)
def test_empty_inbox():
u = make_user()
repo = FakeRepo(u)
box = inbox.Inbox(u, repo)
assert box.aggregate() == []
assert box.summarize() == "You have no new activity."
def test_commented():
u = make_user()
other = make_user(name="Commenter")
feedentry = make_feedentry(user_id=u.id)
commented = make_commented(
user_id=u.id, feedentry_id=feedentry.id, commenter_id=other.id
)
repo = FakeRepo(u, other, feedentry, commented)
box = inbox.Inbox(u, repo)
assert (
box.aggregate()
== [
models.AggregatedItem(
type=models.EventType.COMMENTED,
text="Commenter commented on your post.",
published=commented.published,
)
]
)
assert box.summarize() == "You have 1 new comment."
def test_followed():
u = make_user()
other = make_user(name="Follower", following=[u.id])
event = make_followed(user_id=u.id, follower_id=other.id)
repo = FakeRepo(u, other, event)
box = inbox.Inbox(u, repo)
assert (
box.aggregate()
== [
models.AggregatedItem(
type=models.EventType.FOLLOWED,
text="Follower started following you.",
published=event.published,
)
]
)
assert box.summarize() == "You have 1 new follower."
def test_one_like():
u = make_user()
liker = make_user(name="Liker")
feedentry = make_feedentry(user_id=u.id, caption="My Post")
event = make_liked(user_id=u.id, liker_id=liker.id, feedentry_id=feedentry.id)
repo = FakeRepo(u, liker, feedentry, event)
box = inbox.Inbox(u, repo)
assert (
box.aggregate()
== [
models.AggregatedItem(
type=models.EventType.LIKED,
text='Liker liked your post "My Post".',
published=event.published,
)
]
)
assert box.summarize() == "You have 1 new like."
def test_two_likes():
u = make_user()
liker1 = make_user(name="Liker One")
liker2 = make_user(name="Liker Two")
feedentry = make_feedentry(user_id=u.id, caption="My Post")
like1 = make_liked(user_id=u.id, liker_id=liker1.id, feedentry_id=feedentry.id)
like2 = make_liked(user_id=u.id, liker_id=liker2.id, feedentry_id=feedentry.id)
repo = FakeRepo(u, liker1, liker2, feedentry, like1, like2)
box = inbox.Inbox(u, repo)
assert (
box.aggregate()
== [
models.AggregatedItem(
type=models.EventType.LIKED,
text='Liker One and Liker Two liked your post "My Post".',
published=like2.published,
)
]
)
assert box.summarize() == "You have 2 new likes."
def test_three_likes():
u = make_user()
liker1 = make_user(name="Liker One")
liker2 = make_user(name="Liker Two")
liker3 = make_user(name="Liker Three")
feedentry = make_feedentry(user_id=u.id, caption="My Post")
like1 = make_liked(user_id=u.id, liker_id=liker1.id, feedentry_id=feedentry.id)
like2 = make_liked(user_id=u.id, liker_id=liker2.id, feedentry_id=feedentry.id)
like3 = make_liked(user_id=u.id, liker_id=liker3.id, feedentry_id=feedentry.id)
repo = FakeRepo(u, liker1, liker2, liker3, feedentry, like1, like2, like3)
box = inbox.Inbox(u, repo)
assert (
box.aggregate()
== [
models.AggregatedItem(
type=models.EventType.LIKED,
text='Liker One, Liker Two and 1 others liked your post "My Post".',
published=like3.published,
)
]
)
assert box.summarize() == "You have 3 new likes."
def test_everything():
u = make_user()
other = make_user(name="Other", following=[u.id])
first_entry = make_feedentry(user_id=u.id, caption="My First Post")
follow = make_followed(user_id=u.id, follower_id=other.id)
second_entry = make_feedentry(user_id=u.id, caption="Second Post")
like1 = make_liked(user_id=u.id, liker_id=other.id, feedentry_id=first_entry.id)
comment = make_commented(
user_id=u.id, commenter_id=other.id, feedentry_id=first_entry.id
)
like2 = make_liked(user_id=u.id, liker_id=other.id, feedentry_id=second_entry.id)
repo = FakeRepo(u, other, first_entry, second_entry, like1, like2, comment, follow)
box = inbox.Inbox(u, repo)
assert (
box.aggregate()
== [
models.AggregatedItem(
type=models.EventType.LIKED,
text='Other liked your post "Second Post".',
published=like2.published,
),
models.AggregatedItem(
type=models.EventType.COMMENTED,
text="Other commented on your post.",
published=comment.published,
),
models.AggregatedItem(
type=models.EventType.LIKED,
text='Other liked your post "My First Post".',
published=like1.published,
),
models.AggregatedItem(
type=models.EventType.FOLLOWED,
text="Other started following you.",
published=follow.published,
),
]
)
assert box.summarize() == "You have 2 new likes, 1 new follower and 1 new comment."
def test_aggregator_interface():
agg = inbox.AggregatorInterface(FakeRepo())
agg.add(
models.InboxEvent(
models.InboxEventId(1), models.UserId(2), published=now()
)
)
assert agg.aggregate() == []
|
FakeRepo
|
python
|
doocs__leetcode
|
lcof/面试题40. 最小的k个数/Solution2.py
|
{
"start": 0,
"end": 234
}
|
class ____:
def getLeastNumbers(self, arr: List[int], k: int) -> List[int]:
h = []
for x in arr:
heappush(h, -x)
if len(h) > k:
heappop(h)
return [-x for x in h]
|
Solution
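A quick check of the bounded max-heap trick above (values are negated so heapq's min-heap evicts the largest of the k kept so far):

from heapq import heappush, heappop

def least_k(arr, k):
    # Same idea as the solution above: keep a size-k heap of negated values.
    h = []
    for x in arr:
        heappush(h, -x)
        if len(h) > k:
            heappop(h)
    return sorted(-x for x in h)

print(least_k([3, 2, 1], 2))        # [1, 2]
print(least_k([0, 1, 2, 1], 1))     # [0]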
|
python
|
getsentry__sentry
|
src/sentry/backup/findings.py
|
{
"start": 7224,
"end": 7732
}
|
class ____(json.JSONEncoder):
"""JSON serializer that handles findings properly."""
def default(self, obj):
if isinstance(obj, Finding):
kind = getattr(obj, "kind", None)
d = obj.to_dict()
d["finding"] = obj.get_finding_name()
if isinstance(kind, FindingKind):
d["kind"] = kind.name
elif isinstance(kind, str):
d["kind"] = kind
return d
return super().default(obj)
|
FindingJSONEncoder
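The same json.JSONEncoder pattern in miniature, a generic sketch rather than Sentry's Finding types: override default() for objects the stock encoder cannot serialize and pass the encoder via cls=.

import json
from dataclasses import dataclass, asdict
from enum import Enum

class Kind(Enum):
    WARNING = 1

@dataclass
class Note:
    kind: Kind
    message: str

class NoteEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Note):
            d = asdict(obj)
            d["kind"] = obj.kind.name      # serialize the enum by name
            return d
        return super().default(obj)        # fall back for unknown types

print(json.dumps(Note(Kind.WARNING, "check this"), cls=NoteEncoder))
# {"kind": "WARNING", "message": "check this"}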
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_subquery_relations.py
|
{
"start": 111027,
"end": 117600
}
|
class ____(fixtures.DeclarativeMappedTest):
"""because subqueryloader relies upon the .subquery() method, this means
if the original Query has a from_self() present, it needs to create
.subquery() in terms of the Query class as a from_self() selectable
doesn't work correctly with the future select. So it has
to create a Query object now that it gets only a select.
neutron is currently dependent on this use case which means others
are too.
Additionally tests functionality related to #5836, where we are using the
non-cached context.query, rather than
context.compile_state.select_statement to generate the subquery. this is
so we get the current parameters from the new statement being run, but it
also means we have to get a new CompileState from that query in order to
deal with the correct entities.
"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base, ComparableEntity):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
cs = relationship("C", order_by="C.id")
class B(Base, ComparableEntity):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
a = relationship("A")
ds = relationship("D", order_by="D.id")
class C(Base, ComparableEntity):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
class D(Base, ComparableEntity):
__tablename__ = "d"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
@classmethod
def insert_data(cls, connection):
A, B, C, D = cls.classes("A", "B", "C", "D")
s = Session(connection)
as_ = [
A(
id=i,
cs=[C(), C()],
)
for i in range(1, 5)
]
s.add_all(
[
B(a=as_[0], ds=[D()]),
B(a=as_[1], ds=[D()]),
B(a=as_[2]),
B(a=as_[3]),
]
)
s.commit()
def test_subq_w_from_self_one(self):
A, B, C = self.classes("A", "B", "C")
s = fixture_session()
cache = {}
for i in range(3):
subq = (
s.query(B)
.join(B.a)
.filter(B.id < 4)
.filter(A.id > 1)
.subquery()
)
bb = aliased(B, subq)
subq2 = s.query(bb).subquery()
bb2 = aliased(bb, subq2)
q = (
s.query(bb2)
.execution_options(compiled_cache=cache)
.options(subqueryload(bb2.a).subqueryload(A.cs))
)
def go():
results = q.all()
eq_(
results,
[
B(
a=A(cs=[C(a_id=2, id=3), C(a_id=2, id=4)], id=2),
a_id=2,
id=2,
),
B(
a=A(cs=[C(a_id=3, id=5), C(a_id=3, id=6)], id=3),
a_id=3,
id=3,
),
],
)
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT anon_1.id AS anon_1_id, "
"anon_1.a_id AS anon_1_a_id FROM "
"(SELECT anon_2.id AS id, anon_2.a_id "
"AS a_id FROM (SELECT b.id AS id, b.a_id "
"AS a_id FROM b JOIN a ON a.id = b.a_id "
"WHERE b.id < :id_1 AND a.id > :id_2) AS anon_2) AS anon_1"
),
CompiledSQL(
"SELECT a.id AS a_id, anon_1.anon_2_a_id AS "
"anon_1_anon_2_a_id FROM (SELECT DISTINCT "
"anon_2.a_id AS anon_2_a_id FROM "
"(SELECT anon_3.id AS id, anon_3.a_id "
"AS a_id FROM (SELECT b.id AS id, b.a_id "
"AS a_id FROM b JOIN a ON a.id = b.a_id "
"WHERE b.id < :id_1 AND a.id > :id_2) AS anon_3) "
"AS anon_2) AS anon_1 JOIN a "
"ON a.id = anon_1.anon_2_a_id"
),
CompiledSQL(
"SELECT c.id AS c_id, c.a_id AS c_a_id, a_1.id "
"AS a_1_id FROM (SELECT DISTINCT anon_2.a_id AS "
"anon_2_a_id FROM "
"(SELECT anon_3.id AS id, anon_3.a_id "
"AS a_id FROM (SELECT b.id AS id, b.a_id "
"AS a_id FROM b JOIN a ON a.id = b.a_id "
"WHERE b.id < :id_1 AND a.id > :id_2) AS anon_3) "
"AS anon_2) AS anon_1 JOIN a AS a_1 ON a_1.id = "
"anon_1.anon_2_a_id JOIN c ON a_1.id = c.a_id "
"ORDER BY c.id"
),
)
s.close()
def test_subq_w_from_self_two(self):
A, B, C = self.classes("A", "B", "C")
s = fixture_session()
cache = {}
for i in range(3):
def go():
subq = s.query(B).join(B.a).subquery()
bq = aliased(B, subq)
q = (
s.query(bq)
.execution_options(compiled_cache=cache)
.options(subqueryload(bq.ds))
)
q.all()
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT anon_1.id AS anon_1_id, anon_1.a_id AS "
"anon_1_a_id FROM (SELECT b.id AS id, b.a_id "
"AS a_id FROM b JOIN a ON a.id = b.a_id) AS anon_1"
),
CompiledSQL(
"SELECT d.id AS d_id, d.b_id AS d_b_id, "
"anon_1.anon_2_id AS anon_1_anon_2_id "
"FROM (SELECT anon_2.id AS anon_2_id FROM "
"(SELECT b.id AS id, b.a_id AS a_id FROM b "
"JOIN a ON a.id = b.a_id) AS anon_2) AS anon_1 "
"JOIN d ON anon_1.anon_2_id = d.b_id ORDER BY d.id"
),
)
s.close()
|
FromSubqTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 96265,
"end": 98132
}
|
class ____(Response):
"""
Response of tasks.completed endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "completed"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(CompletedResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
|
CompletedResponse
|
python
|
huggingface__transformers
|
tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py
|
{
"start": 1655,
"end": 4958
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
num_channels=3,
image_size=32,
depth_multiplier=0.25,
min_depth=8,
tf_padding=True,
last_hidden_size=1024,
output_stride=32,
hidden_act="relu6",
classifier_dropout_prob=0.1,
initializer_range=0.02,
is_training=True,
use_labels=True,
num_labels=10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.min_depth = min_depth
self.tf_padding = tf_padding
self.last_hidden_size = int(last_hidden_size * depth_multiplier)
self.output_stride = output_stride
self.hidden_act = hidden_act
self.classifier_dropout_prob = classifier_dropout_prob
self.use_labels = use_labels
self.is_training = is_training
self.num_labels = num_labels
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config(self):
return MobileNetV1Config(
num_channels=self.num_channels,
image_size=self.image_size,
depth_multiplier=self.depth_multiplier,
min_depth=self.min_depth,
tf_padding=self.tf_padding,
hidden_act=self.hidden_act,
classifier_dropout_prob=self.classifier_dropout_prob,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
model = MobileNetV1Model(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape,
(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
config.num_labels = self.num_labels
model = MobileNetV1ForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
|
MobileNetV1ModelTester
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/engine/base_layer.py
|
{
"start": 123419,
"end": 128548
}
|
class ____(Layer):
"""Wraps a TensorFlow Operation in a Layer.
This class is used internally by the Functional API. When a user
uses a raw TensorFlow Operation on symbolic tensors originating
from an `Input` Layer, the resultant operation will be wrapped
with this Layer object in order to make the operation compatible
with the Keras API.
This Layer will create a new, identical operation (except for inputs
and outputs) every time it is called. If `run_eagerly` is `True`,
the op creation and calculation will happen inside an Eager function.
Instances of this Layer are created when `autolambda` is called, which
is whenever a Layer's `__call__` encounters symbolic inputs that do
not have Keras metadata, or when a Network's `__init__` encounters
outputs that do not have Keras metadata.
Attributes:
node_def: String, the serialized NodeDef of the Op this layer will wrap.
name: String, the name of the Layer.
constants: Dict of NumPy arrays, the values of any Tensors needed for this
Operation that do not originate from a Keras `Input` Layer. Since all
placeholders must come from Keras `Input` Layers, these Tensors must be
treated as constant in the Functional API.
trainable: Bool, whether this Layer is trainable. Currently Variables are
not supported, and so this parameter has no effect.
    dtype: The default dtype of this Layer. Inherited from `Layer`; it has no
      effect on this class, but is used in `get_config`.
"""
@trackable.no_automatic_dependency_tracking
def __init__(self,
node_def,
name,
constants=None,
trainable=True,
dtype=None):
# Pass autocast=False, as if inputs are cast, input types might not match
# Operation type.
super(TensorFlowOpLayer, self).__init__(
name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,
autocast=False)
if isinstance(node_def, dict):
self.node_def = json_format.ParseDict(node_def, node_def_pb2.NodeDef())
else:
if not isinstance(node_def, bytes):
node_def = node_def.encode('utf-8')
self.node_def = node_def_pb2.NodeDef.FromString(node_def)
# JSON serialization stringifies keys which are integer input indices.
self.constants = ({
int(index): constant for index, constant in constants.items()
} if constants is not None else {})
# Layer uses original op unless it is called on new inputs.
# This means `built` is not set in `__call__`.
self.built = True
# Do not individually trace TensorflowOpLayers in the SavedModel.
self._must_restore_from_config = True
def call(self, inputs):
if context.executing_eagerly():
return self._defun_call(inputs)
return self._make_op(inputs)
def _make_node_def(self, graph):
node_def = node_def_pb2.NodeDef()
node_def.CopyFrom(self.node_def)
# Used in TPUReplicateContext to indicate whether this node has been cloned
# and to not add TPU attributes.
node_def.attr['_cloned'].b = True
node_def.name = graph.unique_name(node_def.name)
return node_def
def _make_op(self, inputs):
inputs = nest.flatten(inputs)
graph = inputs[0].graph
node_def = self._make_node_def(graph)
with graph.as_default():
for index, constant in self.constants.items():
# Recreate constant in graph to add distribution context.
value = tensor_util.constant_value(constant)
if value is not None:
constant = constant_op.constant(value, name=node_def.input[index])
inputs.insert(index, constant)
# TODO(b/183990973): We should drop or consolidate these private api calls
# for adding an op to the graph and recording its gradient.
c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[])
op = graph._create_op_from_tf_operation(c_op)
op._control_flow_post_processing()
# Record the gradient because custom-made ops don't go through the
# code-gen'd eager call path
op_type = compat.as_str(op.op_def.name)
attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]
attrs = []
for attr_name in attr_names:
attrs.append(attr_name)
attrs.append(op.get_attr(attr_name))
attrs = tuple(attrs)
backprop.record_gradient(op_type, op.inputs, attrs, op.outputs)
if len(op.outputs) == 1:
return op.outputs[0]
return op.outputs
@def_function.function
def _defun_call(self, inputs):
"""Wraps the op creation method in an Eager function for `run_eagerly`."""
return self._make_op(inputs)
def get_config(self):
config = super(TensorFlowOpLayer, self).get_config()
config.update({
# `__init__` prefixes the name. Revert to the constructor argument.
'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],
'node_def': json_format.MessageToDict(self.node_def),
'constants': {
i: backend.get_value(c) for i, c in self.constants.items()
}
})
return config
|
TensorFlowOpLayer
|
python
|
celery__celery
|
t/unit/tasks/test_stamping.py
|
{
"start": 2492,
"end": 4291
}
|
class ____(StampingVisitor):
def on_signature(self, actual_sig: Signature, **headers) -> dict:
return {
"on_signature": ["ListStampingVisitor: on_signature-item1", "ListStampingVisitor: on_signature-item2"]
}
def on_group_start(self, actual_sig: Signature, **headers) -> dict:
return {
"on_group_start": [
"ListStampingVisitor: on_group_start-item1",
"ListStampingVisitor: on_group_start-item2",
]
}
def on_chain_start(self, actual_sig: Signature, **headers) -> dict:
return {
"on_chain_start": [
"ListStampingVisitor: on_chain_start-item1",
"ListStampingVisitor: on_chain_start-item2",
]
}
def on_chord_header_start(self, actual_sig: Signature, **header) -> dict:
s = super().on_chord_header_start(actual_sig, **header)
s.update(
{
"on_chord_header_start": [
"ListStampingVisitor: on_chord_header_start-item1",
"ListStampingVisitor: on_chord_header_start-item2",
]
}
)
return s
def on_chord_body(self, actual_sig: Signature, **header) -> dict:
return {
"on_chord_body": ["ListStampingVisitor: on_chord_body-item1", "ListStampingVisitor: on_chord_body-item2"]
}
def on_callback(self, actual_sig: Signature, **header) -> dict:
return {"on_callback": ["ListStampingVisitor: on_callback-item1", "ListStampingVisitor: on_callback-item2"]}
def on_errback(self, actual_sig: Signature, **header) -> dict:
return {"on_errback": ["ListStampingVisitor: on_errback-item1", "ListStampingVisitor: on_errback-item2"]}
|
ListStampingVisitor
|
python
|
kamyu104__LeetCode-Solutions
|
Python/replace-elements-in-an-array.py
|
{
"start": 72,
"end": 515
}
|
class ____(object):
def arrayChange(self, nums, operations):
"""
:type nums: List[int]
:type operations: List[List[int]]
:rtype: List[int]
"""
lookup = {x:i for i, x in enumerate(nums)}
for x, y in operations:
lookup[y] = lookup.pop(x)
for x, i in lookup.iteritems():
nums[i] = x
return nums
# Time: O(n + m)
# Space: O(n)
# hash table
|
Solution
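The solution above is written for Python 2 (dict.iteritems); an equivalent Python 3 sketch of the same index-map idea:

def array_change(nums, operations):
    # Map each value to its index, retarget the index on every replacement,
    # then write the final values back in one pass (O(n + m) time, O(n) space).
    lookup = {x: i for i, x in enumerate(nums)}
    for x, y in operations:
        lookup[y] = lookup.pop(x)
    for x, i in lookup.items():
        nums[i] = x
    return nums

print(array_change([1, 2, 4, 6], [[1, 3], [4, 7], [6, 1]]))   # [3, 2, 7, 1]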
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
|
{
"start": 159322,
"end": 163108
}
|
class ____(Qwen3OmniMoePreTrainedModel):
_can_record_outputs = {
"hidden_states": Qwen3OmniMoeCode2WavTransformerLayer,
"attentions": Qwen3OmniMoeCode2WavAttention,
}
def __init__(self, config: Qwen3OmniMoeCode2WavConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[Qwen3OmniMoeCode2WavTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Qwen3OmniMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Qwen3OmniMoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
self.window_size = config.sliding_window
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
cache_position=None,
**kwargs,
) -> BaseModelOutputWithPast:
if input_ids is not None:
raise ValueError("input_ids is not expected")
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
# The sliding window alternating layers are not always activated depending on the config
if self.has_sliding_layers:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
|
Qwen3OmniMoeCode2WavTransformerModel
|
python
|
pydata__xarray
|
asv_bench/benchmarks/indexing.py
|
{
"start": 3773,
"end": 4253
}
|
class ____(Base):
@parameterized(["key"], [list(basic_indexes.keys())])
def time_indexing_basic(self, key):
self.ds.isel(**basic_indexes[key])
@parameterized(["key"], [list(outer_indexes.keys())])
def time_indexing_outer(self, key):
self.ds.isel(**outer_indexes[key])
@parameterized(["key"], [list(big_vectorized_indexes.keys())])
def time_indexing_big_vectorized(self, key):
self.ds.isel(**big_vectorized_indexes[key])
|
IndexingOnly
|
python
|
pytorch__pytorch
|
test/test_autograd.py
|
{
"start": 423108,
"end": 464098
}
|
class ____(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor(
[1.0, 0.0, 1.0, 0.0, 1.0, 0.0], device=device, requires_grad=True
)
x2 = torch.tensor(
[float("nan"), float("nan"), float("nan")], requires_grad=True
)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.0)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_scatter_index_reduce_amin_amax_backprops_to_all_values(self, device):
# tests that gradients are evenly distributed when there are multiple max/min values
# tested here instead of adding a SampleInput as the backward for this case is non-differentiable for gradgrad
# as is the case for test_min_max_median_backprops_to_all_values above
fns = (torch.scatter_reduce, torch.index_reduce)
reduces = ("amin", "amax")
for fn, reduction in product(fns, reduces):
input = torch.randn(
(2, 3), device=device, dtype=torch.float64, requires_grad=True
)
src = input.clone().detach_().requires_grad_(True)
idx = torch.arange(2).to(dtype=torch.long, device=device)
if fn == torch.scatter_reduce:
idx = idx.unsqueeze(-1).expand((2, 3))
gradcheck(fn, (input, 0, idx, src, reduction), check_batched_grad=False)
def test_scatter_index_reduce_prod_gradgrad_error(self, device):
# test that double backward raises an error for the case where 2 zeros in src
# are scattered to the same position in self
input = torch.tensor(
[1.0], device=device, dtype=torch.float64, requires_grad=True
)
src = torch.tensor(
[0.0, 0.0], device=device, dtype=torch.float64, requires_grad=True
)
idx = torch.tensor([0, 0], device=device, dtype=torch.long)
for fn in (torch.scatter_reduce, torch.index_reduce):
# check that this case passes on gradcheck
gradcheck(fn, (input, 0, idx, src, "prod"), check_batched_grad=False)
with self.assertRaisesRegex(
RuntimeError, "Double backward is unsupported for"
):
gradgradcheck(fn, (input, 0, idx, src, "prod"))
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for _ in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@skipIfMPS # the test doesn't work on MPS as double types are not supported
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(
v_size, dtype=torch.double, device=device, requires_grad=True
)
other = self.genSparseTensor(
size, sparse_dim, nnz, is_uncoalesced=True, device=device, dtype=dtype
)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(
torch.ones_like(other._values())
)
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@skipMeta
@skipIfMPS
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
(saved_grad_x,) = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([[0, 3, 4], [0, 2, 2]], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([[0, 1, 3, 4], [0, 1, 2, 2]], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(
fn.apply(x, sparse_grad1)
+ fn.apply(x, dense_grad)
+ fn.apply(x, sparse_grad2)
).sum().abs().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(
fn.apply(x, dense_grad)
+ fn.apply(x, sparse_grad1)
+ fn.apply(x, sparse_grad2)
).sum().abs().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().abs().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
@skipIfMPS
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, bool(tensor))
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfMPS(torch.float32)
@dtypesIfCUDA(
torch.half,
torch.float,
torch.double,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
)
@dtypes(
torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64
)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(
RuntimeError,
"floating point",
msg=f"dt: {a.dtype} device: {a.device}",
):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = 1 << 16
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(
memory_format=torch.channels_last
)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent graph cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for _ in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
def _get_cuda_memory_usage():
            # we don't need CUDA synchronize because the statistics are not tracked at
            # actual freeing, but when marking the block as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
before = _get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = _get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = _get_cuda_memory_usage()
self.assertEqual(before, after)
@skipIfMPS # the test doesn't work on MPS
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
@skipIfMPS # the test doesn't work on MPS
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.0
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither of
# (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@unittest.skipIf(not torch.profiler.itt.is_available(), "ITT is required")
def test_profiler_emit_itt(self, device):
# This test is not intended to ensure correctness of itt ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_itt breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with emit_itt():
a.add(1.0)
@skipIfMPS # the test doesn't work as randn is not supported with type long
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong type raises
with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
x.grad = 0
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != "cpu":
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == "cuda":
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@dtypesIfMPS(torch.float32)
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(
x, dtype=dtype, device=devices[0], requires_grad=requires_grad
)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml
# there was a bug that bfloat16 was not recognized as floating.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = floating_types_and(torch.half, torch.bfloat16)
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
def test_copy_forward_ad_broadcasting(self, device):
# copy_ allows the src to have a different shape from self as long as src is
# broadcastable to self. Make sure forward AD handles this case.
primal = torch.rand(3, 3, device=device)
tangent = torch.rand(3, 3, device=device)
non_dual = torch.rand(1, 3, 3, device=device)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
non_dual.copy_(dual)
def test_copy_forward_ad_same_layout_copies_grad(self, device):
primal = torch.tensor([[3.0], [4.0]], device=device)
tangent = torch.tensor([[5.0], [6.0]], device=device)
with fwAD.dual_level():
x_dual = fwAD.make_dual(primal, tangent)
non_dual = torch.tensor([[1.0], [2.0]])
non_dual.copy_(x_dual)
self.assertTrue(fwAD.unpack_dual(non_dual).tangent is not tangent)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param**2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param**2).sum().backward()
return grad_output
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have lower priorities than the
# ones created by fn_on_gpu.
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors, so the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread on behalf of the gpu thread.
# The cpu thread will then notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp, use_reentrant=True)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(
a.size(), dtype=torch.double, device=device, requires_grad=True
)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9.0, dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(
10, dtype=torch.double, device=device, requires_grad=True
).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
@skipIfMPS # MPS backend doesn't support double types
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(
10, dtype=torch.double, device=device, requires_grad=True
).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
@skipIfMPS # MPS backend doesn't support double types
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(
a.size(), dtype=torch.double, device=device, requires_grad=True
)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2.0, 5.0], device=device, requires_grad=False)
b = torch.tensor([3.0], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(
a.size(), dtype=torch.double, device=device, requires_grad=True
)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [
lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3),
]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [
lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3),
]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = (
"This view is the output of a function that returns multiple views."
)
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_inplace_on_view_undefined_grad_output(self, device):
a = torch.tensor([1.0], requires_grad=True)
c = a.clone()
v = c[:]
b = torch.tensor(1.0, requires_grad=True)
class InplaceFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x, other):
ctx.mark_dirty(x)
return x.mul_(2)
@staticmethod
def backward(ctx, grad):
return grad * 2, None
out = InplaceFunc.apply(v, b)
out.backward()
self.assertIsNone(b.grad)
self.assertEqual(a.grad.item(), 2)
@skipIfMPS # the test doesn't work on MPS as double types are not supported
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If the leaf isn't dense, check that grads are row-major contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(
torch.rand((2, 2), device=device)
)
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
@skipIfMPS
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device, requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c_inter = out_c.sum()
out_c_inter.abs().backward()
with torch.no_grad():
self.assertEqual(
inp_r.grad, torch.ones_like(inp_r) * torch.sgn(out_c_inter).real
)
self.assertNotWarn(do_test)
def test_to_r_to_c(self, device):
def do_test():
inp_r = torch.randn(
3, 2, dtype=torch.double, device=device, requires_grad=True
)
out = inp_r.to(torch.complex128)
out_inter = out.sum()
out_inter.abs().backward()
with torch.no_grad():
self.assertEqual(
inp_r.grad, torch.ones_like(inp_r) * torch.sgn(out_inter).real
)
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and the resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.0]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
# Test that warnings raised during backward are always propagated as Python warnings (gh-50209)
# NOTE: For device=cuda, warning gets propagated from a worker thread
a = torch.zeros((), device=device, requires_grad=True)
b = torch._C._nn._test_warn_in_autograd(a)
with self.assertWarnsRegex(UserWarning, "Warn from backward"):
b.backward()
def test_complex_scalar_backward(self, device):
a = torch.zeros(1, device=device, requires_grad=True)
b = a * 0.5j
msg = "grad can be implicitly created only for real scalar outputs"
with self.assertRaisesRegex(RuntimeError, msg):
b.backward()
with self.assertRaisesRegex(RuntimeError, msg):
torch.autograd.grad(b, a)
def test_pow_real_negative_base_complex_exponent(self, device):
# OpInfo doesn't naturally support inputs of mixed types, hence this test.
base = -torch.ones(2, device=device, dtype=torch.double)
exponent = torch.randn(
2, device=device, dtype=torch.cdouble, requires_grad=True
)
def fn(exponent):
return torch.pow(base, exponent)
torch.autograd.gradcheck(fn, (exponent,))
def fn(exponent):
return torch.pow(-1, exponent)
torch.autograd.gradcheck(fn, (exponent,))
def test_resize_version_bump(self, device):
x = torch.rand((1,), device=device)
y = torch.randn((3,), device=device)
x.resize_((1, 2))
self.assertEqual(x._version, 1)
x.resize_as_(y)
self.assertEqual(x._version, 2)
# In the following cases, `resize` is a no-op,
# so the version counter is not bumped.
x.resize_((3,))
self.assertEqual(x._version, 2)
x.resize_as_(y)
self.assertEqual(x._version, 2)
@unittest.skipIf(not torch.accelerator.is_available(), "requires accelerator")
def test_zero_dim_param_mixed_device_grad(self, device):
# cpu 0-dim params with an accelerator device grad
# https://github.com/pytorch/pytorch/issues/160084
class RegressionModel(torch.nn.Module):
def __init__(self, a=0, b=0):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(a).float())
self.b = torch.nn.Parameter(torch.tensor(b).float())
def forward(self, x):
return x * self.a + self.b
# Keep the model on cpu as we do want to test the mixed cpu/accelerator behavior here
model = RegressionModel()
inputs = torch.randn(4, 10, device=device)
out = model(inputs)
out.sum().backward()
self.assertIsNotNone(model.a.grad)
self.assertIsNotNone(model.b.grad)
self.assertEqual(model.a.grad.device, torch.device("cpu"))
self.assertEqual(model.b.grad.device, torch.device("cpu"))
|
TestAutogradDeviceType
|
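A minimal standalone sketch of the in-place-on-view gradient behaviour the autograd tests above exercise, assuming only a stock PyTorch install: mutating a narrowed view scales the gradient that flows back to the matching slice of the base tensor.

import torch

root = torch.randn(2, 2, requires_grad=True)
x = root.clone()
x.narrow(0, 0, 1).mul_(2)  # in-place op on a view of x
x.sum().backward()
print(root.grad)  # first row is all 2s, second row is all 1s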
python
|
spack__spack
|
lib/spack/spack/compilers/libraries.py
|
{
"start": 12985,
"end": 13415
}
|
class ____:
"""Base class for compiler output cache. Default implementation does not cache anything."""
def value(self, compiler: spack.spec.Spec) -> Dict[str, Optional[str]]:
return {"c_compiler_output": CompilerPropertyDetector(compiler)._compile_dummy_c_source()}
def get(self, compiler: spack.spec.Spec) -> CompilerCacheEntry:
return CompilerCacheEntry.from_dict(self.value(compiler))
|
CompilerCache
|
python
|
scipy__scipy
|
scipy/fft/tests/test_helper.py
|
{
"start": 17814,
"end": 18682
}
|
class ____:
def test_definition(self, xp):
x = xp.asarray([0, 1, 2, 3, 4, -4, -3, -2, -1], dtype=xp.float64)
x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=xp.float64)
# default dtype varies across backends
y = 9 * fft.fftfreq(9, xp=xp)
xp_assert_close(y, x, check_dtype=False, check_namespace=True)
y = 9 * xp.pi * fft.fftfreq(9, xp.pi, xp=xp)
xp_assert_close(y, x, check_dtype=False)
y = 10 * fft.fftfreq(10, xp=xp)
xp_assert_close(y, x2, check_dtype=False)
y = 10 * xp.pi * fft.fftfreq(10, xp.pi, xp=xp)
xp_assert_close(y, x2, check_dtype=False)
def test_device(self, xp, devices):
for d in devices:
y = fft.fftfreq(9, xp=xp, device=d)
x = xp.empty(0, device=d)
assert xp_device(y) == xp_device(x)
|
TestFFTFreq
|
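A quick standalone check of the identity the fftfreq test above relies on, assuming a stock SciPy install (the array-API `xp`/`device` keywords used in the test are omitted here): n * fftfreq(n) enumerates the sample frequencies in wrapped order.

from scipy import fft

print(9 * fft.fftfreq(9))    # [ 0.  1.  2.  3.  4. -4. -3. -2. -1.]
print(10 * fft.fftfreq(10))  # [ 0.  1.  2.  3.  4. -5. -4. -3. -2. -1.]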
python
|
chroma-core__chroma
|
chromadb/execution/executor/local.py
|
{
"start": 1356,
"end": 7520
}
|
class ____(Executor):
_manager: LocalSegmentManager
def __init__(self, system: System):
super().__init__(system)
self._manager = self.require(LocalSegmentManager)
@overrides
def count(self, plan: CountPlan) -> int:
return self._metadata_segment(plan.scan.collection).count(plan.scan.version)
@overrides
def get(self, plan: GetPlan) -> GetResult:
records = self._metadata_segment(plan.scan.collection).get_metadata(
request_version_context=plan.scan.version,
where=plan.filter.where,
where_document=plan.filter.where_document,
ids=plan.filter.user_ids,
limit=plan.limit.limit,
offset=plan.limit.offset,
include_metadata=True,
)
ids = [r["id"] for r in records]
embeddings = None
documents = None
uris = None
metadatas = None
included = list()
if plan.projection.embedding:
if len(records) > 0:
vectors = self._vector_segment(plan.scan.collection).get_vectors(
ids=ids, request_version_context=plan.scan.version
)
embeddings = [v["embedding"] for v in vectors]
else:
embeddings = list()
included.append("embeddings")
if plan.projection.document:
documents = [_doc(r["metadata"]) for r in records]
included.append("documents")
if plan.projection.uri:
uris = [_uri(r["metadata"]) for r in records]
included.append("uris")
if plan.projection.metadata:
metadatas = [_clean_metadata(r["metadata"]) for r in records]
included.append("metadatas")
# TODO: Fix typing
return GetResult(
ids=ids,
embeddings=embeddings,
documents=documents, # type: ignore[typeddict-item]
uris=uris, # type: ignore[typeddict-item]
data=None,
metadatas=metadatas, # type: ignore[typeddict-item]
included=included,
)
@overrides
def knn(self, plan: KNNPlan) -> QueryResult:
prefiltered_ids = None
if plan.filter.user_ids or plan.filter.where or plan.filter.where_document:
records = self._metadata_segment(plan.scan.collection).get_metadata(
request_version_context=plan.scan.version,
where=plan.filter.where,
where_document=plan.filter.where_document,
ids=plan.filter.user_ids,
limit=None,
offset=0,
include_metadata=False,
)
prefiltered_ids = [r["id"] for r in records]
knns: Sequence[Sequence[VectorQueryResult]] = [[]] * len(plan.knn.embeddings)
# Query vectors only when the user did not specify a filter or when the filter
# yields non-empty ids. Otherwise, the user specified a filter but it yields
# no matching ids, in which case we can return an empty result.
if prefiltered_ids is None or len(prefiltered_ids) > 0:
query = VectorQuery(
vectors=plan.knn.embeddings,
k=plan.knn.fetch,
allowed_ids=prefiltered_ids,
include_embeddings=plan.projection.embedding,
options=None,
request_version_context=plan.scan.version,
)
knns = self._vector_segment(plan.scan.collection).query_vectors(query)
ids = [[r["id"] for r in result] for result in knns]
embeddings = None
documents = None
uris = None
metadatas = None
distances = None
included = list()
if plan.projection.embedding:
embeddings = [[r["embedding"] for r in result] for result in knns]
included.append("embeddings")
if plan.projection.rank:
distances = [[r["distance"] for r in result] for result in knns]
included.append("distances")
if plan.projection.document or plan.projection.metadata or plan.projection.uri:
merged_ids = list(set([id for result in ids for id in result]))
hydrated_records = self._metadata_segment(
plan.scan.collection
).get_metadata(
request_version_context=plan.scan.version,
where=None,
where_document=None,
ids=merged_ids,
limit=None,
offset=0,
include_metadata=True,
)
metadata_by_id = {r["id"]: r["metadata"] for r in hydrated_records}
if plan.projection.document:
documents = [
[_doc(metadata_by_id.get(id, None)) for id in result]
for result in ids
]
included.append("documents")
if plan.projection.uri:
uris = [
[_uri(metadata_by_id.get(id, None)) for id in result]
for result in ids
]
included.append("uris")
if plan.projection.metadata:
metadatas = [
[_clean_metadata(metadata_by_id.get(id, None)) for id in result]
for result in ids
]
included.append("metadatas")
# TODO: Fix typing
return QueryResult(
ids=ids,
embeddings=embeddings, # type: ignore[typeddict-item]
documents=documents, # type: ignore[typeddict-item]
uris=uris, # type: ignore[typeddict-item]
data=None,
metadatas=metadatas, # type: ignore[typeddict-item]
distances=distances,
included=included,
)
def _metadata_segment(self, collection: Collection) -> MetadataReader:
return self._manager.get_segment(collection.id, MetadataReader)
def _vector_segment(self, collection: Collection) -> VectorReader:
return self._manager.get_segment(collection.id, VectorReader)
|
LocalExecutor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-posthog/source_posthog/components.py
|
{
"start": 500,
"end": 2122
}
|
class ____(SimpleRetriever):
def __post_init__(self, parameters: Mapping[str, Any]):
super().__post_init__(parameters)
self.cursor = self.stream_slicer if isinstance(self.stream_slicer, Cursor) else None
def request_params(
self,
stream_state: StreamSlice,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
"""Events API return records in descendent order (newest first).
Default page limit is 100 items.
Even though API mentions such pagination params as 'limit' and 'offset', they are actually ignored.
Instead, response contains 'next' url with datetime range for next OLDER records, like:
response:
{
"next": "https://app.posthog.com/api/projects/2331/events?after=2021-01-01T00%3A00%3A00.000000Z&before=2021-05-29T16%3A44%3A43.175000%2B00%3A00",
"results": [
{id ...},
{id ...},
]
}
So if next_page_token is set (contains 'after'/'before' params),
then stream_slice params ('after'/'before') should be ignored.
"""
if next_page_token:
stream_slice = {}
return self._get_request_options(
stream_slice,
next_page_token,
self.requester.get_request_params,
self.paginator.get_request_params,
self.stream_slicer.get_request_params,
self.requester.get_authenticator().get_request_body_json,
)
@dataclass
|
EventsSimpleRetriever
|
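A stripped-down illustration of the pagination rule the retriever's docstring above describes, using purely hypothetical names rather than the Airbyte CDK API: once a response hands back a `next` cursor, the slice's own after/before window is dropped so the cursor's range wins.

def build_params(stream_slice: dict, next_page_token: dict | None) -> dict:
    # First page: use the slice's datetime window; later pages: the cursor
    # already carries its own 'after'/'before', so the slice is ignored.
    if next_page_token:
        return dict(next_page_token)
    return dict(stream_slice)

print(build_params({"after": "2021-01-01"}, None))
print(build_params({"after": "2021-01-01"}, {"after": "2021-05-29", "before": "2021-06-01"}))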
python
|
sympy__sympy
|
sympy/stats/crv_types.py
|
{
"start": 16814,
"end": 18834
}
|
class ____(SingleContinuousDistribution):
_argnames = ('alpha', 'left', 'right')
@property
def set(self):
return Interval(self.left, self.right)
@staticmethod
def check(alpha, left, right):
_value_check(alpha.is_positive, "Shape must be positive.")
_value_check(left.is_positive, "Left value should be positive.")
_value_check(right > left, "Right should be greater than left.")
def pdf(self, x):
alpha, left, right = self.alpha, self.left, self.right
num = alpha * (left**alpha) * x**(- alpha -1)
den = 1 - (left/right)**alpha
return num/den
def BoundedPareto(name, alpha, left, right):
r"""
Create a continuous random variable with a Bounded Pareto distribution.
The density of the Bounded Pareto distribution is given by
.. math::
f(x) := \frac{\alpha L^{\alpha}x^{-\alpha-1}}{1-(\frac{L}{H})^{\alpha}}
Parameters
==========
alpha : Real Number, `\alpha > 0`
Shape parameter
left : Real Number, `left > 0`
Location parameter
right : Real Number, `right > left`
Location parameter
Examples
========
>>> from sympy.stats import BoundedPareto, density, cdf, E
>>> from sympy import symbols
>>> L, H = symbols('L, H', positive=True)
>>> X = BoundedPareto('X', 2, L, H)
>>> x = symbols('x')
>>> density(X)(x)
2*L**2/(x**3*(1 - L**2/H**2))
>>> cdf(X)(x)
Piecewise((-H**2*L**2/(x**2*(H**2 - L**2)) + H**2/(H**2 - L**2), L <= x), (0, True))
>>> E(X).simplify()
2*H*L/(H + L)
Returns
=======
RandomSymbol
References
==========
.. [1] https://en.wikipedia.org/wiki/Pareto_distribution#Bounded_Pareto_distribution
"""
return rv(name, BoundedParetoDistribution, (alpha, left, right))
# ------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
|
BoundedParetoDistribution
|
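A small sanity sketch for the density defined above, with a concrete shape value chosen for illustration: it integrates to 1 on [left, right].

from sympy import symbols, integrate, simplify

L, H, x = symbols('L H x', positive=True)
alpha = 2
pdf = alpha * L**alpha * x**(-alpha - 1) / (1 - (L / H)**alpha)
print(simplify(integrate(pdf, (x, L, H))))  # 1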
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_run_launcher.py
|
{
"start": 10669,
"end": 13609
}
|
class ____(BaseTestSuite):
def test_launch_multiple_runs_success_and_failure(
self, graphql_context: WorkspaceRequestContext
):
launchSuccessExecutionParams = [
{
"selector": {
"repositoryLocationName": "test_location",
"repositoryName": "test_repo",
"pipelineName": "no_config_job",
"solidSelection": None,
"assetSelection": None,
"assetCheckSelection": None,
},
"mode": "default",
},
{
"selector": {
"repositoryLocationName": "test_location",
"repositoryName": "test_repo",
"pipelineName": "no_config_job",
"solidSelection": None,
"assetSelection": None,
"assetCheckSelection": None,
},
"mode": "default",
},
]
pipelineNotFoundExecutionParams = [
{
"selector": {
"repositoryLocationName": "test_location",
"repositoryName": "test_dict_repo",
"pipelineName": "no_config_job",
"solidSelection": None,
"assetSelection": None,
"assetCheckSelection": None,
},
"mode": "default",
},
{
"selector": {
"repositoryLocationName": "test_location",
"repositoryName": "test_dict_repo",
"pipelineName": "no_config_job",
"solidSelection": None,
"assetSelection": None,
"assetCheckSelection": None,
},
"mode": "default",
},
]
executionParamsList = list(launchSuccessExecutionParams)
executionParamsList.extend(pipelineNotFoundExecutionParams)
result: GqlResult = execute_dagster_graphql(
context=graphql_context,
query=LAUNCH_MULTIPLE_RUNS_MUTATION,
variables={"executionParamsList": executionParamsList},
)
assert "launchMultipleRuns" in result.data
result_data = result.data["launchMultipleRuns"]
assert result_data["__typename"] == "LaunchMultipleRunsResult"
results = result_data["launchMultipleRunsResult"]
assert len(results) == 4
assert results[0]["__typename"] == "LaunchRunSuccess"
assert results[1]["__typename"] == "LaunchRunSuccess"
assert results[2]["__typename"] == "PipelineNotFoundError"
assert results[3]["__typename"] == "PipelineNotFoundError"
|
TestSuccessAndFailureMultipleLaunch
|
python
|
mlflow__mlflow
|
mlflow/gateway/schemas/completions.py
|
{
"start": 2023,
"end": 2287
}
|
class ____(ResponseModel):
id: str | None = None
object: str = "text_completion_chunk"
created: int
model: str
choices: list[StreamChoice]
model_config = ConfigDict(json_schema_extra=_STREAM_RESPONSE_PAYLOAD_EXTRA_SCHEMA)
|
StreamResponsePayload
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/monitored_session_test.py
|
{
"start": 24023,
"end": 24358
}
|
class ____(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
|
StopAtNSession
|
python
|
walkccc__LeetCode
|
solutions/1652. Defuse the Bomb/1652.py
|
{
"start": 0,
"end": 500
}
|
class ____:
def decrypt(self, code: list[int], k: int) -> list[int]:
n = len(code)
ans = [0] * n
if k == 0:
return ans
summ = 0
start = 1 if k > 0 else n + k  # the start of the initial window to sum
end = k if k > 0 else n - 1  # the end of the initial window to sum
for i in range(start, end + 1):
summ += code[i]
for i in range(n):
ans[i] = summ
summ -= code[start % n]
start += 1
end += 1
summ += code[end % n]
return ans
|
Solution
|
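A quick usage check of the sliding-window decrypt above, assuming the masked class is named Solution as the target column indicates: each element is replaced by the circular sum of the next k elements when k > 0, or of the previous |k| elements when k < 0.

print(Solution().decrypt([5, 7, 1, 4], 3))   # [12, 10, 16, 13]
print(Solution().decrypt([2, 4, 9, 3], -2))  # [12, 5, 6, 13]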
python
|
wandb__wandb
|
wandb/automations/scopes.py
|
{
"start": 518,
"end": 683
}
|
class ____(LenientStrEnum):
"""The kind of scope that triggers an automation."""
PROJECT = "PROJECT"
ARTIFACT_COLLECTION = "ARTIFACT_COLLECTION"
|
ScopeType
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_dlp.py
|
{
"start": 9798,
"end": 10557
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_dlp_job(self, mock_hook):
mock_hook.return_value.delete_dlp_job.return_value = mock.MagicMock()
operator = CloudDLPDeleteDLPJobOperator(dlp_job_id=DLP_JOB_ID, project_id=PROJECT_ID, task_id="id")
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_dlp_job.assert_called_once_with(
dlp_job_id=DLP_JOB_ID,
project_id=PROJECT_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
|
TestCloudDLPDeleteDlpJobOperator
|
python
|
pandas-dev__pandas
|
pandas/tests/reshape/test_melt.py
|
{
"start": 943,
"end": 19650
}
|
class ____:
def test_top_level_method(self, df):
result = melt(df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self, df, df1, var_name, value_name):
tm.assert_frame_equal(df.melt(), melt(df))
tm.assert_frame_equal(
df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
df.melt(var_name=var_name, value_name=value_name),
melt(df, var_name=var_name, value_name=value_name),
)
tm.assert_frame_equal(df1.melt(col_level=0), melt(df1, col_level=0))
def test_default_col_names(self, df):
result = df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self, df):
result3 = df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": df["id1"].tolist() * 2,
"id2": df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (df["A"].tolist() + df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
@pytest.mark.parametrize("type_", (tuple, list, np.array))
def test_value_vars_types(self, type_, df):
# GH 15348
expected = DataFrame(
{
"id1": df["id1"].tolist() * 2,
"id2": df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (df["A"].tolist() + df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
result = df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self, df1):
expected = DataFrame(
{
("A", "a"): df1[("A", "a")],
"CAP": ["B"] * len(df1),
"low": ["b"] * len(df1),
"value": df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
},
),
(
["a"],
["b"],
1,
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
},
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected, df1
):
result = df1.melt(id_vars, value_vars, col_level=col_level)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars",
[
[("A", "a"), [("B", "b")]],
[[("A", "a")], ("B", "b")],
[("A", "a"), ("B", "b")],
],
)
def test_tuple_vars_fail_with_multiindex(self, id_vars, value_vars, df1):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self, df, var_name):
result5 = df.melt(var_name=var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = df.melt(id_vars=["id1"], var_name=var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = df.melt(id_vars=["id1", "id2"], var_name=var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = df.melt(id_vars=["id1", "id2"], value_vars="A", var_name=var_name)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=var_name
)
expected9 = DataFrame(
{
"id1": df["id1"].tolist() * 2,
"id2": df["id2"].tolist() * 2,
var_name: ["A"] * 10 + ["B"] * 10,
"value": (df["A"].tolist() + df["B"].tolist()),
},
columns=["id1", "id2", var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self, df, value_name):
result10 = df.melt(value_name=value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = df.melt(id_vars=["id1"], value_name=value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = df.melt(id_vars=["id1", "id2"], value_name=value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=value_name
)
expected14 = DataFrame(
{
"id1": df["id1"].tolist() * 2,
"id2": df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
value_name: (df["A"].tolist() + df["B"].tolist()),
},
columns=["id1", "id2", "variable", value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self, df, value_name, var_name):
result15 = df.melt(var_name=var_name, value_name=value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = df.melt(id_vars=["id1"], var_name=var_name, value_name=value_name)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = df.melt(
id_vars=["id1", "id2"], var_name=var_name, value_name=value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=var_name,
value_name=value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=var_name,
value_name=value_name,
)
expected19 = DataFrame(
{
"id1": df["id1"].tolist() * 2,
"id2": df["id2"].tolist() * 2,
var_name: ["A"] * 10 + ["B"] * 10,
value_name: (df["A"].tolist() + df["B"].tolist()),
},
columns=["id1", "id2", var_name, value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
@pytest.mark.parametrize("col_level", [0, "CAP"])
def test_col_level(self, col_level, df1):
res = df1.melt(col_level=col_level)
assert res.columns.tolist() == ["CAP", "value"]
def test_multiindex(self, df1):
res = df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
date_range("2010", periods=5, tz="US/Pacific"),
pd.Categorical(["a", "b", "c", "a", "d"]),
[0, 1, 0, 0, 0],
],
)
def test_pandas_dtypes(self, col):
# GH 15785
col = pd.Series(col)
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd")
)
# Try to melt with missing `value_vars` column name
msg = "The following id_vars or value_vars are not present in the DataFrame:"
with pytest.raises(KeyError, match=msg):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg,
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
df.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg):
df.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(KeyError, match=msg):
df.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
# the df's columns are mixed type and thus object -> preserves object dtype
expected["variable"] = expected["variable"].astype(object)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
def test_ignore_multiindex(self):
# GH 17440
index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
)
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
)
expected = DataFrame(
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_ignore_index_name_and_type(self):
# GH 17440
index = Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_melt_with_duplicate_columns(self):
# GH#41951
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
result = df.melt(id_vars=["a"], value_vars=["b"])
expected = DataFrame(
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["Int8", "Int64"])
def test_melt_ea_dtype(self, dtype):
# GH#41570
df = DataFrame(
{
"a": pd.Series([1, 2], dtype="Int8"),
"b": pd.Series([3, 4], dtype=dtype),
}
)
result = df.melt()
expected = DataFrame(
{
"variable": ["a", "a", "b", "b"],
"value": pd.Series([1, 2, 3, 4], dtype=dtype),
}
)
tm.assert_frame_equal(result, expected)
def test_melt_ea_columns(self):
# GH 54297
df = DataFrame(
{
"A": {0: "a", 1: "b", 2: "c"},
"B": {0: 1, 1: 3, 2: 5},
"C": {0: 2, 1: 4, 2: 6},
}
)
df.columns = df.columns.astype("string[python]")
result = df.melt(id_vars=["A"], value_vars=["B"])
expected = DataFrame(
{
"A": list("abc"),
"variable": pd.Series(["B"] * 3, dtype="string[python]"),
"value": [1, 3, 5],
}
)
tm.assert_frame_equal(result, expected)
def test_melt_preserves_datetime(self):
df = DataFrame(
data=[
{
"type": "A0",
"start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"),
"end_date": pd.Timestamp("2023/03/10", tz="Asia/Tokyo"),
},
{
"type": "A1",
"start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"),
"end_date": pd.Timestamp("2023/03/11", tz="Asia/Tokyo"),
},
],
index=["aaaa", "bbbb"],
)
result = df.melt(
id_vars=["type"],
value_vars=["start_date", "end_date"],
var_name="start/end",
value_name="date",
)
expected = DataFrame(
{
"type": {0: "A0", 1: "A1", 2: "A0", 3: "A1"},
"start/end": {
0: "start_date",
1: "start_date",
2: "end_date",
3: "end_date",
},
"date": {
0: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"),
1: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"),
2: pd.Timestamp("2023-03-10 00:00:00+0900", tz="Asia/Tokyo"),
3: pd.Timestamp("2023-03-11 00:00:00+0900", tz="Asia/Tokyo"),
},
}
)
tm.assert_frame_equal(result, expected)
def test_melt_allows_non_scalar_id_vars(self):
df = DataFrame(
data={"a": [1, 2, 3], "b": [4, 5, 6]},
index=["11", "22", "33"],
)
result = df.melt(
id_vars="a",
var_name=0,
value_name=1,
)
expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]})
tm.assert_frame_equal(result, expected)
def test_melt_allows_non_string_var_name(self):
df = DataFrame(
data={"a": [1, 2, 3], "b": [4, 5, 6]},
index=["11", "22", "33"],
)
result = df.melt(
id_vars=["a"],
var_name=0,
value_name=1,
)
expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]})
tm.assert_frame_equal(result, expected)
def test_melt_non_scalar_var_name_raises(self):
df = DataFrame(
data={"a": [1, 2, 3], "b": [4, 5, 6]},
index=["11", "22", "33"],
)
with pytest.raises(ValueError, match=r".* must be a scalar."):
df.melt(id_vars=["a"], var_name=[1, 2])
def test_melt_multiindex_columns_var_name(self):
# GH 58033
df = DataFrame({("A", "a"): [1], ("A", "b"): [2]})
expected = DataFrame(
[("A", "a", 1), ("A", "b", 2)], columns=["first", "second", "value"]
)
tm.assert_frame_equal(df.melt(var_name=["first", "second"]), expected)
tm.assert_frame_equal(df.melt(var_name=["first"]), expected[["first", "value"]])
def test_melt_multiindex_columns_var_name_too_many(self):
# GH 58033
df = DataFrame({("A", "a"): [1], ("A", "b"): [2]})
with pytest.raises(
ValueError, match="but the dataframe columns only have 2 levels"
):
df.melt(var_name=["first", "second", "third"])
def test_melt_duplicate_column_header_raises(self):
# GH61475
df = DataFrame([[1, 2, 3], [3, 4, 5]], columns=["A", "A", "B"])
msg = "id_vars cannot contain duplicate columns."
with pytest.raises(ValueError, match=msg):
df.melt(id_vars=["A"], value_vars=["B"])
|
TestMelt
|
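A tiny standalone illustration of the wide-to-long reshape these melt tests cover, using stock pandas and illustrative column names.

import pandas as pd

df = pd.DataFrame({"id1": [1, 2], "A": [10, 20], "B": [30, 40]})
long = df.melt(id_vars=["id1"], value_vars=["A", "B"], var_name="var", value_name="val")
print(long)
#    id1 var  val
# 0    1   A   10
# 1    2   A   20
# 2    1   B   30
# 3    2   B   40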
python
|
getsentry__sentry
|
src/sentry/models/orgauthtoken.py
|
{
"start": 1452,
"end": 6887
}
|
class ____(ReplicatedControlModel):
__relocation_scope__ = RelocationScope.Organization
category = OutboxCategory.ORG_AUTH_TOKEN_UPDATE
organization_id = HybridCloudForeignKey("sentry.Organization", null=False, on_delete="CASCADE")
# The JWT token in hashed form
token_hashed = models.TextField(unique=True, null=False)
# An optional representation of the last characters of the original token, to be shown to the user
token_last_characters = models.CharField(max_length=4, null=True)
name = models.CharField(max_length=MAX_NAME_LENGTH, null=False, blank=False)
scope_list = ArrayField(models.TextField(), validators=[validate_scope_list], default=list)
created_by = FlexibleForeignKey("sentry.User", null=True, blank=True, on_delete=models.SET_NULL)
date_added = models.DateTimeField(default=timezone.now, null=False)
date_last_used = models.DateTimeField(null=True, blank=True)
project_last_used_id = HybridCloudForeignKey(
"sentry.Project", null=True, blank=True, on_delete="SET_NULL"
)
date_deactivated = models.DateTimeField(null=True, blank=True)
objects: ClassVar[BaseManager[Self]] = BaseManager(cache_fields=("token_hashed",))
class Meta:
app_label = "sentry"
db_table = "sentry_orgauthtoken"
__repr__ = sane_repr("organization_id", "token_hashed")
def __str__(self) -> str:
return force_str(self.token_hashed)
def get_audit_log_data(self):
return {"name": self.name, "scopes": self.get_scopes()}
def get_allowed_origins(self) -> list[str]:
return []
def get_scopes(self):
return self.scope_list
def has_scope(self, scope):
return scope in self.get_scopes()
def is_active(self) -> bool:
return self.date_deactivated is None
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
) -> int | None:
# TODO(getsentry/team-ospo#190): Prevents a circular import; could probably split up the
# source module in such a way that this is no longer an issue.
from sentry.api.utils import generate_region_url
from sentry.utils.security.orgauthtoken_token import (
SystemUrlPrefixMissingException,
generate_token,
hash_token,
)
# If there is a token collision, or the token does not exist for some reason, generate a new
# one.
matching_token_hashed = self.__class__.objects.filter(
token_hashed=self.token_hashed
).first()
if (not self.token_hashed) or matching_token_hashed:
org_slug = pk_map.get_slug(get_model_name(Organization), self.organization_id)
if org_slug is None:
return None
try:
token_str = generate_token(org_slug, generate_region_url())
except SystemUrlPrefixMissingException:
return None
self.token_hashed = hash_token(token_str)
self.token_last_characters = token_str[-4:]
old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
if old_pk is None:
return None
return old_pk
def handle_async_replication(self, region_name: str, shard_identifier: int) -> None:
from sentry.auth.services.orgauthtoken.serial import serialize_org_auth_token
from sentry.hybridcloud.services.replica import region_replica_service
region_replica_service.upsert_replicated_org_auth_token(
token=serialize_org_auth_token(self),
region_name=region_name,
)
def is_org_auth_token_auth(
auth: object,
) -> TypeGuard[AuthenticatedToken | OrgAuthToken | OrgAuthTokenReplica]:
""":returns True when an API token is hitting the API."""
from sentry.hybridcloud.models.orgauthtokenreplica import OrgAuthTokenReplica
if isinstance(auth, AuthenticatedToken):
return auth.kind == "org_auth_token"
return isinstance(auth, OrgAuthToken) or isinstance(auth, OrgAuthTokenReplica)
def get_org_auth_token_id_from_auth(auth: object) -> int | None:
from sentry.auth.services.auth import AuthenticatedToken
if isinstance(auth, OrgAuthToken):
return auth.id
if isinstance(auth, AuthenticatedToken):
return auth.entity_id
return None
def update_org_auth_token_last_used(auth: object, project_ids: list[int]) -> None:
org_auth_token_id = get_org_auth_token_id_from_auth(auth)
organization_id = getattr(auth, "organization_id", None)
if org_auth_token_id is None or organization_id is None:
return
# Debounce updates, as we often get bursts of requests when customers
# run CI or deploy, and we don't need second-level precision here.
# We vary on the project ids so that unique requests still trigger updates.
project_segment = sha1_text(",".join(str(i) for i in project_ids)).hexdigest()
recent_key = f"orgauthtoken:{org_auth_token_id}:last_update:{project_segment}"
if cache.get(recent_key):
return
orgauthtoken_service.update_orgauthtoken(
organization_id=organization_id,
org_auth_token_id=org_auth_token_id,
date_last_used=timezone.now(),
project_last_used_id=project_ids[0] if len(project_ids) > 0 else None,
)
# Only update each minute.
cache.set(recent_key, 1, timeout=60)
|
OrgAuthToken
|
python
|
dagster-io__dagster
|
helm/dagster/schema/schema/charts/utils/kubernetes.py
|
{
"start": 493,
"end": 756
}
|
class ____(BaseModel):
model_config = {
"extra": "allow",
"json_schema_extra": {
"$ref": create_definition_ref(
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
)
},
}
|
Labels
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/api/client.py
|
{
"start": 23040,
"end": 24328
}
|
class ____:
__slots__ = ("client",)
def __init__(self, client: Client):
self.client = client
def get(
self,
name: str | None = None,
uri: str | None = None,
alias_name: str | None = None,
after: datetime | None = None,
before: datetime | None = None,
ascending: bool = True,
limit: int | None = None,
) -> AssetEventsResponse:
"""Get Asset event from the API server."""
common_params: dict[str, Any] = {}
if after:
common_params["after"] = after.isoformat()
if before:
common_params["before"] = before.isoformat()
common_params["ascending"] = ascending
if limit:
common_params["limit"] = limit
if name or uri:
resp = self.client.get(
"asset-events/by-asset", params={"name": name, "uri": uri, **common_params}
)
elif alias_name:
resp = self.client.get(
"asset-events/by-asset-alias", params={"name": alias_name, **common_params}
)
else:
raise ValueError("Either `name`, `uri` or `alias_name` must be provided")
return AssetEventsResponse.model_validate_json(resp.read())
|
AssetEventOperations
|
python
|
pdm-project__pdm
|
src/pdm/pytest.py
|
{
"start": 3711,
"end": 5042
}
|
class ____:
def __init__(self, pypi_json: Path) -> None:
self.pypi_data = self.load_fixtures(pypi_json)
@staticmethod
def load_fixtures(pypi_json: Path) -> dict[str, Any]:
return json.loads(pypi_json.read_text())
def add_candidate(self, name: str, version: str, requires_python: str = "") -> None:
pypi_data = self.pypi_data.setdefault(normalize_name(name), {}).setdefault(version, {})
pypi_data["requires_python"] = requires_python
def add_dependencies(self, name: str, version: str, requirements: list[str]) -> None:
pypi_data = self.pypi_data[normalize_name(name)][version]
pypi_data.setdefault("dependencies", []).extend(requirements)
def get_raw_dependencies(self, candidate: Candidate) -> tuple[str, list[str]]:
try:
pypi_data = self.pypi_data[cast(str, candidate.req.key)]
for version, data in sorted(pypi_data.items(), key=lambda item: len(item[0])):
base, *_ = version.partition("+")
if candidate.version in (version, base):
return version, data.get("dependencies", [])
except KeyError:
pass
assert candidate.prepared is not None
meta = candidate.prepared.metadata
return meta.version, meta.requires or []
|
RepositoryData
|
python
|
run-llama__llama_index
|
llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py
|
{
"start": 2804,
"end": 8137
}
|
class ____:
"""Test BaseAgentCoreMemory methods using AgentCoreMemory instance."""
def test_create_event_success(self, memory):
"""Test successful event creation."""
messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
memory.create_event(
memory_id="test-memory",
actor_id="test-actor",
messages=messages,
session_id="test-session",
)
assert memory._client.create_event.called
call_args = memory._client.create_event.call_args
assert call_args[1]["memoryId"] == "test-memory"
assert call_args[1]["actorId"] == "test-actor"
assert call_args[1]["sessionId"] == "test-session"
def test_create_event_no_client(self, memory_context):
"""Test create_event raises error when client is None."""
with patch("boto3.Session"):
memory = AgentCoreMemory(context=memory_context)
memory._client = None # Set client to None after initialization
messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
with pytest.raises(ValueError, match="Client is not initialized"):
memory.create_event(
memory_id="test-memory",
actor_id="test-actor",
messages=messages,
session_id="test-session",
)
def test_create_event_empty_messages(self, memory):
"""Test create_event raises error when messages is empty."""
with pytest.raises(ValueError, match="The messages field cannot be empty"):
memory.create_event(
memory_id="test-memory",
actor_id="test-actor",
messages=[],
session_id="test-session",
)
def test_create_event_no_event_id(self, memory):
"""Test create_event raises error when no event ID is returned."""
memory._client.create_event.return_value = {"event": {"eventId": None}}
messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
with pytest.raises(
RuntimeError, match="Bedrock AgentCore did not return an event ID"
):
memory.create_event(
memory_id="test-memory",
actor_id="test-actor",
messages=messages,
session_id="test-session",
)
def test_list_events_simple(self, memory):
"""Test listing events with simple user message first."""
# Mock response with a user message first
mock_events = [
{
"payload": [
{"blob": json.dumps({})},
{"conversational": {"role": "USER", "content": {"text": "Hello"}}},
]
}
]
memory._client.list_events.return_value = {
"events": mock_events,
"nextToken": None,
}
messages = memory.list_events(
memory_id="test-memory", session_id="test-session", actor_id="test-actor"
)
assert len(messages) == 1
assert messages[0].role == MessageRole.USER
assert messages[0].content == "Hello"
def test_list_events_with_pagination(self, memory):
"""Test listing events with pagination to find user message."""
# First call returns assistant message
mock_events_1 = [
{
"payload": [
{"blob": json.dumps({})},
{
"conversational": {
"role": "ASSISTANT",
"content": {"text": "Hi there"},
}
},
]
}
]
# Second call returns user message
mock_events_2 = [
{
"payload": [
{"blob": json.dumps({})},
{"conversational": {"role": "USER", "content": {"text": "Hello"}}},
]
}
]
memory._client.list_events.side_effect = [
{"events": mock_events_1, "nextToken": "token1"},
{"events": mock_events_2, "nextToken": None},
]
messages = memory.list_events(
memory_id="test-memory", session_id="test-session", actor_id="test-actor"
)
assert len(messages) == 2
assert messages[0].role == MessageRole.USER
assert messages[0].content == "Hello"
assert messages[1].role == MessageRole.ASSISTANT
assert messages[1].content == "Hi there"
def test_retrieve_memories(self, memory):
"""Test retrieving memory records."""
memory._client.retrieve_memory_records.return_value = {
"memoryRecordSummaries": [{"content": "Memory 1"}, {"content": "Memory 2"}]
}
memories = memory.retrieve_memories(
memory_id="test-memory", search_criteria={"searchQuery": "test query"}
)
assert memories == ["Memory 1", "Memory 2"]
memory._client.retrieve_memory_records.assert_called_once_with(
memoryId="test-memory",
namespace="/",
searchCriteria={"searchQuery": "test query"},
maxResults=20,
)
|
TestBaseAgentCoreMemoryMethods
|
python
|
fluentpython__example-code
|
07-closure-deco/clockdeco_cls.py
|
{
"start": 365,
"end": 989
}
|
class ____:
def __init__(self, fmt=DEFAULT_FMT):
self.fmt = fmt
def __call__(self, func):
def clocked(*_args):
t0 = time.time()
_result = func(*_args)
elapsed = time.time() - t0
name = func.__name__
args = ', '.join(repr(arg) for arg in _args)
result = repr(_result)
print(self.fmt.format(**locals()))
return _result
return clocked
if __name__ == '__main__':
@clock()
def snooze(seconds):
time.sleep(seconds)
for i in range(3):
snooze(.123)
# END CLOCKDECO_CLS
|
clock
|
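A short usage sketch of the class-based decorator above with a custom format string, assuming the masked class is named clock as the target column indicates and that DEFAULT_FMT is defined earlier in the module.

import time

@clock('{name}({args}) dt={elapsed:0.3f}s')
def snooze(seconds):
    time.sleep(seconds)

snooze(0.1)  # prints something like: snooze(0.1) dt=0.100s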
python
|
getsentry__sentry
|
src/sentry/taskworker/retry.py
|
{
"start": 517,
"end": 735
}
|
class ____(RetryTaskError):
"""
Exception that is raised by retry helper methods to signal to tasks that
the current attempt is terminal and there won't be any further retries.
"""
|
NoRetriesRemainingError
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_array_attr.py
|
{
"start": 115,
"end": 5300
}
|
class ____(CUDATestCase):
def test_contigous_2d(self):
ary = np.arange(10)
cary = ary.reshape(2, 5)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_contigous_3d(self):
ary = np.arange(20)
cary = ary.reshape(2, 5, 2)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_contigous_4d(self):
ary = np.arange(60)
cary = ary.reshape(2, 5, 2, 3)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_ravel_1d(self):
ary = np.arange(60)
dary = cuda.to_device(ary)
for order in 'CFA':
expect = ary.ravel(order=order)
dflat = dary.ravel(order=order)
flat = dflat.copy_to_host()
self.assertTrue(dary is not dflat) # ravel returns new array
self.assertEqual(flat.ndim, 1)
self.assertPreciseEqual(expect, flat)
@skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
def test_ravel_stride_1d(self):
ary = np.arange(60)
dary = cuda.to_device(ary)
# No-copy stride device array
darystride = dary[::2]
dary_data = dary.__cuda_array_interface__['data'][0]
ddarystride_data = darystride.__cuda_array_interface__['data'][0]
self.assertEqual(dary_data, ddarystride_data)
# Fail on ravel on non-contiguous array
with self.assertRaises(NotImplementedError):
darystride.ravel()
def test_ravel_c(self):
ary = np.arange(60)
reshaped = ary.reshape(2, 5, 2, 3)
expect = reshaped.ravel(order='C')
dary = cuda.to_device(reshaped)
dflat = dary.ravel()
flat = dflat.copy_to_host()
self.assertTrue(dary is not dflat)
self.assertEqual(flat.ndim, 1)
self.assertPreciseEqual(expect, flat)
# explicit order kwarg
for order in 'CA':
expect = reshaped.ravel(order=order)
dary = cuda.to_device(reshaped)
dflat = dary.ravel(order=order)
flat = dflat.copy_to_host()
self.assertTrue(dary is not dflat)
self.assertEqual(flat.ndim, 1)
self.assertPreciseEqual(expect, flat)
@skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
def test_ravel_stride_c(self):
ary = np.arange(60)
reshaped = ary.reshape(2, 5, 2, 3)
dary = cuda.to_device(reshaped)
darystride = dary[::2, ::2, ::2, ::2]
dary_data = dary.__cuda_array_interface__['data'][0]
ddarystride_data = darystride.__cuda_array_interface__['data'][0]
self.assertEqual(dary_data, ddarystride_data)
with self.assertRaises(NotImplementedError):
darystride.ravel()
def test_ravel_f(self):
ary = np.arange(60)
reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
for order in 'FA':
expect = reshaped.ravel(order=order)
dary = cuda.to_device(reshaped)
dflat = dary.ravel(order=order)
flat = dflat.copy_to_host()
self.assertTrue(dary is not dflat)
self.assertEqual(flat.ndim, 1)
self.assertPreciseEqual(expect, flat)
@skip_on_cudasim('CUDA Array Interface is not supported in the simulator')
def test_ravel_stride_f(self):
ary = np.arange(60)
reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
dary = cuda.to_device(reshaped)
darystride = dary[::2, ::2, ::2, ::2]
dary_data = dary.__cuda_array_interface__['data'][0]
ddarystride_data = darystride.__cuda_array_interface__['data'][0]
self.assertEqual(dary_data, ddarystride_data)
with self.assertRaises(NotImplementedError):
darystride.ravel()
def test_reshape_c(self):
ary = np.arange(10)
expect = ary.reshape(2, 5)
dary = cuda.to_device(ary)
dary_reshaped = dary.reshape(2, 5)
got = dary_reshaped.copy_to_host()
self.assertPreciseEqual(expect, got)
def test_reshape_f(self):
ary = np.arange(10)
expect = ary.reshape(2, 5, order='F')
dary = cuda.to_device(ary)
dary_reshaped = dary.reshape(2, 5, order='F')
got = dary_reshaped.copy_to_host()
self.assertPreciseEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
TestArrayAttr
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 208097,
"end": 208287
}
|
class ____(VegaLiteSchema):
"""Color schema wrapper."""
_schema = {"$ref": "#/definitions/Color"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
Color
|
python
|
ipython__ipython
|
IPython/lib/display.py
|
{
"start": 463,
"end": 9136
}
|
class ____(DisplayObject):
"""Create an audio object.
When this object is returned by an input cell or passed to the
display function, it will result in Audio controls being displayed
in the frontend (only works in the notebook).
Parameters
----------
data : numpy array, list, unicode, str or bytes
Can be one of
* Numpy 1d array containing the desired waveform (mono)
* Numpy 2d array containing waveforms for each channel.
Shape=(NCHAN, NSAMPLES). For the standard channel order, see
http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
* List of float or integer representing the waveform (mono)
* String containing the filename
* Bytestring containing raw PCM data or
* URL pointing to a file on the web.
If the array option is used, the waveform will be normalized.
If a filename or url is used, the format support will be browser
dependent.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
embed : boolean
Should the audio data be embedded using a data URI (True) or should
the original source be referenced. Set this to True if you want the
        audio to be playable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
rate : integer
The sampling rate of the raw data.
Only required when data parameter is being used as an array
autoplay : bool
Set to True if the audio should immediately start playing.
Default is `False`.
normalize : bool
Whether audio should be normalized (rescaled) to the maximum possible
range. Default is `True`. When set to `False`, `data` must be between
-1 and 1 (inclusive), otherwise an error is raised.
Applies only when `data` is a list or array of samples; other types of
audio are never normalized.
Examples
--------
>>> import pytest
>>> np = pytest.importorskip("numpy")
Generate a sound
>>> import numpy as np
>>> framerate = 44100
>>> t = np.linspace(0,5,framerate*5)
>>> data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
>>> Audio(data, rate=framerate)
<IPython.lib.display.Audio object>
Can also do stereo or more channels
>>> dataleft = np.sin(2*np.pi*220*t)
>>> dataright = np.sin(2*np.pi*224*t)
>>> Audio([dataleft, dataright], rate=framerate)
<IPython.lib.display.Audio object>
From URL:
>>> Audio("http://www.nch.com.au/acm/8k16bitpcm.wav") # doctest: +SKIP
>>> Audio(url="http://www.w3schools.com/html/horse.ogg") # doctest: +SKIP
From a File:
>>> Audio('IPython/lib/tests/test.wav') # doctest: +SKIP
>>> Audio(filename='IPython/lib/tests/test.wav') # doctest: +SKIP
From Bytes:
>>> Audio(b'RAW_WAV_DATA..') # doctest: +SKIP
>>> Audio(data=b'RAW_WAV_DATA..') # doctest: +SKIP
See Also
--------
ipywidgets.Audio
        Audio widget with more flexibility and options.
"""
_read_flags = 'rb'
def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False, normalize=True, *,
element_id=None):
if filename is None and url is None and data is None:
raise ValueError("No audio data found. Expecting filename, url, or data.")
if embed is False and url is None:
raise ValueError("No url found. Expecting url when embed=False")
if url is not None and embed is not True:
self.embed = False
else:
self.embed = True
self.autoplay = autoplay
self.element_id = element_id
super(Audio, self).__init__(data=data, url=url, filename=filename)
if self.data is not None and not isinstance(self.data, bytes):
if rate is None:
raise ValueError("rate must be specified when data is a numpy array or list of audio samples.")
self.data = Audio._make_wav(data, rate, normalize)
def reload(self):
"""Reload the raw data from file or URL."""
import mimetypes
if self.embed:
super(Audio, self).reload()
if self.filename is not None:
self.mimetype = mimetypes.guess_type(self.filename)[0]
elif self.url is not None:
self.mimetype = mimetypes.guess_type(self.url)[0]
else:
self.mimetype = "audio/wav"
@staticmethod
def _make_wav(data, rate, normalize):
""" Transform a numpy array to a PCM bytestring """
from io import BytesIO
import wave
try:
scaled, nchan = Audio._validate_and_normalize_with_numpy(data, normalize)
except ImportError:
scaled, nchan = Audio._validate_and_normalize_without_numpy(data, normalize)
fp = BytesIO()
waveobj = wave.open(fp,mode='wb')
waveobj.setnchannels(nchan)
waveobj.setframerate(rate)
waveobj.setsampwidth(2)
waveobj.setcomptype('NONE','NONE')
waveobj.writeframes(scaled)
val = fp.getvalue()
waveobj.close()
return val
@staticmethod
def _validate_and_normalize_with_numpy(data, normalize) -> Tuple[bytes, int]:
import numpy as np
data = np.array(data, dtype=float)
if len(data.shape) == 1:
nchan = 1
elif len(data.shape) == 2:
            # In wave files, channels are interleaved. E.g.,
# "L1R1L2R2..." for stereo. See
# http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
# for channel ordering
nchan = data.shape[0]
data = data.T.ravel()
else:
raise ValueError('Array audio input must be a 1D or 2D array')
max_abs_value = np.max(np.abs(data))
normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
scaled = data / normalization_factor * 32767
return scaled.astype("<h").tobytes(), nchan
@staticmethod
def _validate_and_normalize_without_numpy(data, normalize):
import array
import sys
data = array.array('f', data)
try:
max_abs_value = float(max([abs(x) for x in data]))
except TypeError as e:
raise TypeError('Only lists of mono audio are '
'supported if numpy is not installed') from e
normalization_factor = Audio._get_normalization_factor(max_abs_value, normalize)
scaled = array.array('h', [int(x / normalization_factor * 32767) for x in data])
if sys.byteorder == 'big':
scaled.byteswap()
nchan = 1
return scaled.tobytes(), nchan
@staticmethod
def _get_normalization_factor(max_abs_value, normalize):
if not normalize and max_abs_value > 1:
raise ValueError('Audio data must be between -1 and 1 when normalize=False.')
return max_abs_value if normalize else 1
def _data_and_metadata(self):
"""shortcut for returning metadata with url information, if defined"""
md = {}
if self.url:
md['url'] = self.url
if md:
return self.data, md
else:
return self.data
def _repr_html_(self):
src = """
<audio {element_id} controls="controls" {autoplay}>
<source src="{src}" type="{type}" />
Your browser does not support the audio element.
</audio>
"""
return src.format(src=self.src_attr(), type=self.mimetype, autoplay=self.autoplay_attr(),
element_id=self.element_id_attr())
def src_attr(self):
import base64
if self.embed and (self.data is not None):
            data = base64.b64encode(self.data).decode('ascii')
return """data:{type};base64,{base64}""".format(type=self.mimetype,
base64=data)
elif self.url is not None:
return self.url
else:
return ""
def autoplay_attr(self):
if(self.autoplay):
return 'autoplay="autoplay"'
else:
return ''
def element_id_attr(self):
if (self.element_id):
return 'id="{element_id}"'.format(element_id=self.element_id)
else:
return ''
|
Audio
|
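A small standalone sketch of the float-to-16-bit-PCM scaling that `Audio._make_wav` performs via `_validate_and_normalize_with_numpy` and `_get_normalization_factor`. The function name `to_pcm16` is illustrative (not part of IPython), and a small guard against an all-zero signal is added here that the original does not have.

import numpy as np

def to_pcm16(samples, normalize=True):
    """Scale mono float samples to little-endian signed 16-bit PCM bytes."""
    data = np.asarray(samples, dtype=float)
    peak = float(np.max(np.abs(data)))
    if not normalize and peak > 1:
        raise ValueError("Audio data must be between -1 and 1 when normalize=False.")
    factor = peak if normalize and peak > 0 else 1.0
    return (data / factor * 32767).astype("<h").tobytes()

t = np.linspace(0, 1, 8000)
pcm = to_pcm16(np.sin(2 * np.pi * 220 * t))
print(len(pcm))  # 16000 bytes: 2 bytes per sample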
python
|
scipy__scipy
|
scipy/stats/_survival.py
|
{
"start": 413,
"end": 7924
}
|
class ____:
"""An empirical distribution function produced by `scipy.stats.ecdf`
Attributes
----------
quantiles : ndarray
The unique values of the sample from which the
`EmpiricalDistributionFunction` was estimated.
probabilities : ndarray
The point estimates of the cumulative distribution function (CDF) or
its complement, the survival function (SF), corresponding with
`quantiles`.
"""
quantiles: np.ndarray
probabilities: np.ndarray
# Exclude these from __str__
_n: np.ndarray = field(repr=False) # number "at risk"
_d: np.ndarray = field(repr=False) # number of "deaths"
_sf: np.ndarray = field(repr=False) # survival function for var estimate
_kind: str = field(repr=False) # type of function: "cdf" or "sf"
def __init__(self, q, p, n, d, kind):
self.probabilities = p
self.quantiles = q
self._n = n
self._d = d
self._sf = p if kind == 'sf' else 1 - p
self._kind = kind
f0 = 1 if kind == 'sf' else 0 # leftmost function value
f1 = 1 - f0
# fill_value can't handle edge cases at infinity
x = np.insert(q, [0, len(q)], [-np.inf, np.inf])
y = np.insert(p, [0, len(p)], [f0, f1])
# `or` conditions handle the case of empty x, points
self._f = interpolate.interp1d(x, y, kind='previous',
assume_sorted=True)
def evaluate(self, x):
"""Evaluate the empirical CDF/SF function at the input.
Parameters
----------
x : ndarray
Argument to the CDF/SF
Returns
-------
y : ndarray
The CDF/SF evaluated at the input
"""
return self._f(x)
def plot(self, ax=None, **matplotlib_kwargs):
"""Plot the empirical distribution function
Available only if ``matplotlib`` is installed.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to draw the plot onto, otherwise uses the current Axes.
**matplotlib_kwargs : dict, optional
Keyword arguments passed directly to `matplotlib.axes.Axes.step`.
Unless overridden, ``where='post'``.
Returns
-------
lines : list of `matplotlib.lines.Line2D`
Objects representing the plotted data
"""
try:
import matplotlib # noqa: F401
except ModuleNotFoundError as exc:
message = "matplotlib must be installed to use method `plot`."
raise ModuleNotFoundError(message) from exc
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
kwargs = {'where': 'post'}
kwargs.update(matplotlib_kwargs)
delta = np.ptp(self.quantiles)*0.05 # how far past sample edge to plot
q = self.quantiles
q = [q[0] - delta] + list(q) + [q[-1] + delta]
return ax.step(q, self.evaluate(q), **kwargs)
def confidence_interval(self, confidence_level=0.95, *, method='linear'):
"""Compute a confidence interval around the CDF/SF point estimate
Parameters
----------
confidence_level : float, default: 0.95
Confidence level for the computed confidence interval
method : str, {"linear", "log-log"}
Method used to compute the confidence interval. Options are
"linear" for the conventional Greenwood confidence interval
(default) and "log-log" for the "exponential Greenwood",
log-negative-log-transformed confidence interval.
Returns
-------
ci : ``ConfidenceInterval``
An object with attributes ``low`` and ``high``, instances of
`~scipy.stats._result_classes.EmpiricalDistributionFunction` that
represent the lower and upper bounds (respectively) of the
confidence interval.
Notes
-----
Confidence intervals are computed according to the Greenwood formula
(``method='linear'``) or the more recent "exponential Greenwood"
formula (``method='log-log'``) as described in [1]_. The conventional
Greenwood formula can result in lower confidence limits less than 0
and upper confidence limits greater than 1; these are clipped to the
unit interval. NaNs may be produced by either method; these are
features of the formulas.
References
----------
.. [1] Sawyer, Stanley. "The Greenwood and Exponential Greenwood
Confidence Intervals in Survival Analysis."
https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf
"""
message = ("Confidence interval bounds do not implement a "
"`confidence_interval` method.")
if self._n is None:
raise NotImplementedError(message)
methods = {'linear': self._linear_ci,
'log-log': self._loglog_ci}
message = f"`method` must be one of {set(methods)}."
if method.lower() not in methods:
raise ValueError(message)
message = "`confidence_level` must be a scalar between 0 and 1."
confidence_level = np.asarray(confidence_level)[()]
if confidence_level.shape or not (0 <= confidence_level <= 1):
raise ValueError(message)
method_fun = methods[method.lower()]
low, high = method_fun(confidence_level)
message = ("The confidence interval is undefined at some observations."
" This is a feature of the mathematical formula used, not"
" an error in its implementation.")
if np.any(np.isnan(low) | np.isnan(high)):
warnings.warn(message, RuntimeWarning, stacklevel=2)
low, high = np.clip(low, 0, 1), np.clip(high, 0, 1)
low = EmpiricalDistributionFunction(self.quantiles, low, None, None,
self._kind)
high = EmpiricalDistributionFunction(self.quantiles, high, None, None,
self._kind)
return ConfidenceInterval(low, high)
def _linear_ci(self, confidence_level):
sf, d, n = self._sf, self._d, self._n
# When n == d, Greenwood's formula divides by zero.
# When s != 0, this can be ignored: var == inf, and CI is [0, 1]
# When s == 0, this results in NaNs. Produce an informative warning.
with np.errstate(divide='ignore', invalid='ignore'):
var = sf ** 2 * np.cumsum(d / (n * (n - d)))
se = np.sqrt(var)
z = special.ndtri(1 / 2 + confidence_level / 2)
z_se = z * se
low = self.probabilities - z_se
high = self.probabilities + z_se
return low, high
def _loglog_ci(self, confidence_level):
sf, d, n = self._sf, self._d, self._n
with np.errstate(divide='ignore', invalid='ignore'):
var = 1 / np.log(sf) ** 2 * np.cumsum(d / (n * (n - d)))
se = np.sqrt(var)
z = special.ndtri(1 / 2 + confidence_level / 2)
with np.errstate(divide='ignore'):
lnl_points = np.log(-np.log(sf))
z_se = z * se
low = np.exp(-np.exp(lnl_points + z_se))
high = np.exp(-np.exp(lnl_points - z_se))
if self._kind == "cdf":
low, high = 1-high, 1-low
return low, high
@dataclass
|
EmpiricalDistributionFunction
|
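A hedged usage sketch showing how instances of this class are normally obtained: `scipy.stats.ecdf` (SciPy 1.11+) returns a result whose `cdf` and `sf` attributes are `EmpiricalDistributionFunction` objects, so the evaluation and confidence-interval methods above can be called on them. The sample values are arbitrary.

import numpy as np
from scipy import stats

sample = np.array([3.1, 1.2, 4.7, 2.8, 5.0, 3.3])
res = stats.ecdf(sample)

print(res.cdf.quantiles)       # unique sorted sample values
print(res.cdf.probabilities)   # step heights of the empirical CDF
print(res.cdf.evaluate(3.0))   # CDF evaluated at an arbitrary point

# A RuntimeWarning may note that the interval is undefined at the largest
# observation; that is the documented behaviour of the Greenwood formulas.
ci = res.cdf.confidence_interval(confidence_level=0.95)
print(ci.low.probabilities, ci.high.probabilities)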
python
|
pytorch__pytorch
|
test/test_serialization.py
|
{
"start": 40301,
"end": 40407
}
|
class ____:
__slots__ = ["x", "y"]
x: int
y: int
@dataclass
|
ClassThatUsesBuildInstructionAllSlots
|
python
|
pennersr__django-allauth
|
allauth/usersessions/middleware.py
|
{
"start": 100,
"end": 613
}
|
class ____:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if (
app_settings.TRACK_ACTIVITY
and hasattr(request, "session")
and request.session.session_key
and hasattr(request, "user")
and request.user.is_authenticated
):
UserSession.objects.create_from_request(request)
response = self.get_response(request)
return response
|
UserSessionsMiddleware
|
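A hedged configuration sketch for the middleware above: it only records activity when the usersessions app is installed and activity tracking is enabled. The exact middleware ordering and the `USERSESSIONS_TRACK_ACTIVITY` setting name are assumptions based on the `app_settings.TRACK_ACTIVITY` reference in the code.

# settings.py (sketch)
INSTALLED_APPS = [
    # ...
    "allauth",
    "allauth.account",
    "allauth.usersessions",
]

MIDDLEWARE = [
    # ...
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "allauth.usersessions.middleware.UserSessionsMiddleware",
]

USERSESSIONS_TRACK_ACTIVITY = True  # assumed name for app_settings.TRACK_ACTIVITY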
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
|
{
"start": 12358,
"end": 13406
}
|
class ____(ChoicesParser):
"""Composite argument parser which accepts any input value."""
def __init__(self, nothing: bool = False, no_match_message: t.Optional[str] = None) -> None:
self.no_match_message = no_match_message
conditions = MatchConditions.ANY
if nothing:
conditions |= MatchConditions.NOTHING
super().__init__([], conditions=conditions)
def no_completion_match(self, value: str) -> CompletionUnavailable:
"""Return an instance of CompletionUnavailable when no match was found for the given value."""
if self.no_match_message:
return CompletionUnavailable(message=self.no_match_message)
return super().no_completion_match(value)
def no_choices_available(self, value: str) -> ParserError:
"""Return an instance of ParserError when parsing fails and no choices are available."""
if self.no_match_message:
return ParserError(self.no_match_message)
return super().no_choices_available(value)
|
AnyParser
|
python
|
tornadoweb__tornado
|
maint/test/websocket/server.py
|
{
"start": 213,
"end": 607
}
|
class ____(WebSocketHandler):
def on_message(self, message):
self.write_message(message, binary=isinstance(message, bytes))
def get_compression_options(self):
return {}
if __name__ == '__main__':
parse_command_line()
app = Application([
('/', EchoHandler),
])
app.listen(options.port, address='127.0.0.1')
IOLoop.instance().start()
|
EchoHandler
|
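A minimal client sketch for exercising the echo server above, assuming it is running locally and that `options.port` was set to 9001 (the port number is an assumption).

from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect

async def main():
    conn = await websocket_connect("ws://127.0.0.1:9001/")
    await conn.write_message("hello")
    print(await conn.read_message())  # the server echoes back "hello"
    conn.close()

IOLoop.current().run_sync(main)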
python
|
spyder-ide__spyder
|
spyder/plugins/editor/widgets/main_widget.py
|
{
"start": 2764,
"end": 2894
}
|
class ____:
TodoList = "todo_list_menu"
WarningErrorList = "warning_error_list_menu"
EOL = "eol_menu"
|
EditorWidgetMenus
|
python
|
joke2k__faker
|
faker/providers/address/es_AR/__init__.py
|
{
"start": 115,
"end": 6530
}
|
class ____(AddressProvider):
provinces = {
"CABA": "Ciudad Autónoma de Buenos Aires",
"BA": "Buenos Aires",
"CA": "Catamarca",
"CH": "Chaco",
"CT": "Chubut",
"CB": "Córdoba",
"CR": "Corrientes",
"ER": "Entre Ríos",
"FO": "Formosa",
"JY": "Jujuy",
"LP": "La Pampa",
"LR": "La Rioja",
"MZ": "Mendoza",
"MI": "Misiones",
"NQN": "Neuquén",
"RN": "Río Negro",
"SA": "Salta",
"SJ": "San Juan",
"SL": "San Luis",
"SC": "Santa Cruz",
"SF": "Santa Fe",
"SE": "Santiago del Estero",
"TF": "Tierra del Fuego",
"TU": "Tucumán",
}
municipalities: List[Tuple[str, str, str]] = [
("1004", "Constitución", "CABA"),
("1900", "La Plata", "BA"),
("7600", "Mar del Plata", "BA"),
("8000", "Bahía Blanca", "BA"),
("4700", "San Ferando del Valle de Catamarca", "CA"),
("3500", "Resistencia", "CH"),
("9103", "Rawson", "CT"),
("9000", "Comodoro Rivadavia", "CT"),
("5000", "Córdoba", "CB"),
("3400", "Corrientes", "CR"),
("3100", "Paraná", "ER"),
("3600", "Formosa", "FO"),
("4600", "San Salvador de Jujuy", "JY"),
("6300", "Santa Rosa", "LP"),
("5300", "La Rioja", "LR"),
("5360", "Chilecito", "LR"),
("5500", "Mendoza", "MZ"),
("3300", "Posadas", "MI"),
("8300", "Neuquén", "NQN"),
("8500", "Viedma", "RN"),
("4400", "Salta", "SA"),
("5400", "San Juan", "SJ"),
("5700", "San Luis", "SL"),
("5881", "Merlo", "SL"),
("9400", "Río Gallegos", "SC"),
("3000", "Santa Fe", "SF"),
("2000", "Rosario", "SF"),
("4200", "Santiago del Estero", "SE"),
("9410", "Ushuaia", "TF"),
("4000", "San Miguel de Tucumán", "TU"),
]
street_prefixes = OrderedDict(
[
("Calle", 0.2),
("Avenida", 0.2),
("Av.", 0.2),
("Diagonal", 0.2),
("Diag.", 0.05),
("Camino", 0.05),
("Boulevard", 0.05),
("Blv.", 0.05),
]
)
street_suffixes = ["A", "B", "Bis"]
street_proceres = (
"San Martin",
"Belgrano",
"Saavedra",
"Rivadavia",
"Güemes",
"G. Brown",
"J.B. Alberdi",
"J.M. de Rosas",
"J.J. Castelli",
"Mitre",
"Alem",
"Alvear",
"Malvinas Argentinas",
"Pte. Perón",
"Omar Nuñez",
)
street_name_formats = OrderedDict(
[
("{{street_prefix}} %", 0.2),
("{{street_prefix}} {{street_municipality}}", 0.2),
("{{street_prefix}} {{street_province}}", 0.2),
("{{street_prefix}} {{street_procer}}", 0.2),
("{{street_prefix}} 1## {{street_suffix}}", 0.02),
]
)
building_number_formats = OrderedDict(
[
("%%", 0.2),
("%%#", 0.2),
("%#%", 0.2),
("%#%#", 0.2),
]
)
secondary_address_formats = [
"Piso % Dto. %",
"Dto. %",
"Torre % Dto. %",
"Local %!",
"Oficina %!",
]
postcode_formats = ["{{municipality_code}}####"]
def provinces_code(self) -> str:
"""
:example: "BA"
"""
return self.random_element(self.provinces.keys())
def province(self) -> str:
"""
:example: "Buenos Aires"
"""
return self.random_element(list(self.provinces.values()))
administrative_unit = province
def municipality_code(self) -> str:
"""
:example: "1900"
"""
return self.random_element(self.municipalities)[0] # type: ignore
def municipality(self) -> str:
"""
:example: "La Plata"
"""
return self.random_element(self.municipalities)[1] # type: ignore
city = municipality
def street_prefix(self) -> str:
"""
:example: "Calle"
"""
return self.random_element(self.street_prefixes)
def street_procer(self) -> str:
"""
:example: "Belgrano"
"""
return self.random_element(self.street_proceres)
def street_municipality(self) -> str:
"""
:example: "La Plata"
"""
return self.random_element(self.municipalities)[1]
def street_province(self) -> str:
"""
:example: "San Juan"
"""
return self.random_element(list(self.provinces.values()))
def street_suffix(self) -> str:
"""
:example: "Sur"
"""
return self.generator.parse(self.random_element(self.street_suffixes))
def street_name(self) -> str:
"""
:example: "Calle 1"
"""
pattern: str = self.random_element(self.street_name_formats)
return self.numerify(self.generator.parse(pattern))
def building_number(self) -> str:
"""
:example: "23"
"""
return self.numerify(self.generator.parse(self.random_element(self.building_number_formats)))
def secondary_address(self) -> str:
"""
:example: "Departamento 123"
"""
return self.numerify(self.random_element(self.secondary_address_formats))
def street_address(self) -> str:
"""
:example: "Calle 1 N° 23"
"""
return self.street_name() + " N° " + self.building_number()
def postcode(self) -> str:
"""
:example: "1900"
"""
return self.numerify(self.generator.parse(self.random_element(self.postcode_formats)))
def address(self) -> str:
"""
:example: "Calle 1 N° 23, La Plata 1900, Buenos Aires"
"""
municipality: Tuple[str, str, str] = self.random_element(self.municipalities)
municipality_code = municipality[0]
municipality_prov = municipality[2]
secondary_address: str = self.random_element(
[
" " + self.secondary_address(),
"",
]
)
postcode = "\n" + municipality[1] + " " + municipality_code
province_name = ", " + self.provinces[municipality_prov]
return self.street_address() + secondary_address + postcode + province_name
|
Provider
|
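A hedged usage sketch: this provider is picked up automatically when a Faker instance is created with the `es_AR` locale, so the address pieces defined above can be generated as shown. Outputs vary because values are drawn at random.

from faker import Faker

fake = Faker("es_AR")
print(fake.province())         # e.g. "Buenos Aires"
print(fake.municipality())     # e.g. "La Plata"
print(fake.street_address())   # e.g. "Avenida Belgrano N° 231"
print(fake.postcode())         # e.g. "19003456" (municipality code + 4 digits)
print(fake.address())          # full multi-line address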
python
|
Pylons__pyramid
|
tests/test_config/test_views.py
|
{
"start": 148667,
"end": 149035
}
|
class ____:
def __init__(self, getval=None):
self.related = []
self.introspectables = []
self.getval = getval
def add(self, introspectable):
self.introspectables.append(introspectable)
def get(self, name, discrim):
return self.getval
def relate(self, a, b):
self.related.append((a, b))
|
DummyIntrospector
|
python
|
walkccc__LeetCode
|
solutions/2599. Make the Prefix Sum Non-negative/2599.py
|
{
"start": 0,
"end": 315
}
|
class ____:
def makePrefSumNonNegative(self, nums: list[int]) -> int:
ans = 0
prefix = 0
minHeap = []
for num in nums:
prefix += num
if num < 0:
heapq.heappush(minHeap, num)
while prefix < 0:
prefix -= heapq.heappop(minHeap)
ans += 1
return ans
|
Solution
|
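A quick worked example of the greedy strategy above (running prefix sum plus a min-heap of the negatives seen so far): whenever the prefix dips below zero, the most negative element is popped, which corresponds to moving it to the end of the array. Note that the class relies on `heapq` being imported at module level, which falls outside the extracted span.

import heapq  # required by Solution.makePrefSumNonNegative

nums = [2, 3, -5, 4, -7]
# prefix sums: 2, 5, 0, 4, -3 -> the -7 is deferred once, so the answer is 1
print(Solution().makePrefSumNonNegative(nums))  # 1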
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 565745,
"end": 566129
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Deployment", graphql_name="node")
"""The item at the end of the edge."""
|
DeploymentEdge
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_reduce.py
|
{
"start": 340,
"end": 607
}
|
class ____(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
|
AddReduceSeparate
|
python
|
Pylons__pyramid
|
tests/test_threadlocal.py
|
{
"start": 2420,
"end": 2748
}
|
class ____(unittest.TestCase):
def _callFUT(self):
from pyramid.threadlocal import get_current_registry
return get_current_registry()
def test_it(self):
from pyramid.registry import global_registry
self.assertEqual(self._callFUT(), global_registry)
|
GetCurrentRegistryWithoutTestingRegistry
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/airflow_core/test_scheduler.py
|
{
"start": 38402,
"end": 39030
}
|
class ____:
"""Tests scheduler network policy."""
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"networkPolicies": {"enabled": True},
"scheduler": {
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/scheduler/scheduler-networkpolicy.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
|
TestSchedulerNetworkPolicy
|
python
|
getsentry__sentry
|
tests/sentry/utils/test_sdk.py
|
{
"start": 17599,
"end": 21548
}
|
class ____(TestCase):
def _make_orgs(self, n: int) -> list[Organization]:
return [self.create_organization() for _ in range(n)]
def test_simple(self) -> None:
orgs = self._make_orgs(3)
with patch_isolation_scope() as mock_scope:
bind_ambiguous_org_context(orgs, "integration id=1231")
assert mock_scope._tags == {
"organization": "[multiple orgs]",
"organization.slug": "[multiple orgs]",
}
assert mock_scope._contexts == {
"organization": {
"multiple possible": [org.slug for org in orgs],
"source": "integration id=1231",
},
}
def test_doesnt_overwrite_org_in_list(self) -> None:
orgs = self._make_orgs(3)
single_org = orgs[2]
expected_tags = {
"organization": single_org.id,
"organization.slug": single_org.slug,
}
expected_contexts = {
"organization": {
"id": single_org.id,
"slug": single_org.slug,
}
}
with patch_isolation_scope() as mock_scope:
# First add data from a single org in our list
bind_organization_context(single_org)
assert mock_scope._tags == expected_tags
assert mock_scope._contexts == expected_contexts
# Now try to overwrite that with the whole list, which should be a no-op
bind_ambiguous_org_context(orgs, "integration id=1231")
assert mock_scope._tags == expected_tags
assert mock_scope._contexts == expected_contexts
def test_does_overwrite_org_not_in_list(self) -> None:
other_org, *orgs = self._make_orgs(4)
assert other_org.slug not in [org.slug for org in orgs]
with patch_isolation_scope() as mock_scope:
# First add data from a single org not in our list
bind_organization_context(other_org)
assert mock_scope._tags == {
"organization": other_org.id,
"organization.slug": other_org.slug,
}
# Now try to overwrite that with the whole list, which should work
bind_ambiguous_org_context(orgs, "integration id=1231")
assert mock_scope._tags == {
"organization": "[multiple orgs]",
"organization.slug": "[multiple orgs]",
"possible_mistag": True,
"scope_bleed.organization.slug": True,
}
assert mock_scope._contexts == {
"organization": {
"multiple possible": [org.slug for org in orgs],
"source": "integration id=1231",
},
"scope_bleed": {
"previous_organization.slug_tag": other_org.slug,
"new_organization.slug_tag": "[multiple orgs]",
},
}
def test_truncates_list(self) -> None:
orgs = self._make_orgs(5)
with patch.object(sdk, "_AMBIGUOUS_ORG_CUTOFF", 3), patch_isolation_scope() as mock_scope:
bind_ambiguous_org_context(orgs, "integration id=1231")
slug_list_in_org_context = mock_scope._contexts["organization"]["multiple possible"]
assert len(slug_list_in_org_context) == 3
assert slug_list_in_org_context[-1] == "... (3 more)"
def test_before_send_error_level() -> None:
event = {
"tags": {
"silo_mode": "REGION",
"sentry_region": "testregion456576",
},
"level": "error",
}
hint = {"exc_info": (OperationalError, OperationalError("test"), None)}
event_with_before_send = sdk.before_send(event, hint) # type: ignore[arg-type]
assert event_with_before_send
assert event_with_before_send["level"] == "warning"
|
BindAmbiguousOrgContextTest
|
python
|
astropy__astropy
|
astropy/table/tests/test_masked.py
|
{
"start": 15638,
"end": 26961
}
|
class ____:
def test_add_masked_row_to_masked_table_iterable(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row([2, 5], mask=[1, 0])
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t["a"]) == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t["b"]) == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping1(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row({"b": 5, "a": 2}, mask={"a": 1, "b": 0})
t.add_row({"a": 3, "b": 6}, mask={"b": 1, "a": 0})
assert t.masked
assert np.all(np.array(t["a"]) == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t["b"]) == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping2(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then values not specified will have mask values set to True
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row({"b": 5}, mask={"b": 0})
t.add_row({"a": 3}, mask={"a": 0})
assert t.masked
assert t["a"][0] == 1 and t["a"][2] == 3
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert t["b"][1] == 5
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping3(self):
# When adding values to a masked table, if mask is not passed to
# add_row, then the mask should be set to False if values are present
# and True if not.
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
t.add_row({"b": 5})
t.add_row({"a": 3})
assert t.masked
assert t["a"][0] == 1 and t["a"][2] == 3
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert t["b"][1] == 5
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping4(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then keys in values should match keys in mask
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
with pytest.raises(ValueError) as exc:
t.add_row({"b": 5}, mask={"a": True})
assert exc.value.args[0] == "keys in mask should match keys in vals"
def test_add_masked_row_to_masked_table_mismatch(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[0]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[1]))
with pytest.raises(TypeError) as exc:
t.add_row([2, 5], mask={"a": 1, "b": 0})
assert exc.value.args[0] == "Mismatch between type of vals and mask"
with pytest.raises(TypeError) as exc:
t.add_row({"b": 5, "a": 2}, mask=[1, 0])
assert exc.value.args[0] == "Mismatch between type of vals and mask"
def test_add_masked_row_to_non_masked_table_iterable(self):
t = Table(masked=False)
t["a"] = [1]
t["b"] = [4]
t["c"] = Time([1], format="cxcsec")
tm = Time(2, format="cxcsec")
assert not t.masked
t.add_row([2, 5, tm])
assert not t.masked
t.add_row([3, 6, tm], mask=[0, 1, 1])
assert not t.masked
assert type(t["a"]) is Column
assert type(t["b"]) is MaskedColumn
assert type(t["c"]) is Time
assert np.all(t["a"] == [1, 2, 3])
assert np.all(t["b"].data == [4, 5, 6])
assert np.all(t["b"].mask == [False, False, True])
assert np.all(t["c"][:2] == Time([1, 2], format="cxcsec"))
assert np.all(t["c"].mask == [False, False, True])
def test_add_row_cannot_mask_column_raises_typeerror(self):
t = QTable()
t["a"] = [1, 2] * u.m
t.add_row((3 * u.m,)) # No problem
with pytest.raises(ValueError) as exc:
t.add_row((3 * u.m,), mask=(True,))
assert exc.value.args[0].splitlines() == [
"Unable to insert row because of exception in column 'a':",
"mask was supplied for column 'a' but it does not support masked values",
]
def test_setting_from_masked_column():
"""Test issue in #2997"""
mask_b = np.array([True, True, False, False])
for select in (mask_b, slice(0, 2)):
t = Table(masked=True)
t["a"] = Column([1, 2, 3, 4])
t["b"] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
t["c"] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False])
t["b"][select] = t["c"][select]
assert t["b"][1] == t[1]["b"]
assert t["b"][0] is np.ma.masked # Original state since t['c'][0] is masked
assert t["b"][1] == 222 # New from t['c'] since t['c'][1] is unmasked
assert t["b"][2] == 33
assert t["b"][3] == 44
assert np.all(
t["b"].mask == t.mask["b"]
) # Avoid t.mask in general, this is for testing
mask_before_add = t.mask.copy()
t["d"] = np.arange(len(t))
assert np.all(t.mask["b"] == mask_before_add["b"])
def test_coercing_fill_value_type():
"""
Test that masked column fill_value is coerced into the correct column type.
"""
# This is the original example posted on the astropy@scipy mailing list
t = Table({"a": ["1"]}, masked=True)
t["a"].set_fill_value("0")
t2 = Table(t, names=["a"], dtype=[np.int32])
assert isinstance(t2["a"].fill_value, np.int32)
# Unit test the same thing.
c = MaskedColumn(["1"])
c.set_fill_value("0")
c2 = MaskedColumn(c, dtype=np.int32)
assert isinstance(c2.fill_value, np.int32)
def test_mask_copy():
"""Test that the mask is copied when copying a table (issue #7362)."""
c = MaskedColumn([1, 2], mask=[False, True])
c2 = MaskedColumn(c, copy=True)
c2.mask[0] = True
assert np.all(c.mask == [False, True])
assert np.all(c2.mask == [True, True])
def test_masked_as_array_with_mixin():
"""Test that as_array() and Table.mask attr work with masked mixin columns"""
t = Table()
t["a"] = Time([1, 2], format="cxcsec")
t["b"] = [3, 4]
t["c"] = [5, 6] * u.m
# With no mask, the output should be ndarray
ta = t.as_array()
assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray)
# With a mask, output is MaskedArray
t["a"][1] = np.ma.masked
ta = t.as_array()
assert isinstance(ta, np.ma.MaskedArray)
assert np.all(ta["a"].mask == [False, True])
assert np.isclose(ta["a"][0].cxcsec, 1.0)
assert not np.any(ta["b"].mask)
assert not np.any(ta["c"].mask)
# Check table ``mask`` property
tm = t.mask
assert np.all(tm["a"] == [False, True])
assert not np.any(tm["b"])
assert not np.any(tm["c"])
def test_masked_column_with_unit_in_qtable():
"""Test that adding a MaskedColumn with a unit to QTable creates a MaskedQuantity."""
MaskedQuantity = Masked(u.Quantity)
t = QTable()
t["a"] = MaskedColumn([1, 2])
assert isinstance(t["a"], MaskedColumn)
t["b"] = MaskedColumn([1, 2], unit=u.m)
assert isinstance(t["b"], MaskedQuantity)
assert not np.any(t["b"].mask)
t["c"] = MaskedColumn([1, 2], unit=u.m, mask=[True, False])
assert isinstance(t["c"], MaskedQuantity)
assert np.all(t["c"].mask == [True, False])
# Regular Column is still converted to regular Quantity
t["d"] = Column([1, 2], unit=u.cm)
assert not isinstance(t["d"], MaskedQuantity)
assert isinstance(t["d"], u.Quantity)
# But not if the table is masked.
t2 = QTable(t, masked=True)
assert isinstance(t2["d"], MaskedQuantity)
t2["e"] = Column([1, 2], unit=u.cm)
assert isinstance(t2["e"], MaskedQuantity)
assert not np.any(t2["e"].mask)
def test_masked_quantity_in_table():
MaskedQuantity = Masked(u.Quantity)
t = Table()
t["b"] = MaskedQuantity([1, 2], unit=u.m)
assert isinstance(t["b"], MaskedColumn)
assert not np.any(t["b"].mask)
t["c"] = MaskedQuantity([1, 2], unit=u.m, mask=[True, False])
assert isinstance(t["c"], MaskedColumn)
assert np.all(t["c"].mask == [True, False])
t2 = Table(t, masked=True)
t2["d"] = u.Quantity([1, 2], unit=u.cm)
assert isinstance(t2["d"], MaskedColumn)
assert not np.any(t2["d"].mask)
def test_masked_column_data_attribute_is_plain_masked_array():
c = MaskedColumn([1, 2], mask=[False, True])
c_data = c.data
assert type(c_data) is np.ma.MaskedArray
assert type(c_data.data) is np.ndarray
def test_mask_slicing_count_array_finalize():
"""Check that we don't finalize MaskedColumn too often.
Regression test for gh-6721.
"""
# Create a new BaseColumn class that counts how often
# ``__array_finalize__`` is called.
class MyBaseColumn(BaseColumn):
counter = 0
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
MyBaseColumn.counter += 1
# Base a new MaskedColumn class on it. The normal MaskedColumn
# hardcodes the initialization to BaseColumn, so we exchange that.
class MyMaskedColumn(MaskedColumn, Column, MyBaseColumn):
def __new__(cls, *args, **kwargs):
self = super().__new__(cls, *args, **kwargs)
self._baseclass = MyBaseColumn
return self
# Creation really needs 2 finalizations (once for the BaseColumn
# call inside ``__new__`` and once when the view as a MaskedColumn
# is taken), but since the first is hardcoded, we do not capture it
# and thus the count is only 1.
c = MyMaskedColumn([1, 2], mask=[False, True])
assert MyBaseColumn.counter == 1
# slicing should need only one ``__array_finalize__`` (used to be 3).
c0 = c[:]
assert MyBaseColumn.counter == 2
# repr should need none (used to be 2!!)
repr(c0)
assert MyBaseColumn.counter == 2
def test_set_masked_bytes_column():
mask = [True, False, True]
mc = MaskedColumn([b"a", b"b", b"c"], mask=mask)
mc[:] = mc
assert (mc.mask == mask).all()
def test_qtable_masked_true_basics():
# Explicit regression test for gh-16495.
tab = QTable([[1, 1] * u.mJy], names=["test"], masked=True)
assert isinstance(tab["test"], Masked)
assert isinstance(tab["test"], u.Quantity)
assert not np.any(tab["test"].mask)
tab["test"].mask[0] = True
assert_array_equal(tab["test"].mask, [True, False])
tab["test"].mask |= [True, True]
assert_array_equal(tab["test"].mask, [True, True])
|
TestAddRow
|
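A small standalone sketch of the masked `add_row` behaviour these tests exercise, runnable outside the test suite; the column names mirror the ones used above.

import numpy as np
from astropy.table import MaskedColumn, Table

t = Table(masked=True)
t.add_column(MaskedColumn(name="a", data=[1], mask=[False]))
t.add_column(MaskedColumn(name="b", data=[4], mask=[True]))

# Iterable values with an explicit per-column mask
t.add_row([2, 5], mask=[True, False])

# Mapping values: columns left out get masked automatically
t.add_row({"a": 3})

print(np.array(t["a"]))   # [1 2 3]
print(t["a"].mask)        # [False  True False]
print(t["b"].mask)        # [ True False  True]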
python
|
bokeh__bokeh
|
src/bokeh/core/property/enum.py
|
{
"start": 1474,
"end": 5169
}
|
class ____(Either):
""" Accept values from enumerations.
The first value in enumeration is used as the default value, unless the
``default`` keyword argument is used.
See :ref:`bokeh.core.enums` for more information.
"""
_enum: enums.Enumeration
@overload
def __init__(self, enum: enums.Enumeration, *, default: Init[str] = ..., help: str | None = ...) -> None: ...
@overload
def __init__(self, enum: str, *values: str, default: Init[str] = ..., help: str | None = ...) -> None: ...
@overload
def __init__(self, enum: enums.Enumeration, *, default: Init[int] = ..., help: str | None = ...) -> None: ...
@overload
def __init__(self, enum: int, *values: int, default: Init[int] = ..., help: str | None = ...) -> None: ...
def __init__(self, enum: str | int | enums.Enumeration, *values: str | int, default: Init[str | int] = Intrinsic, help: str | None = None) -> None:
if isinstance(enum, (str, int)):
self._enum = enums.enumeration(enum, *values)
elif values:
raise ValueError("unexpected enum values")
else:
self._enum = enum
default = default if default is not Intrinsic else self._enum._default
super().__init__(String, Int, default=default, help=help)
def __call__(self, *, default: Init[str | int] = Intrinsic, help: str | None = None) -> Enum:
""" Clone this property and allow to override ``default`` and ``help``. """
default = self._default if default is Intrinsic else default
help = self._help if help is None else help
prop = self.__class__(self._enum, default=default, help=help)
prop.alternatives = list(self.alternatives)
prop.assertions = list(self.assertions)
return prop
def __str__(self) -> str:
class_name = self.__class__.__name__
allowed_values = ", ".join(repr(x) for x in self.allowed_values)
return f"{class_name}({allowed_values})"
@property
def allowed_values(self) -> list[str | int]:
return self._enum._values
def validate(self, value: Any, detail: bool = True) -> None:
super().validate(value, detail)
if value in self._enum:
return
from ...util.strings import nice_join
msg = "" if not detail else f"invalid value: {value!r}; allowed values are {nice_join(self.allowed_values)}"
raise ValueError(msg)
# override replace so that .replace() doesn't descend this type
def replace(self, old: type[Property[Any]], new: Property[Any]) -> Property[Any]:
if self.__class__ == old:
return new
else:
return self
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@register_type_link(Enum)
def _sphinx_type(obj: Enum) -> str:
# try to return a link to a proper enum in bokeh.core.enums if possible
if obj._enum in enums.__dict__.values():
for name, value in enums.__dict__.items():
if obj._enum is value:
fullname = f"{obj._enum.__module__}.{name}"
return f"{property_link(obj)}({model_link(fullname)})"
# otherwise just a basic str name format
return f"{property_link(obj)}({obj._enum})"
|
Enum
|
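A hedged usage sketch of the Enum property on a minimal HasProps subclass; the class name `Shape` and property name `anchor` are illustrative. The first enumeration value becomes the default unless overridden, and invalid assignments are rejected by `validate()`.

from bokeh.core.has_props import HasProps
from bokeh.core.properties import Enum

class Shape(HasProps):
    anchor = Enum("left", "center", "right")

s = Shape()
print(s.anchor)        # "left": the first value is the default
s.anchor = "center"    # passes validation
try:
    s.anchor = "middle"
except ValueError as err:
    print(err)         # invalid value ...; allowed values are listed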
python
|
pandas-dev__pandas
|
pandas/core/groupby/ops.py
|
{
"start": 2563,
"end": 18522
}
|
class ____:
"""
Dispatch logic for functions defined in _libs.groupby
Parameters
----------
kind: str
Whether the operation is an aggregate or transform.
how: str
Operation name, e.g. "mean".
has_dropped_na: bool
True precisely when dropna=True and the grouper contains a null value.
"""
# Functions for which we do _not_ attempt to cast the cython result
# back to the original dtype.
cast_blocklist = frozenset(
["any", "all", "rank", "count", "size", "idxmin", "idxmax"]
)
def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
self.kind = kind
self.how = how
self.has_dropped_na = has_dropped_na
_CYTHON_FUNCTIONS: dict[str, dict] = {
"aggregate": {
"any": functools.partial(libgroupby.group_any_all, val_test="any"),
"all": functools.partial(libgroupby.group_any_all, val_test="all"),
"sum": "group_sum",
"prod": "group_prod",
"idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"),
"idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"),
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median_float64",
"var": "group_var",
"std": functools.partial(libgroupby.group_var, name="std"),
"sem": functools.partial(libgroupby.group_var, name="sem"),
"skew": "group_skew",
"kurt": "group_kurt",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
_cython_arity = {"ohlc": 4} # OHLC
@classmethod
def get_kind_from_how(cls, how: str) -> str:
if how in cls._CYTHON_FUNCTIONS["aggregate"]:
return "aggregate"
return "transform"
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.cache
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
if callable(ftype):
f = ftype
else:
f = getattr(libgroupby, ftype)
if is_numeric:
return f
elif dtype == np.dtype(object):
if how in ["median", "cumprod"]:
# no fused types -> no __signatures__
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
elif how in ["std", "sem", "idxmin", "idxmax"]:
# We have a partial object that does not have __signatures__
return f
elif how in ["skew", "kurt"]:
# _get_cython_vals will convert to float64
pass
elif "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
else:
raise NotImplementedError(
"This should not be reached. Please report a bug at "
"github.com/pandas-dev/pandas/",
dtype,
)
def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
"""
Cast numeric dtypes to float64 for functions that only support that.
Parameters
----------
values : np.ndarray
Returns
-------
values : np.ndarray
"""
how = self.how
if how in ["median", "std", "sem", "skew", "kurt"]:
# median only has a float64 implementation
# We should only get here with is_numeric, as non-numeric cases
# should raise in _get_cython_function
values = ensure_float64(values)
elif values.dtype.kind in "iu":
if how in ["var", "mean"] or (
self.kind == "transform" and self.has_dropped_na
):
# has_dropped_na check need for test_null_group_str_transformer
# result may still include NaN, so we have to cast
values = ensure_float64(values)
elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]:
# Avoid overflow during group op
if values.dtype.kind == "i":
values = ensure_int64(values)
else:
values = ensure_uint64(values)
return values
def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, arity)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
elif how in ["idxmin", "idxmax"]:
# The Cython implementation only produces the row number; we'll take
# from the index using this in post processing
out_dtype = "intp"
else:
if dtype.kind in "iufcb":
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : np.dtype
Returns
-------
np.dtype
The desired dtype of the result.
"""
how = self.how
if how in ["sum", "cumsum", "sum", "prod", "cumprod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif how in ["mean", "median", "var", "std", "sem"]:
if dtype.kind in "fc":
return dtype
elif dtype.kind in "iub":
return np.dtype(np.float64)
return dtype
@final
def _cython_op_ndim_compat(
self,
values: np.ndarray,
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: npt.NDArray[np.bool_] | None = None,
result_mask: npt.NDArray[np.bool_] | None = None,
initial: Any = 0,
**kwargs,
) -> np.ndarray:
if values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
if mask is not None:
mask = mask[None, :]
if result_mask is not None:
result_mask = result_mask[None, :]
res = self._call_cython_op(
values2d,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
initial=initial,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
return self._call_cython_op(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
initial=initial,
**kwargs,
)
@final
def _call_cython_op(
self,
values: np.ndarray, # np.ndarray[ndim=2]
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: npt.NDArray[np.bool_] | None,
result_mask: npt.NDArray[np.bool_] | None,
initial: Any = 0,
**kwargs,
) -> np.ndarray: # np.ndarray[ndim=2]
orig_values = values
dtype = values.dtype
is_numeric = dtype.kind in "iufcb"
is_datetimelike = dtype.kind in "mM"
if self.how in ["any", "all"]:
if mask is None:
mask = isna(values)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif dtype.kind == "b":
values = values.view("uint8")
if values.dtype == "float16":
values = values.astype(np.float32)
if self.how in ["any", "all"]:
if dtype == object:
if kwargs["skipna"]:
# GH#37501: don't raise on pd.NA when skipna=True
if mask is not None and mask.any():
# mask on original values computed separately
values = values.copy()
values[mask] = True
values = values.astype(bool, copy=False).view(np.int8)
is_numeric = True
values = values.T
if mask is not None:
mask = mask.T
if result_mask is not None:
result_mask = result_mask.T
out_shape = self._get_output_shape(ngroups, values)
func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
values = self._get_cython_vals(values)
out_dtype = self._get_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if self.kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if self.how in [
"idxmin",
"idxmax",
"min",
"max",
"mean",
"last",
"first",
"sum",
"median",
]:
if self.how == "sum":
# pass in through kwargs only for sum (other functions don't have
# the keyword)
kwargs["initial"] = initial
func(
out=result,
counts=counts,
values=values,
labels=comp_ids,
min_count=min_count,
mask=mask,
result_mask=result_mask,
is_datetimelike=is_datetimelike,
**kwargs,
)
elif self.how in ["sem", "std", "var", "ohlc", "prod"]:
if self.how in ["std", "sem"]:
kwargs["is_datetimelike"] = is_datetimelike
func(
result,
counts,
values,
comp_ids,
min_count=min_count,
mask=mask,
result_mask=result_mask,
**kwargs,
)
elif self.how in ["any", "all"]:
func(
out=result,
values=values,
labels=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
result = result.astype(bool, copy=False)
elif self.how in ["skew", "kurt"]:
func(
out=result,
counts=counts,
values=values,
labels=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
if dtype == object:
result = result.astype(object)
else:
raise NotImplementedError(f"{self.how} is not implemented")
else:
# TODO: min_count
if self.how != "rank":
# TODO: should rank take result_mask?
kwargs["result_mask"] = result_mask
func(
out=result,
values=values,
labels=comp_ids,
ngroups=ngroups,
is_datetimelike=is_datetimelike,
mask=mask,
**kwargs,
)
if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]:
# i.e. counts is defined. Locations where count<min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767. For idxmin/idxmax is handled specially via post-processing
if result.dtype.kind in "iu" and not is_datetimelike:
# if the op keeps the int dtypes, we have to use 0
cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)
empty_groups = counts < cutoff
if empty_groups.any():
if result_mask is not None:
assert result_mask[empty_groups].all()
else:
# Note: this conversion could be lossy, see GH#40767
result = result.astype("float64")
result[empty_groups] = np.nan
result = result.T
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
# Casting only needed for float16, bool, datetimelike,
# and self.how in ["sum", "prod", "ohlc", "cumprod"]
res_dtype = self._get_result_dtype(orig_values.dtype)
op_result = maybe_downcast_to_dtype(result, res_dtype)
else:
op_result = result
return op_result
@final
def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
if values.ndim == 2:
assert axis == 1, axis
elif not is_1d_only_ea_dtype(values.dtype):
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 0
@final
def cython_operation(
self,
*,
values: ArrayLike,
axis: AxisInt,
min_count: int = -1,
comp_ids: np.ndarray,
ngroups: int,
**kwargs,
) -> ArrayLike:
"""
Call our cython function, with appropriate pre- and post- processing.
"""
self._validate_axis(axis, values)
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
return values._groupby_op(
how=self.how,
has_dropped_na=self.has_dropped_na,
min_count=min_count,
ngroups=ngroups,
ids=comp_ids,
**kwargs,
)
return self._cython_op_ndim_compat(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
|
WrappedCythonOp
|
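WrappedCythonOp is internal dispatch machinery; users reach it through the normal groupby API. A hedged illustration of one call path: an aggregation like the one below ends up as kind="aggregate", how="mean" and is routed to the libgroupby group_mean kernel via _get_cython_function.

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 3.0, 5.0]})
print(df.groupby("key")["val"].mean())
# key
# a    2.0
# b    5.0
# Name: val, dtype: float64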
python
|
gevent__gevent
|
src/gevent/_abstract_linkable.py
|
{
"start": 1389,
"end": 1926
}
|
class ____(object):
__slots__ = (
'pending',
)
def __init__(self):
self.pending = False
def get_roots_and_hubs():
from gevent.hub import Hub # delay import
return {
x.parent: x
for x in get_objects()
# Make sure to only find hubs that have a loop
# and aren't destroyed. If we don't do that, we can
# get an old hub that no longer works leading to issues in
# combined test cases.
if isinstance(x, Hub) and x.loop is not None
}
|
_FakeNotifier
|
python
|
scrapy__scrapy
|
scrapy/utils/serialize.py
|
{
"start": 192,
"end": 1117
}
|
class ____(json.JSONEncoder):
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def default(self, o: Any) -> Any:
if isinstance(o, set):
return list(o)
if isinstance(o, datetime.datetime):
return o.strftime(f"{self.DATE_FORMAT} {self.TIME_FORMAT}")
if isinstance(o, datetime.date):
return o.strftime(self.DATE_FORMAT)
if isinstance(o, datetime.time):
return o.strftime(self.TIME_FORMAT)
if isinstance(o, decimal.Decimal):
return str(o)
if isinstance(o, defer.Deferred):
return str(o)
if isinstance(o, Request):
return f"<{type(o).__name__} {o.method} {o.url}>"
if isinstance(o, Response):
return f"<{type(o).__name__} {o.status} {o.url}>"
if is_item(o):
return ItemAdapter(o).asdict()
return super().default(o)
|
ScrapyJSONEncoder
|
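A hedged usage sketch: the encoder plugs into the standard-library json module through the `cls` argument, which is how it handles datetimes, sets, Decimals, and Scrapy requests/responses that json.dumps would otherwise reject.

import datetime
import decimal
import json

from scrapy.utils.serialize import ScrapyJSONEncoder

payload = {
    "when": datetime.datetime(2024, 1, 2, 3, 4, 5),
    "price": decimal.Decimal("9.99"),
    "tags": {"sale"},
}
print(json.dumps(payload, cls=ScrapyJSONEncoder, sort_keys=True))
# {"price": "9.99", "tags": ["sale"], "when": "2024-01-02 03:04:05"}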
python
|
django__django
|
django/contrib/gis/db/models/functions.py
|
{
"start": 20371,
"end": 20718
}
|
class ____(GeomOutputGeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, "srid", int),
]
if "output_field" not in extra:
extra["output_field"] = GeometryField(srid=srid)
super().__init__(*expressions, **extra)
|
Transform
|
python
|
Pylons__pyramid
|
src/pyramid/interfaces.py
|
{
"start": 52731,
"end": 52896
}
|
class ____(Interface):
def __call__(value, info):
"""
        Create an :class:`.IPredicate` instance for a specific value.
"""
|
IPredicateFactory
|
python
|
django__django
|
django/db/models/deletion.py
|
{
"start": 3406,
"end": 22130
}
|
class ____:
def __init__(self, using, origin=None, force_collection=False):
self.using = using
# A Model or QuerySet object.
self.origin = origin
# Force collecting objects for deletion on the Python-level.
self.force_collection = force_collection
# Initially, {model: {instances}}, later values become lists.
self.data = defaultdict(set)
# {(field, value): [instances, …]}
self.field_updates = defaultdict(list)
# {model: {field: {instances}}}
self.restricted_objects = defaultdict(partial(defaultdict, set))
# fast_deletes is a list of queryset-likes that can be deleted without
# fetching the objects into memory.
self.fast_deletes = []
# Tracks deletion-order dependency for databases without transactions
# or ability to defer constraint checks. Only concrete model classes
# should be included, as the dependencies exist only between actual
# database tables; proxy models are represented here by their concrete
# parent.
self.dependencies = defaultdict(set) # {model: {models}}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Add 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Return a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data[model]
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if source is not None and not nullable:
self.add_dependency(source, model, reverse_dependency=reverse_dependency)
return new_objs
def add_dependency(self, model, dependency, reverse_dependency=False):
if reverse_dependency:
model, dependency = dependency, model
self.dependencies[model._meta.concrete_model].add(
dependency._meta.concrete_model
)
self.data.setdefault(dependency, self.data.default_factory())
def add_field_update(self, field, value, objs):
"""
Schedule a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
self.field_updates[field, value].append(objs)
def add_restricted_objects(self, field, objs):
if objs:
model = objs[0].__class__
self.restricted_objects[model][field].update(objs)
def clear_restricted_objects_from_set(self, model, objs):
if model in self.restricted_objects:
self.restricted_objects[model] = {
field: items - objs
for field, items in self.restricted_objects[model].items()
}
def clear_restricted_objects_from_queryset(self, model, qs):
if model in self.restricted_objects:
objs = set(
qs.filter(
pk__in=[
obj.pk
for objs in self.restricted_objects[model].values()
for obj in objs
]
)
)
self.clear_restricted_objects_from_set(model, objs)
def _has_signal_listeners(self, model):
return signals.pre_delete.has_listeners(
model
) or signals.post_delete.has_listeners(model)
def can_fast_delete(self, objs, from_field=None):
"""
Determine if the objects in the given queryset-like or single object
can be fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
        The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Also allow
        skipping a parent -> child -> parent chain that would otherwise
        prevent fast deletion of the child.
"""
if self.force_collection:
return False
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if hasattr(objs, "_meta"):
model = objs._meta.model
elif hasattr(objs, "model") and hasattr(objs, "_raw_delete"):
model = objs.model
else:
return False
if self._has_signal_listeners(model):
return False
# The use of from_field comes from the need to avoid cascade back to
# parent when parent delete is cascading to child.
opts = model._meta
return (
all(
link == from_field
for link in opts.concrete_model._meta.parents.values()
)
and
# Foreign keys pointing to this model.
all(
related.field.remote_field.on_delete in SKIP_COLLECTION
for related in get_candidate_relations_to_delete(opts)
)
and (
# Something like generic foreign key.
not any(
hasattr(field, "bulk_related_objects")
for field in opts.private_fields
)
)
)
def get_del_batches(self, objs, fields):
"""
Return the objs in suitably sized batches for the used connection.
"""
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size(fields, objs), 1
)
if len(objs) > conn_batch_size:
return [
objs[i : i + conn_batch_size]
for i in range(0, len(objs), conn_batch_size)
]
else:
return [objs]
def collect(
self,
objs,
source=None,
nullable=False,
collect_related=True,
source_attr=None,
reverse_dependency=False,
keep_parents=False,
fail_on_restricted=True,
):
"""
Add 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
If the call is the result of a cascade, 'source' should be the model
that caused it and 'nullable' should be set to True, if the relation
can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
        If 'keep_parents' is True, data of parent models will not be deleted.
        If 'fail_on_restricted' is False, no error is raised even if deleting
        such objects is prohibited due to RESTRICT; this defers restricted
        object checking to recursive calls, where the top-level call may need
        to collect more objects to determine whether restricted ones can be
        deleted.
"""
if self.can_fast_delete(objs):
self.fast_deletes.append(objs)
return
new_objs = self.add(
objs, source, nullable, reverse_dependency=reverse_dependency
)
if not new_objs:
return
model = new_objs[0].__class__
if not keep_parents:
# Recursively collect concrete model's parent models, but not their
# related objects. These will be found by meta.get_fields()
concrete_model = model._meta.concrete_model
for ptr in concrete_model._meta.parents.values():
if ptr:
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(
parent_objs,
source=model,
source_attr=ptr.remote_field.related_name,
collect_related=False,
reverse_dependency=True,
fail_on_restricted=False,
)
if not collect_related:
return
model_fast_deletes = defaultdict(list)
protected_objects = defaultdict(list)
for related in get_candidate_relations_to_delete(model._meta):
# Preserve parent reverse relationships if keep_parents=True.
if (
keep_parents
and related.model._meta.concrete_model in model._meta.all_parents
):
continue
field = related.field
on_delete = field.remote_field.on_delete
if on_delete in SKIP_COLLECTION:
if self.force_collection and (
forced_on_delete := getattr(on_delete, "forced_collector", None)
):
on_delete = forced_on_delete
else:
continue
related_model = related.related_model
if self.can_fast_delete(related_model, from_field=field):
model_fast_deletes[related_model].append(field)
continue
batches = self.get_del_batches(new_objs, [field])
for batch in batches:
sub_objs = self.related_objects(related_model, [field], batch)
# Non-referenced fields can be deferred if no signal receivers
# are connected for the related model as they'll never be
# exposed to the user. Skip field deferring when some
# relationships are select_related as interactions between both
# features are hard to get right. This should only happen in
# the rare cases where .related_objects is overridden anyway.
if not (
sub_objs.query.select_related
or self._has_signal_listeners(related_model)
):
referenced_fields = set(
chain.from_iterable(
(rf.attname for rf in rel.field.foreign_related_fields)
for rel in get_candidate_relations_to_delete(
related_model._meta
)
)
)
sub_objs = sub_objs.only(*tuple(referenced_fields))
if getattr(on_delete, "lazy_sub_objs", False) or sub_objs:
try:
on_delete(self, field, sub_objs, self.using)
except ProtectedError as error:
key = "'%s.%s'" % (field.model.__name__, field.name)
protected_objects[key] += error.protected_objects
if protected_objects:
raise ProtectedError(
"Cannot delete some instances of model %r because they are "
"referenced through protected foreign keys: %s."
% (
model.__name__,
", ".join(protected_objects),
),
set(chain.from_iterable(protected_objects.values())),
)
for related_model, related_fields in model_fast_deletes.items():
batches = self.get_del_batches(new_objs, related_fields)
for batch in batches:
sub_objs = self.related_objects(related_model, related_fields, batch)
self.fast_deletes.append(sub_objs)
for field in model._meta.private_fields:
if hasattr(field, "bulk_related_objects"):
# It's something like generic foreign key.
sub_objs = field.bulk_related_objects(new_objs, self.using)
self.collect(
sub_objs, source=model, nullable=True, fail_on_restricted=False
)
if fail_on_restricted:
# Raise an error if collected restricted objects (RESTRICT) aren't
# candidates for deletion also collected via CASCADE.
for related_model, instances in self.data.items():
self.clear_restricted_objects_from_set(related_model, instances)
for qs in self.fast_deletes:
self.clear_restricted_objects_from_queryset(qs.model, qs)
if self.restricted_objects.values():
restricted_objects = defaultdict(list)
for related_model, fields in self.restricted_objects.items():
for field, objs in fields.items():
if objs:
key = "'%s.%s'" % (related_model.__name__, field.name)
restricted_objects[key] += objs
if restricted_objects:
raise RestrictedError(
"Cannot delete some instances of model %r because "
"they are referenced through restricted foreign keys: "
"%s."
% (
model.__name__,
", ".join(restricted_objects),
),
set(chain.from_iterable(restricted_objects.values())),
)
def related_objects(self, related_model, related_fields, objs):
"""
Get a QuerySet of the related model to objs via related fields.
"""
predicate = query_utils.Q.create(
[(f"{related_field.name}__in", objs) for related_field in related_fields],
connector=query_utils.Q.OR,
)
return related_model._base_manager.using(self.using).filter(predicate)
def instances_with_model(self):
for model, instances in self.data.items():
for obj in instances:
yield model, obj
def sort(self):
sorted_models = []
concrete_models = set()
models = list(self.data)
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model._meta.concrete_model)
if not (dependencies and dependencies.difference(concrete_models)):
sorted_models.append(model)
concrete_models.add(model._meta.concrete_model)
found = True
if not found:
return
self.data = {model: self.data[model] for model in sorted_models}
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until
# the end of a transaction.
self.sort()
# number of objects deleted for each model label
deleted_counter = Counter()
# Optimize for the case with a single obj and no dependencies
if len(self.data) == 1 and len(instances) == 1:
instance = list(instances)[0]
if self.can_fast_delete(instance):
with transaction.mark_for_rollback_on_error(self.using):
count = sql.DeleteQuery(model).delete_batch(
[instance.pk], self.using
)
setattr(instance, model._meta.pk.attname, None)
return count, {model._meta.label: count}
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model,
instance=obj,
using=self.using,
origin=self.origin,
)
# fast deletes
for qs in self.fast_deletes:
count = qs._raw_delete(using=self.using)
if count:
deleted_counter[qs.model._meta.label] += count
# update fields
for (field, value), instances_list in self.field_updates.items():
updates = []
objs = []
for instances in instances_list:
if (
isinstance(instances, models.QuerySet)
and instances._result_cache is None
):
updates.append(instances)
else:
objs.extend(instances)
if updates:
combined_updates = reduce(or_, updates)
combined_updates.update(**{field.name: value})
if objs:
model = objs[0].__class__
query = sql.UpdateQuery(model)
query.update_batch(
list({obj.pk for obj in objs}), {field.name: value}, self.using
)
# reverse instance collections
for instances in self.data.values():
instances.reverse()
# delete instances
for model, instances in self.data.items():
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
count = query.delete_batch(pk_list, self.using)
if count:
deleted_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model,
instance=obj,
using=self.using,
origin=self.origin,
)
for model, instances in self.data.items():
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
return sum(deleted_counter.values()), dict(deleted_counter)
|
Collector
|
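A hedged sketch of driving the collector the way Model.delete() does internally; it assumes a configured Django project and a hypothetical Book model, so it is illustrative rather than standalone-runnable.
from django.db.models.deletion import Collector
collector = Collector(using="default")
collector.collect(list(Book.objects.filter(pk__in=[1, 2])))  # cascades per on_delete handlers
total, per_model = collector.delete()                        # (count, {"app.Model": count})
print(total, per_model)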
python
|
huggingface__transformers
|
tests/models/blip_2/test_modeling_blip_2.py
|
{
"start": 45959,
"end": 49172
}
|
class ____:
def __init__(self, parent, vision_kwargs=None, qformer_kwargs=None, is_training=True):
if vision_kwargs is None:
vision_kwargs = {}
if qformer_kwargs is None:
qformer_kwargs = {"use_qformer_text_input": True}
self.parent = parent
self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs)
self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs)
self.is_training = is_training
self.num_hidden_layers = self.vision_model_tester.num_hidden_layers
self.num_attention_heads = self.vision_model_tester.num_attention_heads
self.seq_length = self.vision_model_tester.seq_length
self.hidden_size = self.vision_model_tester.hidden_size
self.batch_size = self.vision_model_tester.batch_size # need bs for batching_equivalence test
def get_config(self):
return Blip2Config(
vision_config=self.vision_model_tester.get_config(),
qformer_config=self.qformer_model_tester.get_config(),
)
def prepare_config_and_inputs(self):
_, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
def create_and_check_model(self, config, pixel_values):
model = Blip2VisionModelWithProjection(config=config)
model.to(torch_device)
model.set_attn_implementation("eager")
model.eval()
with torch.no_grad():
result = model(pixel_values, output_attentions=True, output_hidden_states=True)
self.parent.assertEqual(
result.last_hidden_state.shape,
(
self.vision_model_tester.batch_size,
self.vision_model_tester.seq_length,
self.qformer_model_tester.hidden_size,
),
)
self.parent.assertEqual(
result.image_embeds.shape,
(
self.vision_model_tester.batch_size,
config.vision_config.hidden_size,
config.image_text_hidden_size,
),
)
with torch.no_grad():
result2 = model(
pixel_values,
return_dict=not config.use_return_dict,
output_attentions=True,
output_hidden_states=True,
)
self.parent.assertTrue(torch.allclose(result.image_embeds, result2[0]))
self.parent.assertTrue(torch.allclose(result.last_hidden_state, result2[1]))
self.parent.assertTrue(torch.allclose(result.hidden_states[0], result2[2][0]))
self.parent.assertTrue(torch.allclose(result.hidden_states[1], result2[2][1]))
self.parent.assertTrue(torch.allclose(result.attentions[0], result2[3][0]))
self.parent.assertTrue(torch.allclose(result.attentions[1], result2[3][1]))
@require_torch
|
Blip2VisionModelWithProjectionTester
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 86516,
"end": 90122
}
|
class ____:
@staticmethod
def pq(
bit_compression: Optional[bool] = None,
centroids: Optional[int] = None,
encoder_distribution: Optional[PQEncoderDistribution] = None,
encoder_type: Optional[PQEncoderType] = None,
segments: Optional[int] = None,
training_limit: Optional[int] = None,
enabled: bool = True,
) -> _PQConfigUpdate:
"""Create a `_PQConfigUpdate` object to be used when updating the product quantization (PQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#hnsw-with-compression) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
if bit_compression is not None:
_Warnings.bit_compression_in_pq_config()
return _PQConfigUpdate(
enabled=enabled,
centroids=centroids,
segments=segments,
trainingLimit=training_limit,
encoder=(
_PQEncoderConfigUpdate(type_=encoder_type, distribution=encoder_distribution)
if encoder_type is not None or encoder_distribution is not None
else None
),
)
@staticmethod
def bq(rescore_limit: Optional[int] = None, enabled: bool = True) -> _BQConfigUpdate:
"""Create a `_BQConfigUpdate` object to be used when updating the binary quantization (BQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#hnsw-with-compression) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _BQConfigUpdate(rescoreLimit=rescore_limit, enabled=enabled)
@staticmethod
def sq(
rescore_limit: Optional[int] = None,
training_limit: Optional[int] = None,
enabled: bool = True,
) -> _SQConfigUpdate:
"""Create a `_SQConfigUpdate` object to be used when updating the scalar quantization (SQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#hnsw-with-compression) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _SQConfigUpdate(
enabled=enabled, rescoreLimit=rescore_limit, trainingLimit=training_limit
)
@staticmethod
def rq(
rescore_limit: Optional[int] = None,
enabled: bool = True,
bits: Optional[int] = None,
) -> _RQConfigUpdate:
"""Create a `_RQConfigUpdate` object to be used when updating the Rotational quantization (RQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration in `collection.update()`.
        Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#hnsw-with-compression) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _RQConfigUpdate(enabled=enabled, rescoreLimit=rescore_limit, bits=bits)
|
_VectorIndexQuantizerUpdate
|
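A hedged sketch built only from the static methods above; the import path mirrors the module path given for this row, and the leading underscore marks the class as private, so going through the client's public configuration helpers is normally preferable.
from weaviate.collections.classes.config import _VectorIndexQuantizerUpdate as Quantizer
pq_update = Quantizer.pq(centroids=256, segments=96, training_limit=100_000)
bq_update = Quantizer.bq(rescore_limit=200)
# Per the docstrings, these objects are meant for the `quantizer` argument of a
# vector-index reconfiguration passed to collection.update().
print(type(pq_update).__name__, type(bq_update).__name__)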
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/tasks.py
|
{
"start": 224977,
"end": 230665
}
|
class ____(Request):
"""
Add or update task hyper parameters
:param task: Task ID
:type task: str
:param hyperparams: Task hyper parameters. The new ones will be added and the
already existing ones will be updated
:type hyperparams: Sequence[ParamsItem]
:param replace_hyperparams: Can be set to one of the following: 'all' - all the
hyper parameters will be replaced with the provided ones 'section' - the
sections that present in the new parameters will be replaced with the provided
parameters 'none' (the default value) - only the specific parameters will be
updated or added
:type replace_hyperparams: ReplaceHyperparamsEnum
:param force: If set to True then both new and running task hyper params can be
edited. Otherwise only the new task ones. Default is False
:type force: bool
"""
_service = "tasks"
_action = "edit_hyper_params"
_version = "2.13"
_schema = {
"definitions": {
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"replace_hyperparams_enum": {
"enum": ["none", "section", "all"],
"type": "string",
},
},
"properties": {
"force": {
"description": "If set to True then both new and running task hyper params can be edited. Otherwise only the new task ones. Default is False",
"type": "boolean",
},
"hyperparams": {
"description": "Task hyper parameters. The new ones will be added and the already existing ones will be updated",
"items": {"$ref": "#/definitions/params_item"},
"type": "array",
},
"replace_hyperparams": {
"$ref": "#/definitions/replace_hyperparams_enum",
"description": "Can be set to one of the following:\n 'all' - all the hyper parameters will be replaced with the provided ones\n 'section' - the sections that present in the new parameters will be replaced with the provided parameters\n 'none' (the default value) - only the specific parameters will be updated or added",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "hyperparams"],
"type": "object",
}
def __init__(
self,
task: str,
hyperparams: List[Any],
replace_hyperparams: Any = None,
force: Optional[bool] = None,
**kwargs: Any
) -> None:
super(EditHyperParamsRequest, self).__init__(**kwargs)
self.task = task
self.hyperparams = hyperparams
self.replace_hyperparams = replace_hyperparams
self.force = force
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("hyperparams")
def hyperparams(self) -> List[Any]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: List[Any]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", (dict, ParamsItem), is_array=True)
value = [ParamsItem(**v) if isinstance(v, dict) else v for v in value]
self._property_hyperparams = value
@schema_property("replace_hyperparams")
def replace_hyperparams(self) -> Any:
return self._property_replace_hyperparams
@replace_hyperparams.setter
def replace_hyperparams(self, value: Any) -> None:
if value is None:
self._property_replace_hyperparams = None
return
if isinstance(value, six.string_types):
try:
value = ReplaceHyperparamsEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "replace_hyperparams", enum.Enum)
self._property_replace_hyperparams = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
|
EditHyperParamsRequest
|
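A hedged construction sketch based only on the signature and setters above; it assumes the clearml package is installed, and "abc123" is a stand-in for a real task ID.
from clearml.backend_api.services.v2_13.tasks import EditHyperParamsRequest
req = EditHyperParamsRequest(
    task="abc123",
    hyperparams=[{"section": "Args", "name": "lr", "value": "0.01", "type": "float"}],
    replace_hyperparams="none",  # the setter coerces the string to ReplaceHyperparamsEnum
    force=False,
)
print(req.task, len(req.hyperparams), req.replace_hyperparams)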
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/implementation/utils.py
|
{
"start": 20074,
"end": 21752
}
|
class ____(Exception):
# The `error` arg here should be a Graphene type implementing the interface `GrapheneError`, but
# this is not trackable by the Python type system.
def __init__(self, error: Any):
self.error = error
message = "[{cls}] {message}".format(
cls=error.__class__.__name__,
message=error.message if hasattr(error, "message") else None,
)
super().__init__(message)
def pipeline_selector_from_graphql(data: Mapping[str, Any]) -> JobSubsetSelector:
asset_selection = cast("Optional[Iterable[dict[str, list[str]]]]", data.get("assetSelection"))
asset_check_selection = cast(
"Optional[Iterable[dict[str, Any]]]", data.get("assetCheckSelection")
)
return JobSubsetSelector(
location_name=data["repositoryLocationName"],
repository_name=data["repositoryName"],
job_name=data.get("pipelineName") or data.get("jobName"), # type: ignore
op_selection=data.get("solidSelection"),
asset_selection=(
[AssetKey.from_graphql_input(asset_key) for asset_key in asset_selection]
if asset_selection
else None
),
asset_check_selection=(
[AssetCheckKey.from_graphql_input(asset_check) for asset_check in asset_check_selection]
if asset_check_selection is not None
else None
),
)
def graph_selector_from_graphql(data: Mapping[str, Any]) -> GraphSelector:
return GraphSelector(
location_name=data["repositoryLocationName"],
repository_name=data["repositoryName"],
graph_name=data["graphName"],
)
|
UserFacingGraphQLError
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/core/ag_ctx.py
|
{
"start": 1700,
"end": 1774
}
|
class ____(enum.Enum):
UNSPECIFIED = 0
ENABLED = 1
DISABLED = 2
|
Status
|
python
|
scipy__scipy
|
scipy/optimize/_optimize.py
|
{
"start": 4132,
"end": 5748
}
|
class ____(_RichResult):
"""
Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun : float
Value of objective function at `x`.
jac, hess : ndarray
Values of objective function's Jacobian and its Hessian at `x` (if
available). The Hessian may be an approximation, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
Depending on the specific solver being used, `OptimizeResult` may
not have all attributes listed here, and they may have additional
attributes not listed here. Since this class is essentially a
subclass of dict with attribute accessors, one can see which
attributes are available using the `OptimizeResult.keys` method.
"""
pass
|
OptimizeResult
|
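A quick demonstration of the result object described above, assuming SciPy is installed: minimize() returns an OptimizeResult whose attributes are also accessible as dict keys.
from scipy.optimize import minimize, rosen
res = minimize(rosen, x0=[1.3, 0.7, 0.8, 1.9, 1.2], method="Nelder-Mead")
print(res.success, res.nit, res.nfev)  # attribute access
print(sorted(res.keys()))              # dict-style introspection of available fields
print(res.x)                           # solution, close to all ones for the Rosenbrock function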
python
|
jazzband__django-model-utils
|
model_utils/models.py
|
{
"start": 4350,
"end": 5915
}
|
class ____(models.Model):
"""
An abstract base class model with a ``is_removed`` field that
marks entries that are not going to be used anymore, but are
kept in db for any reason.
Default manager returns only not-removed entries.
"""
is_removed = models.BooleanField(default=False)
class Meta:
abstract = True
objects: models.Manager[SoftDeletableModel] = SoftDeletableManager(_emit_deprecation_warnings=True)
available_objects: models.Manager[SoftDeletableModel] = SoftDeletableManager()
all_objects = models.Manager()
# Note that soft delete does not return anything,
# which doesn't conform to Django's interface.
# https://github.com/jazzband/django-model-utils/issues/541
@overload # type: ignore[override]
def delete(
self, using: Any = None, *args: Any, soft: Literal[True] = True, **kwargs: Any
) -> None:
...
@overload
def delete(
self, using: Any = None, *args: Any, soft: Literal[False], **kwargs: Any
) -> tuple[int, dict[str, int]]:
...
def delete(
self, using: Any = None, *args: Any, soft: bool = True, **kwargs: Any
) -> tuple[int, dict[str, int]] | None:
"""
Soft delete object (set its ``is_removed`` field to True).
Actually delete object if setting ``soft`` to False.
"""
if soft:
self.is_removed = True
self.save(using=using)
return None
else:
return super().delete(using, *args, **kwargs)
|
SoftDeletableModel
|
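A hedged sketch of how the mixin above is typically used; it assumes a configured Django project, and Article is a hypothetical model, so the ORM calls are shown as comments rather than executed.
from django.db import models
from model_utils.models import SoftDeletableModel
class Article(SoftDeletableModel):
    title = models.CharField(max_length=100)
# article.delete() only sets is_removed=True and saves the row.
# article.delete(soft=False) issues a real DELETE and returns (count, per-model counts).
# Article.available_objects filters out soft-deleted rows; Article.all_objects sees everything.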
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_events.py
|
{
"start": 2706,
"end": 9876
}
|
class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_a_response().with_record(_a_record()).with_record(_a_record()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_many_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_a_response().with_pagination().with_record(_a_record().with_id("last_record_id_from_first_page")).build(),
)
http_mocker.get(
_a_request()
.with_starting_after("last_record_id_from_first_page")
.with_created_gte(_A_START_DATE)
.with_created_lte(_NOW)
.with_limit(100)
.build(),
_a_response().with_record(_a_record()).with_record(_a_record()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_start_date_before_30_days_stripe_limit_and_slice_range_when_read_then_perform_request_before_30_days(
self, http_mocker: HttpMocker
) -> None:
"""
        This case is special because the source queries a time range that lies more than 30 days in the past. That being said, as of 2023-12-13 the API
mentions that "We only guarantee access to events through the Retrieve Event API for 30 days." (see
https://stripe.com/docs/api/events)
"""
start_date = _NOW - timedelta(days=61)
slice_range = timedelta(days=30)
slice_datetime = start_date + slice_range
http_mocker.get( # this first request has both gte and lte before 30 days even though we know there should not be records returned
_a_request()
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_a_response().build(),
)
http_mocker.get(
_a_request()
.with_created_gte(slice_datetime)
.with_created_lte(slice_datetime + slice_range - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_a_response().build(),
)
http_mocker.get(
_a_request().with_created_gte(slice_datetime + slice_range).with_created_lte(_NOW).with_limit(100).build(),
_a_response().build(),
)
self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
# request matched http_mocker
@HttpMocker()
def test_given_lookback_window_when_read_then_request_before_start_date(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=30)
lookback_window = timedelta(days=10)
http_mocker.get(
_a_request().with_created_gte(start_date).with_created_lte(_NOW).with_limit(100).build(),
_a_response().build(),
)
self._read(_config().with_start_date(start_date).with_lookback_window_in_days(lookback_window.days))
# request matched http_mocker
@HttpMocker()
def test_given_slice_range_when_read_then_perform_multiple_requests(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=30)
slice_range = timedelta(days=20)
slice_datetime = start_date + slice_range
http_mocker.get(
_a_request()
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_a_response().build(),
)
http_mocker.get(
_a_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).build(),
_a_response().build(),
)
self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
@HttpMocker()
def test_given_http_status_400_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_any_query_params().build(),
a_response_with_status(400),
)
output = self._read(_config())
assert_stream_did_not_run(output, _STREAM_NAME, "Your account is not set up to use Issuing")
@HttpMocker()
def test_given_http_status_401_when_read_then_stream_is_incomplete(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config().with_start_date(_A_START_DATE), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_any_query_params().build(),
[
a_response_with_status(429),
_a_response().with_record(_a_record()).build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_any_query_params().build(),
[a_response_with_status(500), _a_response().with_record(_a_record()).build()],
)
output = self._read(_config())
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_a_request().with_any_query_params().build(),
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=0):
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_when_read(self, http_mocker: HttpMocker) -> None:
request = _a_request().with_any_query_params().build()
http_mocker.get(
request,
_a_response().build(),
)
self._read(_config().with_start_date(_A_START_DATE))
http_mocker.assert_number_of_calls(request, 1) # one call for the actual read
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
|
FullRefreshTest
|
python
|
realpython__materials
|
python-313/typing/readonly.py
|
{
"start": 896,
"end": 1439
}
|
class ____(TypedDict):
version: str
release_year: ReadOnly[int]
# %% Work with Version and PythonVersion
#
def get_version_info(ver: Version) -> str:
if "release_year" in ver:
return f"Version {ver['version']} released in {ver['release_year']}"
else:
return f"Version {ver['version']}"
py313 = PythonVersion(version="3.13", release_year=2024)
# Alternative syntax, using TypedDict as an annotation
# py313: PythonVersion = {"version": "3.13", "release_year": 2024}
print(get_version_info(py313))
|
PythonVersion
|
python
|
django__django
|
tests/model_forms/tests.py
|
{
"start": 132701,
"end": 132891
}
|
class ____(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
new = super().__new__(cls, name, bases, attrs)
new.base_fields = {}
return new
|
CustomMetaclass
|
python
|
walkccc__LeetCode
|
solutions/826. Most Profit Assigning Work/826.py
|
{
"start": 0,
"end": 435
}
|
class ____:
    def maxProfitAssignment(
        self,
        difficulty: list[int],
        profit: list[int],
        worker: list[int],
    ) -> int:
        ans = 0
        # Pair each job as (difficulty, profit) and sort by difficulty.
        jobs = sorted(zip(difficulty, profit))
        i = 0
        maxProfit = 0
        # Visit workers in ascending ability so the best reachable profit only grows.
        for w in sorted(worker):
            # Unlock every job this worker can handle and remember the best profit.
            while i < len(jobs) and w >= jobs[i][0]:
                maxProfit = max(maxProfit, jobs[i][1])
                i += 1
            ans += maxProfit
        return ans
|
Solution
|
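A quick sanity check of the solution above, with the target name Solution filled in for the placeholder; the input is the canonical LeetCode example, whose expected answer is 100 (workers 4, 5, 6, 7 earn 20 + 20 + 30 + 30).
solver = Solution()
print(solver.maxProfitAssignment(
    difficulty=[2, 4, 6, 8, 10],
    profit=[10, 20, 30, 40, 50],
    worker=[4, 5, 6, 7],
))  # 100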