| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
| python | openai__openai-python | src/openai/types/beta/threads/runs/run_step_delta_message_delta.py | {"start": 250, "end": 390} |
class ____(BaseModel):
message_id: Optional[str] = None
"""The ID of the message that was created by this run step."""
| MessageCreation |
| python | django__django | django/core/paginator.py | {"start": 463, "end": 511} |
class ____(InvalidPage):
pass
| PageNotAnInteger |
| python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/api/run.py | {"start": 356, "end": 591} |
class ____:
"""API for run metadata operations."""
client: IGraphQLClient
def get_run(self, run_id: str) -> "DgApiRun":
"""Get run metadata by ID."""
return get_run_via_graphql(self.client, run_id)
| DgApiRunApi |
| python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple13.py | {"start": 343, "end": 560} |
class ____(Protocol[Unpack[Ts]]):
def __call__(self, *args: *Ts) -> tuple[Unpack[Ts]]: ...
def invoke_posonly(fn: CallbackPosOnly[Unpack[Ts]], *args: *Ts) -> tuple[Unpack[Ts]]:
return fn(*args)
| CallbackPosOnly |
| python | kamyu104__LeetCode-Solutions | Python/generate-binary-strings-without-adjacent-zeros.py | {"start": 641, "end": 1043} |
class ____(object):
def validStrings(self, n):
"""
:type n: int
:rtype: List[str]
"""
q = [[]]
for _ in xrange(n):
new_q = []
for x in q:
if not x or x[-1] == '1':
new_q.append(x+['0'])
new_q.append(x+['1'])
q = new_q
return ["".join(x) for x in q]
| Solution2 |
| python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {"start": 91919, "end": 95479} |
class ____(nn.Module):
def __init__(self, config: DFineConfig):
super().__init__()
self.normalize_before = config.normalize_before
# self-attention
self.self_attn = DFineMultiheadAttention(
embed_dim=config.encoder_hidden_dim,
num_heads=config.num_attention_heads,
dropout=config.dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.encoder_activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(config.encoder_hidden_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, config.encoder_hidden_dim)
self.final_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
**kwargs,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
position_embeddings (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
if self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
if self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| DFineEncoderLayer |
| python | django__django | django/contrib/sites/migrations/0001_initial.py | {"start": 148, "end": 1361} |
class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Site",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"domain",
models.CharField(
max_length=100,
verbose_name="domain name",
validators=[_simple_domain_name_validator],
),
),
("name", models.CharField(max_length=50, verbose_name="display name")),
],
options={
"ordering": ["domain"],
"db_table": "django_site",
"verbose_name": "site",
"verbose_name_plural": "sites",
},
bases=(models.Model,),
managers=[
("objects", django.contrib.sites.models.SiteManager()),
],
),
]
| Migration |
| python | microsoft__pyright | packages/pyright-internal/src/tests/samples/descriptor1.py | {"start": 1281, "end": 1437} |
class ____:
def __get__(self, instance: Any, owner: Any) -> int | None: ...
def __set__(self, owner: Any, value: int | None) -> None: ...
| Descriptor1 |
| python | plotly__plotly.py | plotly/graph_objs/_deprecations.py | {"start": 11937, "end": 12836} |
class ____(dict):
"""
plotly.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.Marker
- plotly.graph_objs.histogram.selected.Marker
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.Marker
- plotly.graph_objs.histogram.selected.Marker
- etc.
"""
warnings.warn(
"""plotly.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.Marker
- plotly.graph_objs.histogram.selected.Marker
- etc.
""",
DeprecationWarning,
)
super().__init__(*args, **kwargs)
| Marker |
| python | celery__celery | t/unit/contrib/test_migrate.py | {"start": 1241, "end": 1524} |
class ____:
def test_strtotal(self):
x = State()
assert x.strtotal == '?'
x.total_apx = 100
assert x.strtotal == '100'
def test_repr(self):
x = State()
assert repr(x)
x.filtered = 'foo'
assert repr(x)
| test_State |
| python | pytorch__pytorch | torch/distributed/flight_recorder/components/types.py | {"start": 5310, "end": 12011} |
class ____:
"""
Util class to keep track of the state of an entry and standardize the way we
log the error info during analysis.
"""
def __init__(self, entry: dict[str, Any], expected_ranks: set[int]) -> None:
self.pg_name = entry["process_group"][0]
self.desc = entry["process_group"][1]
self.pg_desc = (
f"{self.pg_name}:{self.desc}" if self.desc != "undefined" else self.pg_name
)
self.profiling_name = entry["profiling_name"]
self.collective_seq_id = entry["collective_seq_id"]
self.p2p_seq_id = entry["p2p_seq_id"]
self.record_id = entry["record_id"]
self.input_sizes = entry["input_sizes"]
self.output_sizes = entry["output_sizes"]
self.collective_state = entry["state"]
self.collective_frames = entry.get("frames", [])
self.expected_ranks = expected_ranks
self.missing_ranks: set[int]
self.input_numel: int
self.output_numel: int
self.errors: set[tuple[int, MatchInfo]]
def log(
self,
logger: FlightRecorderLogger,
logger_msg: str,
frame_formatter: Any,
total_numel: Optional[tuple[int, int]] = None,
errors: Optional[set[tuple[int, MatchInfo]]] = None,
missing_ranks: Optional[set[int]] = None,
) -> None:
logger.info(
logger_msg,
self.collective_seq_id,
)
logger.info("internal record id: %s", self.record_id)
logger.info("group info: %s", self.pg_desc)
logger.info("collective: %s", self.profiling_name)
if missing_ranks:
self.missing_ranks = missing_ranks
logger.info("missing ranks: %s", missing_ranks)
if total_numel:
self.input_numel = total_numel[0]
self.output_numel = total_numel[1]
logger.info("total input numel: %d", total_numel[0])
logger.info("total output numel: %d", total_numel[1])
logger.info("input sizes: %s", self.input_sizes)
logger.info("output sizes: %s", self.output_sizes)
logger.info("world size: %d", len(self.expected_ranks))
logger.info("expected ranks: %s", str(self.expected_ranks))
logger.info("collective state: %s", self.collective_state)
if errors:
self.errors = errors
error_msg = ", ".join(
f"Culprit rank {error[0]}; {str(error[1])}" for error in errors
)
logger.info("error msg: %s", error_msg)
logger.info(
"collective stack trace: \n %s", frame_formatter(self.collective_frames)
)
def to_collective(
self,
id: int,
errors: Optional[set[tuple[int, MatchInfo]]] = None,
idx_map: Optional[dict[int, int]] = None,
all_entries: Optional[dict[int, list[dict[str, Any]]]] = None,
) -> Collective:
if not errors:
return Collective(
id=id,
group_id=self.pg_name,
record_id=self.record_id,
pg_desc=self.pg_desc,
pass_check=True,
collective_seq_id=self.collective_seq_id,
p2p_seq_id=self.p2p_seq_id,
collective_name=self.profiling_name,
input_sizes=self.input_sizes,
output_sizes=self.output_sizes,
expected_ranks=self.expected_ranks,
collective_state=self.collective_state,
collective_frames=self.collective_frames,
missing_ranks=getattr(self, "missing_ranks", None),
)
else:
assert idx_map is not None, "idx_map is None"
assert all_entries is not None, "all_entries is None"
mismatch_collectives = {}
for rank, error in errors:
idx = idx_map[rank]
entry = all_entries[rank][idx]
desc = entry["process_group"][1]
pg_name = entry["process_group"][0]
mismatch_collectives[rank] = Collective(
id=id,
group_id=entry["process_group"][0],
record_id=entry["record_id"],
pg_desc=f"{pg_name}:{desc}" if desc != "undefined" else pg_name,
pass_check=False,
collective_seq_id=entry["collective_seq_id"],
p2p_seq_id=entry["p2p_seq_id"],
collective_name=entry["profiling_name"],
input_sizes=entry["input_sizes"],
output_sizes=entry["output_sizes"],
expected_ranks=self.expected_ranks,
collective_state=entry["state"],
collective_frames=entry.get("frames", []),
type_of_mismatch=error,
)
return Collective(
id=id,
group_id=self.pg_name,
record_id=self.record_id,
pg_desc=self.pg_desc,
pass_check=False,
collective_seq_id=self.collective_seq_id,
p2p_seq_id=self.p2p_seq_id,
collective_name=self.profiling_name,
input_sizes=self.input_sizes,
output_sizes=self.output_sizes,
expected_ranks=self.expected_ranks,
collective_state=self.collective_state,
collective_frames=self.collective_frames,
input_numel=self.input_numel if hasattr(self, "input_numel") else None,
output_numel=self.output_numel
if hasattr(self, "output_numel")
else None,
missing_ranks=self.missing_ranks
if hasattr(self, "missing_ranks")
else None,
mismatch_collectives=mismatch_collectives,
)
def to_nccl_call(
self,
all_entries: dict[int, list[dict[str, Any]]],
idx_map: dict[int, int],
nccl_call_id: int,
collective_id: Any,
) -> list[NCCLCall]:
result = []
for i, k in idx_map.items():
all_entries[i].pop(k)
result.append(
NCCLCall(
id=nccl_call_id,
collective_id=collective_id,
group_id=self.pg_name, # type: ignore[arg-type]
global_rank=i,
traceback_id=0, # type: ignore[arg-type]
collective_type=self.profiling_name,
sizes=self.input_sizes,
)
)
nccl_call_id += 1
return result
| EntryState |
| python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {"start": 853616, "end": 854047} |
class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "issue")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
issue = sgqlc.types.Field(sgqlc.types.non_null(Issue), graphql_name="issue")
| PinnedEvent |
| python | doocs__leetcode | solution/0600-0699/0641.Design Circular Deque/Solution.py | {"start": 0, "end": 2578} |
class ____:
def __init__(self, k: int):
"""
Initialize your data structure here. Set the size of the deque to be k.
"""
self.q = [0] * k
self.front = 0
self.size = 0
self.capacity = k
def insertFront(self, value: int) -> bool:
"""
Adds an item at the front of Deque. Return true if the operation is successful.
"""
if self.isFull():
return False
if not self.isEmpty():
self.front = (self.front - 1 + self.capacity) % self.capacity
self.q[self.front] = value
self.size += 1
return True
def insertLast(self, value: int) -> bool:
"""
Adds an item at the rear of Deque. Return true if the operation is successful.
"""
if self.isFull():
return False
idx = (self.front + self.size) % self.capacity
self.q[idx] = value
self.size += 1
return True
def deleteFront(self) -> bool:
"""
Deletes an item from the front of Deque. Return true if the operation is successful.
"""
if self.isEmpty():
return False
self.front = (self.front + 1) % self.capacity
self.size -= 1
return True
def deleteLast(self) -> bool:
"""
Deletes an item from the rear of Deque. Return true if the operation is successful.
"""
if self.isEmpty():
return False
self.size -= 1
return True
def getFront(self) -> int:
"""
Get the front item from the deque.
"""
if self.isEmpty():
return -1
return self.q[self.front]
def getRear(self) -> int:
"""
Get the last item from the deque.
"""
if self.isEmpty():
return -1
idx = (self.front + self.size - 1) % self.capacity
return self.q[idx]
def isEmpty(self) -> bool:
"""
Checks whether the circular deque is empty or not.
"""
return self.size == 0
def isFull(self) -> bool:
"""
Checks whether the circular deque is full or not.
"""
return self.size == self.capacity
# Your MyCircularDeque object will be instantiated and called as such:
# obj = MyCircularDeque(k)
# param_1 = obj.insertFront(value)
# param_2 = obj.insertLast(value)
# param_3 = obj.deleteFront()
# param_4 = obj.deleteLast()
# param_5 = obj.getFront()
# param_6 = obj.getRear()
# param_7 = obj.isEmpty()
# param_8 = obj.isFull()
| MyCircularDeque |
| python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/benchmarks/pdsh.py | {"start": 31158, "end": 56209} |
class ____:
"""PDS-H DuckDB query definitions."""
name: str = "pdsh"
@staticmethod
def q1(run_config: RunConfig) -> str:
"""Query 1."""
return """
select
l_returnflag,
l_linestatus,
sum(l_quantity) as sum_qty,
sum(l_extendedprice) as sum_base_price,
sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
avg(l_quantity) as avg_qty,
avg(l_extendedprice) as avg_price,
avg(l_discount) as avg_disc,
count(*) as count_order
from
lineitem
where
l_shipdate <= DATE '1998-09-02'
group by
l_returnflag,
l_linestatus
order by
l_returnflag,
l_linestatus
"""
@staticmethod
def q2(run_config: RunConfig) -> str:
"""Query 2."""
return """
select
s_acctbal,
s_name,
n_name,
p_partkey,
p_mfgr,
s_address,
s_phone,
s_comment
from
part,
supplier,
partsupp,
nation,
region
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
and p_size = 15
and p_type like '%BRASS'
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'EUROPE'
and ps_supplycost = (
select
min(ps_supplycost)
from
partsupp,
supplier,
nation,
region
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'EUROPE'
)
order by
s_acctbal desc,
n_name,
s_name,
p_partkey
limit 100
"""
@staticmethod
def q3(run_config: RunConfig) -> str:
"""Query 3."""
return """
select
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,
o_orderdate,
o_shippriority
from
customer,
orders,
lineitem
where
c_mktsegment = 'BUILDING'
and c_custkey = o_custkey
and l_orderkey = o_orderkey
and o_orderdate < '1995-03-15'
and l_shipdate > '1995-03-15'
group by
l_orderkey,
o_orderdate,
o_shippriority
order by
revenue desc,
o_orderdate
limit 10
"""
@staticmethod
def q4(run_config: RunConfig) -> str:
"""Query 4."""
return """
select
o_orderpriority,
count(*) as order_count
from
orders
where
o_orderdate >= timestamp '1993-07-01'
and o_orderdate < timestamp '1993-07-01' + interval '3' month
and exists (
select
*
from
lineitem
where
l_orderkey = o_orderkey
and l_commitdate < l_receiptdate
)
group by
o_orderpriority
order by
o_orderpriority
"""
@staticmethod
def q5(run_config: RunConfig) -> str:
"""Query 5."""
return """
select
n_name,
sum(l_extendedprice * (1 - l_discount)) as revenue
from
customer,
orders,
lineitem,
supplier,
nation,
region
where
c_custkey = o_custkey
and l_orderkey = o_orderkey
and l_suppkey = s_suppkey
and c_nationkey = s_nationkey
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'ASIA'
and o_orderdate >= timestamp '1994-01-01'
and o_orderdate < timestamp '1994-01-01' + interval '1' year
group by
n_name
order by
revenue desc
"""
@staticmethod
def q6(run_config: RunConfig) -> str:
"""Query 6."""
return """
select
sum(l_extendedprice * l_discount) as revenue
from
lineitem
where
l_shipdate >= timestamp '1994-01-01'
and l_shipdate < timestamp '1994-01-01' + interval '1' year
and l_discount between .06 - 0.01 and .06 + 0.01
and l_quantity < 24
"""
@staticmethod
def q7(run_config: RunConfig) -> str:
"""Query 7."""
return """
select
supp_nation,
cust_nation,
l_year,
sum(volume) as revenue
from
(
select
n1.n_name as supp_nation,
n2.n_name as cust_nation,
year(l_shipdate) as l_year,
l_extendedprice * (1 - l_discount) as volume
from
supplier,
lineitem,
orders,
customer,
nation n1,
nation n2
where
s_suppkey = l_suppkey
and o_orderkey = l_orderkey
and c_custkey = o_custkey
and s_nationkey = n1.n_nationkey
and c_nationkey = n2.n_nationkey
and (
(n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY')
or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')
)
and l_shipdate between timestamp '1995-01-01' and timestamp '1996-12-31'
) as shipping
group by
supp_nation,
cust_nation,
l_year
order by
supp_nation,
cust_nation,
l_year
"""
@staticmethod
def q8(run_config: RunConfig) -> str:
"""Query 8."""
return """
select
o_year,
round(
sum(case
when nation = 'BRAZIL' then volume
else 0
end) / sum(volume)
, 2) as mkt_share
from
(
select
extract(year from o_orderdate) as o_year,
l_extendedprice * (1 - l_discount) as volume,
n2.n_name as nation
from
part,
supplier,
lineitem,
orders,
customer,
nation n1,
nation n2,
region
where
p_partkey = l_partkey
and s_suppkey = l_suppkey
and l_orderkey = o_orderkey
and o_custkey = c_custkey
and c_nationkey = n1.n_nationkey
and n1.n_regionkey = r_regionkey
and r_name = 'AMERICA'
and s_nationkey = n2.n_nationkey
and o_orderdate between timestamp '1995-01-01' and timestamp '1996-12-31'
and p_type = 'ECONOMY ANODIZED STEEL'
) as all_nations
group by
o_year
order by
o_year
"""
@staticmethod
def q9(run_config: RunConfig) -> str:
"""Query 9."""
return """
select
nation,
o_year,
round(sum(amount), 2) as sum_profit
from
(
select
n_name as nation,
year(o_orderdate) as o_year,
l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
from
part,
supplier,
lineitem,
partsupp,
orders,
nation
where
s_suppkey = l_suppkey
and ps_suppkey = l_suppkey
and ps_partkey = l_partkey
and p_partkey = l_partkey
and o_orderkey = l_orderkey
and s_nationkey = n_nationkey
and p_name like '%green%'
) as profit
group by
nation,
o_year
order by
nation,
o_year desc
"""
@staticmethod
def q10(run_config: RunConfig) -> str:
"""Query 10."""
return """
select
c_custkey,
c_name,
round(sum(l_extendedprice * (1 - l_discount)), 2) as revenue,
c_acctbal,
n_name,
c_address,
c_phone,
c_comment
from
customer,
orders,
lineitem,
nation
where
c_custkey = o_custkey
and l_orderkey = o_orderkey
and o_orderdate >= date '1993-10-01'
and o_orderdate < date '1993-10-01' + interval '3' month
and l_returnflag = 'R'
and c_nationkey = n_nationkey
group by
c_custkey,
c_name,
c_acctbal,
c_phone,
n_name,
c_address,
c_comment
order by
revenue desc
limit 20
"""
@staticmethod
def q11(run_config: RunConfig) -> str:
"""Query 11."""
return f"""
select
ps_partkey,
round(sum(ps_supplycost * ps_availqty), 2) as value
from
partsupp, supplier, nation
where
ps_suppkey = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'GERMANY'
group by
ps_partkey
having
sum(ps_supplycost * ps_availqty) > (
select
sum(ps_supplycost * ps_availqty) * {0.0001 / run_config.scale_factor}
from
partsupp, supplier, nation
where
ps_suppkey = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'GERMANY'
)
order by
value desc
"""
@staticmethod
def q12(run_config: RunConfig) -> str:
"""Query 12."""
return """
select
l_shipmode,
sum(case
when o_orderpriority = '1-URGENT'
or o_orderpriority = '2-HIGH'
then 1
else 0
end) as high_line_count,
sum(case
when o_orderpriority <> '1-URGENT'
and o_orderpriority <> '2-HIGH'
then 1
else 0
end) as low_line_count
from
orders,
lineitem
where
o_orderkey = l_orderkey
and l_shipmode in ('MAIL', 'SHIP')
and l_commitdate < l_receiptdate
and l_shipdate < l_commitdate
and l_receiptdate >= date '1994-01-01'
and l_receiptdate < date '1994-01-01' + interval '1' year
group by
l_shipmode
order by
l_shipmode
"""
@staticmethod
def q13(run_config: RunConfig) -> str:
"""Query 13."""
return """
select
c_count, count(*) as custdist
from (
select
c_custkey,
count(o_orderkey)
from
customer left outer join orders on
c_custkey = o_custkey
and o_comment not like '%special%requests%'
group by
c_custkey
)as c_orders (c_custkey, c_count)
group by
c_count
order by
custdist desc,
c_count desc
"""
@staticmethod
def q14(run_config: RunConfig) -> str:
"""Query 14."""
return """
select
round(100.00 * sum(case
when p_type like 'PROMO%'
then l_extendedprice * (1 - l_discount)
else 0
end) / sum(l_extendedprice * (1 - l_discount)), 2) as promo_revenue
from
lineitem,
part
where
l_partkey = p_partkey
and l_shipdate >= date '1995-09-01'
and l_shipdate < date '1995-09-01' + interval '1' month
"""
@staticmethod
def q15(run_config: RunConfig) -> str:
"""Query 15."""
return """
with revenue (supplier_no, total_revenue) as (
select
l_suppkey,
sum(l_extendedprice * (1 - l_discount))
from
lineitem
where
l_shipdate >= date '1996-01-01'
and l_shipdate < date '1996-01-01' + interval '3' month
group by
l_suppkey
)
select
s_suppkey,
s_name,
s_address,
s_phone,
total_revenue
from
supplier,
revenue
where
s_suppkey = supplier_no
and total_revenue = (
select
max(total_revenue)
from
revenue
)
order by
s_suppkey
"""
@staticmethod
def q16(run_config: RunConfig) -> str:
"""Query 16."""
return """
select
p_brand,
p_type,
p_size,
count(distinct ps_suppkey) as supplier_cnt
from
partsupp,
part
where
p_partkey = ps_partkey
and p_brand <> 'Brand#45'
and p_type not like 'MEDIUM POLISHED%'
and p_size in (49, 14, 23, 45, 19, 3, 36, 9)
and ps_suppkey not in (
select
s_suppkey
from
supplier
where
s_comment like '%Customer%Complaints%'
)
group by
p_brand,
p_type,
p_size
order by
supplier_cnt desc,
p_brand,
p_type,
p_size
"""
@staticmethod
def q17(run_config: RunConfig) -> str:
"""Query 17."""
return """
select
round(sum(l_extendedprice) / 7.0, 2) as avg_yearly
from
lineitem,
part
where
p_partkey = l_partkey
and p_brand = 'Brand#23'
and p_container = 'MED BOX'
and l_quantity < (
select
0.2 * avg(l_quantity)
from
lineitem
where
l_partkey = p_partkey
)
"""
@staticmethod
def q18(run_config: RunConfig) -> str:
"""Query 18."""
return """
select
c_name,
c_custkey,
o_orderkey,
o_orderdate as o_orderdat,
o_totalprice,
sum(l_quantity) as col6
from
customer,
orders,
lineitem
where
o_orderkey in (
select
l_orderkey
from
lineitem
group by
l_orderkey having
sum(l_quantity) > 300
)
and c_custkey = o_custkey
and o_orderkey = l_orderkey
group by
c_name,
c_custkey,
o_orderkey,
o_orderdate,
o_totalprice
order by
o_totalprice desc,
o_orderdate
limit 100
"""
@staticmethod
def q19(run_config: RunConfig) -> str:
"""Query 19."""
return """
select
round(sum(l_extendedprice* (1 - l_discount)), 2) as revenue
from
lineitem,
part
where
(
p_partkey = l_partkey
and p_brand = 'Brand#12'
and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
and l_quantity >= 1 and l_quantity <= 1 + 10
and p_size between 1 and 5
and l_shipmode in ('AIR', 'AIR REG')
and l_shipinstruct = 'DELIVER IN PERSON'
)
or
(
p_partkey = l_partkey
and p_brand = 'Brand#23'
and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
and l_quantity >= 10 and l_quantity <= 20
and p_size between 1 and 10
and l_shipmode in ('AIR', 'AIR REG')
and l_shipinstruct = 'DELIVER IN PERSON'
)
or
(
p_partkey = l_partkey
and p_brand = 'Brand#34'
and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
and l_quantity >= 20 and l_quantity <= 30
and p_size between 1 and 15
and l_shipmode in ('AIR', 'AIR REG')
and l_shipinstruct = 'DELIVER IN PERSON'
)
"""
@staticmethod
def q20(run_config: RunConfig) -> str:
"""Query 20."""
return """
select
s_name,
s_address
from
supplier,
nation
where
s_suppkey in (
select
ps_suppkey
from
partsupp
where
ps_partkey in (
select
p_partkey
from
part
where
p_name like 'forest%'
)
and ps_availqty > (
select
0.5 * sum(l_quantity)
from
lineitem
where
l_partkey = ps_partkey
and l_suppkey = ps_suppkey
and l_shipdate >= date '1994-01-01'
and l_shipdate < date '1994-01-01' + interval '1' year
)
)
and s_nationkey = n_nationkey
and n_name = 'CANADA'
order by
s_name
"""
@staticmethod
def q21(run_config: RunConfig) -> str:
"""Query 21."""
return """
select
s_name,
count(*) as numwait
from
supplier,
lineitem l1,
orders,
nation
where
s_suppkey = l1.l_suppkey
and o_orderkey = l1.l_orderkey
and o_orderstatus = 'F'
and l1.l_receiptdate > l1.l_commitdate
and exists (
select
*
from
lineitem l2
where
l2.l_orderkey = l1.l_orderkey
and l2.l_suppkey <> l1.l_suppkey
)
and not exists (
select
*
from
lineitem l3
where
l3.l_orderkey = l1.l_orderkey
and l3.l_suppkey <> l1.l_suppkey
and l3.l_receiptdate > l3.l_commitdate
)
and s_nationkey = n_nationkey
and n_name = 'SAUDI ARABIA'
group by
s_name
order by
numwait desc,
s_name
limit 100
"""
@staticmethod
def q22(run_config: RunConfig) -> str:
"""Query 22."""
return """
select
cntrycode,
count(*) as numcust,
sum(c_acctbal) as totacctbal
from (
select
substring(c_phone from 1 for 2) as cntrycode,
c_acctbal
from
customer
where
substring(c_phone from 1 for 2) in
(13, 31, 23, 29, 30, 18, 17)
and c_acctbal > (
select
avg(c_acctbal)
from
customer
where
c_acctbal > 0.00
and substring (c_phone from 1 for 2) in
(13, 31, 23, 29, 30, 18, 17)
)
and not exists (
select
*
from
orders
where
o_custkey = c_custkey
)
) as custsale
group by
cntrycode
order by
cntrycode
"""
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Run PDS-H benchmarks.")
parser.add_argument(
"--engine",
choices=["polars", "duckdb", "validate"],
default="polars",
help="Which engine to use for executing the benchmarks or to validate results.",
)
args, extra_args = parser.parse_known_args()
if args.engine == "polars":
run_polars(PDSHQueries, extra_args, num_queries=22)
elif args.engine == "duckdb":
run_duckdb(PDSHDuckDBQueries, extra_args, num_queries=22)
elif args.engine == "validate":
run_validate(
PDSHQueries,
PDSHDuckDBQueries,
extra_args,
num_queries=22,
check_dtypes=True,
check_column_order=True,
)
| PDSHDuckDBQueries |
| python | pytorch__pytorch | torch/_inductor/runtime/caching/context.py | {"start": 7156, "end": 7335} |
class ____(TypedDict):
torch_version_hash: bool
triton_version_hash: bool
runtime: bool
runtime_version: bool
accelerator_properties: bool
| SelectedCompileContext |
| python | ipython__ipython | IPython/terminal/prompts.py | {"start": 320, "end": 2889} |
class ____:
def __init__(self, shell):
self.shell = shell
def vi_mode(self):
if (getattr(self.shell.pt_app, 'editing_mode', None) == EditingMode.VI
and self.shell.prompt_includes_vi_mode):
mode = str(self.shell.pt_app.app.vi_state.input_mode)
if mode.startswith('InputMode.'):
mode = mode[10:13].lower()
elif mode.startswith('vi-'):
mode = mode[3:6]
return '['+mode+'] '
return ''
def current_line(self) -> int:
if self.shell.pt_app is not None:
return self.shell.pt_app.default_buffer.document.cursor_position_row or 0
return 0
def in_prompt_tokens(self):
return [
(Token.Prompt.Mode, self.vi_mode()),
(
Token.Prompt.LineNumber,
self.shell.prompt_line_number_format.format(
line=1, rel_line=-self.current_line()
),
),
(Token.Prompt, "In ["),
(Token.PromptNum, str(self.shell.execution_count)),
(Token.Prompt, ']: '),
]
def _width(self):
return fragment_list_width(self.in_prompt_tokens())
def continuation_prompt_tokens(
self,
width: int | None = None,
*,
lineno: int | None = None,
wrap_count: int | None = None,
):
if width is None:
width = self._width()
line = lineno + 1 if lineno is not None else 0
if wrap_count:
return [
(
Token.Prompt.Wrap,
# (" " * (width - 2)) + "\N{HORIZONTAL ELLIPSIS} ",
(" " * (width - 2)) + "\N{VERTICAL ELLIPSIS} ",
),
]
prefix = " " * len(
self.vi_mode()
) + self.shell.prompt_line_number_format.format(
line=line, rel_line=line - self.current_line() - 1
)
return [
(
getattr(Token.Prompt.Continuation, f"L{lineno}"),
prefix + (" " * (width - len(prefix) - 5)) + "...:",
),
(Token.Prompt.Padding, " "),
]
def rewrite_prompt_tokens(self):
width = self._width()
return [
(Token.Prompt, ('-' * (width - 2)) + '> '),
]
def out_prompt_tokens(self):
return [
(Token.OutPrompt, 'Out['),
(Token.OutPromptNum, str(self.shell.execution_count)),
(Token.OutPrompt, ']: '),
]
| Prompts |
| python | apache__airflow | providers/google/tests/unit/google/marketing_platform/hooks/test_search_ads.py | {"start": 6964, "end": 7805} |
class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = GoogleSearchAdsHook(gcp_conn_id=GCP_CONN_ID)
@mock.patch("airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsHook._authorize")
@mock.patch("airflow.providers.google.marketing_platform.hooks.search_ads.build")
def test_gen_conn(self, mock_build, mock_authorize):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
"doubleclicksearch",
"v2",
http=mock_authorize.return_value,
cache_discovery=False,
)
assert mock_build.return_value == result
| TestSearchAdsHook |
| python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {"start": 179587, "end": 182014} |
class ____(Request):
"""
Get unique source ids from the given dataset version
:param version: Dataset version ID. If not provided, returns sources used by
all versions.
:type version: str
:param dataset: Dataset ID
:type dataset: str
:param max_count: Number of sources to return. default=100, Optional
:type max_count: int
"""
_service = "datasets"
_action = "get_source_ids"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"dataset": {"description": "Dataset ID", "type": "string"},
"max_count": {
"default": 100,
"description": "Number of sources to return. default=100, Optional",
"type": "integer",
},
"version": {
"description": "Dataset version ID. If not provided, returns sources used by all versions.",
"type": ["string", "null"],
},
},
"required": ["dataset"],
"type": "object",
}
def __init__(self, dataset, version=None, max_count=100, **kwargs):
super(GetSourceIdsRequest, self).__init__(**kwargs)
self.version = version
self.dataset = dataset
self.max_count = max_count
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("max_count")
def max_count(self):
return self._property_max_count
@max_count.setter
def max_count(self, value):
if value is None:
self._property_max_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "max_count", six.integer_types)
self._property_max_count = value
| GetSourceIdsRequest |
| python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/shard_test.py | {"start": 1333, "end": 4534} |
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testSimpleCase(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[2, 7])
@combinations.generate(test_base.default_test_combinations())
def testNestedData(self):
dataset_a = dataset_ops.Dataset.range(10)
dataset_b = dataset_ops.Dataset.range(10, 0, -1)
dataset = dataset_ops.Dataset.zip((dataset_a, dataset_b)).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[(2, 8), (7, 3)])
@combinations.generate(test_base.default_test_combinations())
def testOffsetZero(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 0)
self.assertDatasetProduces(dataset, expected_output=[0, 5])
@combinations.generate(test_base.default_test_combinations())
def testOffsetGreaterNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(5, 7)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testNegativeOffset(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(5, -3)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testNegativeNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(-3, 1)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testZeroNumShards(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(10).shard(0, 1)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testIteratorEndsBeforeFirstElem(self):
dataset = dataset_ops.Dataset.range(1).shard(5, 2)
self.assertDatasetProduces(dataset, expected_output=[])
@combinations.generate(test_base.default_test_combinations())
def testLargerWorkerPool(self):
dataset = dataset_ops.Dataset.range(10).shard(7, 5)
self.assertDatasetProduces(dataset, expected_output=[5])
@combinations.generate(test_base.default_test_combinations())
def testIndexEqualsNumShards(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 4)
self.assertDatasetProduces(dataset, expected_output=[4, 9])
@combinations.generate(test_base.default_test_combinations())
def testIndexEqualsNumShards2(self):
dataset = dataset_ops.Dataset.range(10).shard(4, 3)
self.assertDatasetProduces(dataset, expected_output=[3, 7])
@combinations.generate(test_base.default_test_combinations())
def testNumShardsLargerThanDataset(self):
dataset = dataset_ops.Dataset.range(10).shard(20, 5)
self.assertDatasetProduces(dataset, expected_output=[5])
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(42).shard(1, 0, name="shard")
self.assertDatasetProduces(dataset, [42])
| ShardTest |
| python | bokeh__bokeh | tests/unit/bokeh/test_settings.py | {"start": 11350, "end": 14167} |
class ____:
def test_allowed_ws_origin(self):
assert bs.settings.allowed_ws_origin.default == []
def test_auth_module(self):
assert bs.settings.auth_module.default is None
def test_browser(self):
assert bs.settings.browser.default is None
def test_cdn_version(self):
assert bs.settings.cdn_version.default is None
def test_chromedriver_path(self):
assert bs.settings.chromedriver_path.default is None
def test_cookie_secret(self):
assert bs.settings.cookie_secret.default is None
def test_docs_cdn(self):
assert bs.settings.docs_cdn.default is None
def test_docs_version(self):
assert bs.settings.docs_version.default is None
def test_ico_path(self):
assert bs.settings.ico_path.default == "default"
def test_ignore_filename(self):
assert bs.settings.ignore_filename.default is False
def test_log_level(self):
assert bs.settings.log_level.default == "info"
def test_minified(self):
assert bs.settings.minified.default is True
def test_nodejs_path(self):
assert bs.settings.nodejs_path.default is None
def test_perform_document_validation(self):
assert bs.settings.perform_document_validation.default is True
def test_pretty(self):
assert bs.settings.pretty.default is False
def test_py_log_level(self):
assert bs.settings.py_log_level.default == "none"
def test_resources(self):
assert bs.settings.resources.default == "cdn"
def test_rootdir(self):
assert bs.settings.rootdir.default is None
def test_secret_key(self):
assert bs.settings.secret_key.default is None
def test_serialize_include_defaults(self):
assert bs.settings.serialize_include_defaults.default is False
def test_sign_sessions(self):
assert bs.settings.sign_sessions.default is False
def test_simple_ids(self):
assert bs.settings.simple_ids.default is True
def test_ssl_certfile(self):
assert bs.settings.ssl_certfile.default is None
def test_ssl_keyfile(self):
assert bs.settings.ssl_keyfile.default is None
def test_ssl_password(self):
assert bs.settings.ssl_password.default is None
def test_validation_level(self):
assert bs.settings.validation_level.default == "none"
def test_xsrf_cookies(self):
assert bs.settings.xsrf_cookies.default is False
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| TestDefaults |
| python | kamyu104__LeetCode-Solutions | Python/3sum-closest.py | {"start": 31, "end": 859} |
class ____(object):
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
result, min_diff = 0, float("inf")
nums.sort()
for i in reversed(xrange(2, len(nums))):
if i+1 < len(nums) and nums[i] == nums[i+1]:
continue
left, right = 0, i-1
while left < right:
total = nums[left]+nums[right]+nums[i]
if total < target:
left += 1
elif total > target:
right -= 1
else:
return target
if abs(total-target) < min_diff:
min_diff = abs(total-target)
result = total
return result
| Solution |
| python | tensorflow__tensorflow | tensorflow/python/distribute/packed_distributed_variable.py | {"start": 9542, "end": 14024} |
class ____(object):
"""Holds a packed distributed variable and a device."""
def __init__(self, var, device):
self._var = var
self._device = device
def __getattr__(self, name):
# Exceptions raised inside the contextmanager can cause a reference
# cycle.[1] The cycle involves the current frame, which holds the reference
# to the outer frame. Tensorflow, e.g. iterators, relies on object
# finalizers to clean up resources. Such references prevents the resource
# from being deleted and can cause leaks and errors. One corner the case is
# that iterators are kept alive and the garbage collector happens to run
# after auto control dependencies; this causes the deletion to lose the
# control dependencies to operations that uses such resources.
#
# Catch and re-raise the exception seems to workaround the issue.
#
# [1] https://bugs.python.org/issue43533
try:
with ops.device(self._device):
return getattr(self._var, name)
except: # pylint: disable=try-except-raise
raise
def var(self):
return self._var
def value(self):
with ops.device(self._device):
return self._var.value()
def read_value(self):
with ops.device(self._device):
return self._var.read_value()
@property
def initial_value(self):
return self._var.initial_value(self._device)
def initialized_value(self):
with ops.device(self._device):
return self._var.initialized_value()
@property
def device(self):
return self._device
@property
def handle(self):
with ops.device(self._device):
return self._var.handle
def on_device_handle(self):
with ops.device(self._device):
return self._var.get_var_on_current_device().handle
@property
def op(self) -> ops.Operation:
with ops.device(self._device):
return self._var.op
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
with ops.device(self._device):
return self._var.assign_sub(delta, use_locking, name, read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
with ops.device(self._device):
return self._var.assign_add(delta, use_locking, name, read_value)
def assign(self, value, use_locking=None, name=None, read_value=True):
with ops.device(self._device):
return self._var.assign(value, use_locking, name, read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_sub(sparse_delta, use_locking, name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_add(sparse_delta, use_locking, name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_mul(sparse_delta, use_locking, name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_div(sparse_delta, use_locking, name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_min(sparse_delta, use_locking, name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_max(sparse_delta, use_locking, name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
with ops.device(self._device):
return self._var.scatter_update(sparse_delta, use_locking, name)
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
with ops.device(self._device):
return self._var._dense_var_to_tensor( # pylint: disable=protected-access
dtype=dtype,
name=name,
as_ref=as_ref)
def _as_graph_element(self):
return self._var._as_graph_element() # pylint: disable=protected-access
def _tensor_conversion_packed_var_and_device(var,
dtype=None,
name=None,
as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
tensor_conversion_registry.register_tensor_conversion_function(
PackedVarAndDevice, _tensor_conversion_packed_var_and_device)
| PackedVarAndDevice |
| python | fluentpython__example-code | 14-it-generator/sentence_genexp.py | {"start": 148, "end": 989} |
class ____:
def __init__(self, text):
self.text = text
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self):
return (match.group() for match in RE_WORD.finditer(self.text))
# END SENTENCE_GENEXP
def main():
import sys
import warnings
try:
filename = sys.argv[1]
word_number = int(sys.argv[2])
except (IndexError, ValueError):
print('Usage: %s <file-name> <word-number>' % sys.argv[0])
sys.exit(1)
with open(filename, 'rt', encoding='utf-8') as text_file:
s = Sentence(text_file.read())
for n, word in enumerate(s, 1):
if n == word_number:
print(word)
break
else:
warnings.warn('last word is #%d, "%s"' % (n, word))
if __name__ == '__main__':
main()
| Sentence |
| python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_bincount_ops_test.py | {"start": 1725, "end": 11831} |
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]],
dtype)
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 2, 1]]
# pyformat: enable
self.assertAllEqual(expected_output,
self.evaluate(bincount_ops.bincount(arr=x, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_binary(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 1]]
# pyformat: enable
self.assertAllEqual(
expected_output,
self.evaluate(
bincount_ops.bincount(arr=x, axis=-1, binary_output=True)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_with_weights(self, dtype):
x = ragged_factory_ops.constant([[], [], [3, 0, 1], [], [5, 0, 4, 4]])
weights = ragged_factory_ops.constant([[], [], [.1, .2, .3], [],
[.2, .5, .6, .3]])
# pyformat: disable
expected_output = [
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[.2, .3, 0, .1, 0, 0],
[0, 0, 0, 0, 0, 0],
[.5, 0, 0, 0, .9, .2]]
# pyformat: enable
self.assertAllClose(
expected_output,
self.evaluate(bincount_ops.bincount(arr=x, weights=weights, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_np(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_out = np.reshape(
np.concatenate(
[np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
self.assertAllEqual(
np_out,
self.evaluate(bincount_ops.bincount(arr=x, minlength=size, axis=-1)))
@parameterized.parameters([{
"dtype": np.int32,
}, {
"dtype": np.int64,
}])
def test_ragged_input_count_np_with_weights(self, dtype):
np.random.seed(42)
num_rows = 128
num_cols = 27
size = 1000
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
np_weight = np.random.random((num_rows, num_cols))
np_out = np.reshape(
np.concatenate([
np.bincount(inp[j, :], weights=np_weight[j, :], minlength=size)
for j in range(num_rows)
],
axis=0), (num_rows, size))
x = ragged_tensor.RaggedTensor.from_tensor(inp)
weights = ragged_tensor.RaggedTensor.from_tensor(np_weight)
self.assertAllEqual(
np_out,
self.evaluate(
bincount_ops.bincount(
arr=x, weights=weights, minlength=size, axis=-1)))
@parameterized.product(
(
dict(
tid="_r2",
x_factory=_ragged_factory([[], [1], [2, 2], [3, 3, 3]]),
expected=[0, 1, 2, 3], # no implied zeros
),
dict(
tid="_r3",
x_factory=_ragged_factory([[[], [1]], [[2, 2], [3, 3, 3]]]),
expected=[0, 1, 2, 3], # no implied zeros
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_default(self, x_factory, minlength, maxlength, expected, tid=None):
x = x_factory()
expected = _adjust_expected_rank1(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
bincount_ops.bincount(x, minlength=minlength, maxlength=maxlength)
),
)
self.assertAllEqual(
expected,
self.evaluate(
bincount_ops.bincount(
x, minlength=minlength, maxlength=maxlength, axis=0
)
),
)
@parameterized.product(
(
dict(
tid="_r2",
x_factory=_ragged_factory([[], [1], [2, 2], [3, 3, 3]]),
# no implied zeros
expected=[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3]],
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_axis_neg_1(self, tid, x_factory, minlength, maxlength, expected):
x = x_factory()
expected = _adjust_expected_rank2(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
bincount_ops.bincount(
x, minlength=minlength, maxlength=maxlength, axis=-1
)
),
)
@parameterized.product(
(
dict(
tid="_r2",
x_factory=_ragged_factory([[], [1], [2, 2], [3, 3, 3]]),
weights_factory=_ragged_factory([[], [1], [2, 3], [4, 5, 6]]),
axis=None,
expected=[0, 1, 5, 15], # no implied zeros
),
dict(
tid="_r3",
x_factory=_ragged_factory([[[], [1]], [[2, 2], [3, 3, 3]]]),
weights_factory=_ragged_factory([[[], [1]], [[2, 3], [4, 5, 6]]]),
expected=[0, 1, 5, 15], # no implied zeros
axis=None,
),
dict(
tid="_r2_axis_neg_1",
x_factory=_ragged_factory([[], [1], [2, 2], [3, 3, 3]]),
weights_factory=_ragged_factory([[], [1], [2, 3], [4, 5, 6]]),
# no implied zeros
expected=[
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 15],
],
axis=-1,
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_weights(
self,
tid,
x_factory,
weights_factory,
minlength,
maxlength,
expected,
axis,
):
device_set = set([d.device_type for d in tf_config.list_physical_devices()])
if "GPU" in device_set and not test_util.is_xla_enabled():
self.skipTest(
"b/263004039 The DenseBincount GPU kernel does not support weights."
" unsorted_segment_sum should be used instead on GPU."
)
x = x_factory()
weights = weights_factory()
if axis == -1:
expected = _adjust_expected_rank2(expected, minlength, maxlength)
else:
expected = _adjust_expected_rank1(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
bincount_ops.bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
axis=axis,
)
),
)
@parameterized.product(
(
dict(
tid="_r2",
x_factory=_ragged_factory([[], [1], [2, 2], [3, 3, 3]]),
expected=[0, 1, 1, 1], # no implied zeros
axis=None,
),
dict(
tid="_r3",
x_factory=_ragged_factory([[[], [1]], [[2, 2], [3, 3, 3]]]),
expected=[0, 1, 1, 1], # no implied zeros
axis=None,
),
dict(
tid="_r2_axis_neg_1",
x_factory=_ragged_factory([[], [1], [2, 2], [3, 3, 3]]),
# no implied zeros
expected=[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
axis=-1,
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_binary_output(
self,
tid,
x_factory,
minlength,
maxlength,
expected,
axis=None,
):
x = x_factory()
if axis == -1:
expected = _adjust_expected_rank2(expected, minlength, maxlength)
else:
expected = _adjust_expected_rank1(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
bincount_ops.bincount(
x,
minlength=minlength,
maxlength=maxlength,
binary_output=True,
axis=axis,
)
),
)
| TestDenseBincount |
| python | tensorflow__tensorflow | tensorflow/python/framework/immutable_dict_test.py | {"start": 932, "end": 3483} |
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def testGetItem(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 2)
with self.assertRaises(KeyError):
d['z'] # pylint: disable=pointless-statement
def testIter(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(set(iter(d)), set(['x', 'y']))
def testContains(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertIn('x', d)
self.assertIn('y', d)
self.assertNotIn('z', d)
def testLen(self):
d1 = immutable_dict.ImmutableDict({})
self.assertLen(d1, 0) # pylint: disable=g-generic-assert
d2 = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertLen(d2, 2)
def testRepr(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
s = repr(d)
self.assertTrue(s == "ImmutableDict({'x': 1, 'y': 2})" or
s == "ImmutableDict({'y': 1, 'x': 2})")
def testGet(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(d.get('x'), 1)
self.assertEqual(d.get('y'), 2)
self.assertIsNone(d.get('z'))
self.assertEqual(d.get('z', 'Foo'), 'Foo')
def testKeys(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(set(d.keys()), set(['x', 'y']))
def testValues(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(set(d.values()), set([1, 2]))
def testItems(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(set(d.items()), set([('x', 1), ('y', 2)]))
def testEqual(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertEqual(d, {'x': 1, 'y': 2})
def testNotEqual(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
self.assertNotEqual(d, {'x': 1})
def testSetItemFails(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
with self.assertRaises(TypeError):
d['x'] = 5 # pylint: disable=unsupported-assignment-operation
with self.assertRaises(TypeError):
d['z'] = 5 # pylint: disable=unsupported-assignment-operation
def testDelItemFails(self):
d = immutable_dict.ImmutableDict({'x': 1, 'y': 2})
with self.assertRaises(TypeError):
del d['x'] # pylint: disable=unsupported-delete-operation
with self.assertRaises(TypeError):
del d['z'] # pylint: disable=unsupported-delete-operation
if __name__ == '__main__':
googletest.main()
| ImmutableDictTest |
| python | psf__black | tests/data/cases/preview_long_strings__regression.py | {"start": 12787, "end": 14207} |
class ____(StepBase):
def who(self):
self.cmd = 'SR AAAA-CORRECT NAME IS {last_name} {first_name}{middle_name} {title}/P{passenger_association}'.format(
last_name=last_name,
first_name=first_name,
middle_name=middle_name,
title=title,
passenger_association=passenger_association,
)
xxxxxxx_xxxxxx_xxxxxxx = xxx(
[
xxxxxxxxxxxx(
xxxxxx_xxxxxxx=(
'((x.aaaaaaaaa = "xxxxxx.xxxxxxxxxxxxxxxxxxxxx") || (x.xxxxxxxxx = "xxxxxxxxxxxx")) && '
# xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx.
"(x.bbbbbbbbbbbb.xxx != "
'"xxx:xxx:xxx::cccccccccccc:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx") && '
)
)
]
)
if __name__ == "__main__":
for i in range(4, 8):
cmd = (
r"for pid in $(ps aux | grep paster | grep -v grep | grep '\-%d' | awk '{print $2}'); do kill $pid; done"
% (i)
)
def A():
def B():
def C():
def D():
def E():
def F():
def G():
assert (
c_float(val[0][0] / val[0][1]).value
== c_float(value[0][0] / value[0][1]).value
), "%s didn't roundtrip" % tag
| Step |
| python | ijl__orjson | test/test_dataclass.py | {"start": 1389, "end": 1521} |
class ____(abc.ABC):
@abc.abstractmethod
def key(self):
raise NotImplementedError
@dataclass(frozen=True)
| AbstractBase |
| python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {"start": 64471, "end": 66274} |
class ____:
# For these, making behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably. So, might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
if NUMPY_LT_2_4:
args = (self.q, None, None, None, ", ", "", np._NoValue, {"float": str})
out3 = np.array2string(*args)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
| TestStringFunctions |
| python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {"start": 312147, "end": 314794} |
class ____(Response):
"""
Response of tasks.get_configurations endpoint.
:param configurations: Configurations (keyed by task ID)
:type configurations: Sequence[dict]
"""
_service = "tasks"
_action = "get_configurations"
_version = "2.13"
_schema = {
"definitions": {
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"configurations": {
"description": "Configurations (keyed by task ID)",
"items": {
"properties": {
"configuration": {
"description": "Configuration list",
"items": {"$ref": "#/definitions/configuration_item"},
"type": "array",
},
"task": {"description": "Task ID", "type": "string"},
},
"type": "object",
},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, configurations: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetConfigurationsResponse, self).__init__(**kwargs)
self.configurations = configurations
@schema_property("configurations")
def configurations(self) -> Optional[List[dict]]:
return self._property_configurations
@configurations.setter
def configurations(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_configurations = None
return
self.assert_isinstance(value, "configurations", (list, tuple))
self.assert_isinstance(value, "configurations", (dict,), is_array=True)
self._property_configurations = value
|
GetConfigurationsResponse
|
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-snowflake-query-engine/llama_index/packs/snowflake_query_engine/base.py
|
{
"start": 303,
"end": 2634
}
|
class ____(BaseLlamaPack):
"""
Snowflake query engine pack.
It uses snowflake-sqlalchemy to connect to Snowflake, then calls
NLSQLTableQueryEngine to query data.
"""
def __init__(
self,
user: str,
password: str,
account: str,
database: str,
schema: str,
warehouse: str,
role: str,
tables: List[str],
**kwargs: Any,
) -> None:
"""Init params."""
# workaround for https://github.com/snowflakedb/snowflake-sqlalchemy/issues/380.
try:
snowflake_sqlalchemy_20_monkey_patches()
except Exception:
raise ImportError("Please run `pip install snowflake-sqlalchemy`")
if not os.environ.get("OPENAI_API_KEY", None):
raise ValueError("OpenAI API Token is missing or blank.")
snowflake_uri = f"snowflake://{user}:{password}@{account}/{database}/{schema}?warehouse={warehouse}&role={role}"
engine = create_engine(snowflake_uri)
self._sql_database = SQLDatabase(engine)
self.tables = tables
self.query_engine = NLSQLTableQueryEngine(
sql_database=self._sql_database, tables=self.tables
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"sql_database": self._sql_database,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
def snowflake_sqlalchemy_20_monkey_patches():
import sqlalchemy.util.compat
# make strings always return unicode strings
sqlalchemy.util.compat.string_types = (str,)
sqlalchemy.types.String.RETURNS_UNICODE = True
import snowflake.sqlalchemy.snowdialect
snowflake.sqlalchemy.snowdialect.SnowflakeDialect.returns_unicode_strings = True
# make has_table() support the `info_cache` kwarg
import snowflake.sqlalchemy.snowdialect
def has_table(self, connection, table_name, schema=None, info_cache=None):
"""
Checks if the table exists.
"""
return self._has_object(connection, "TABLE", table_name, schema)
snowflake.sqlalchemy.snowdialect.SnowflakeDialect.has_table = has_table
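# A hedged usage sketch (credentials, warehouse, role and table names below are
# placeholders, not values from the original source):
# pack = SnowflakeQueryEnginePack(
#     user="USER", password="PASSWORD", account="ACCOUNT",
#     database="DB", schema="SCHEMA", warehouse="WH", role="ROLE",
#     tables=["orders"],
# )
# response = pack.run("How many orders were placed last month?")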
|
SnowflakeQueryEnginePack
|
python
|
kamyu104__LeetCode-Solutions
|
Python/construct-binary-tree-from-preorder-and-inorder-traversal.py
|
{
"start": 982,
"end": 1666
}
|
class ____(object):
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
preorder_iterator = iter(preorder)
inorder_lookup = {n: i for i, n in enumerate(inorder)}
def helper(start, end):
if start > end:
return None
root_val = next(preorder_iterator)
root = TreeNode(root_val)
idx = inorder_lookup[root_val]
root.left = helper(start, idx-1)
root.right = helper(idx+1, end)
return root
return helper(0, len(inorder)-1)
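# Illustrative call (LeetCode 105 sample input; TreeNode is defined elsewhere in this file):
# Solution2().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
# returns the root of:      3
#                          / \
#                         9  20
#                            / \
#                           15  7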
|
Solution2
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol48.py
|
{
"start": 281,
"end": 548
}
|
class ____:
def method1(self) -> tuple[Self, Self]: ...
def method2(self):
x = apply_method1(self)
reveal_type(x, expected_text="tuple[Self@A, Self@A]")
def func1(a: A):
x = apply_method1(a)
reveal_type(x, expected_text="tuple[A, A]")
|
A
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/statuses.py
|
{
"start": 677,
"end": 935
}
|
class ____(AutoEnum):
"""Enumeration of work queue statuses."""
READY = AutoEnum.auto()
NOT_READY = AutoEnum.auto()
PAUSED = AutoEnum.auto()
def in_kebab_case(self) -> str:
return self.value.lower().replace("_", "-")
|
WorkQueueStatus
|
python
|
gevent__gevent
|
src/greentest/3.9/test_socket.py
|
{
"start": 112695,
"end": 118639
}
|
class ____(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
|
RecvmsgGenericTests
|
python
|
skorch-dev__skorch
|
skorch/exceptions.py
|
{
"start": 639,
"end": 741
}
|
class ____(SkorchException):
"""The net cannot be used for training"""
|
SkorchTrainingImpossibleError
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1124004,
"end": 1124855
}
|
class ____(sgqlc.types.Type, Node):
"""Represents a 'demilestoned' event on a given issue or pull
request.
"""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "milestone_title", "subject")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
milestone_title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="milestoneTitle")
"""Identifies the milestone title associated with the 'demilestoned'
event.
"""
subject = sgqlc.types.Field(sgqlc.types.non_null("MilestoneItem"), graphql_name="subject")
"""Object referenced by event."""
|
DemilestonedEvent
|
python
|
huggingface__transformers
|
src/transformers/models/sam_hq/modeling_sam_hq.py
|
{
"start": 46276,
"end": 46919
}
|
class ____(SamHQPreTrainedModel):
config: SamHQVisionConfig
main_input_name = "pixel_values"
def __init__(self, config: SamHQVisionConfig):
super().__init__(config)
self.vision_encoder = SamHQVisionEncoder(config)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_encoder.patch_embed
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, SamHQVisionEncoderOutput]:
return self.vision_encoder(pixel_values, **kwargs)
|
SamHQVisionModel
|
python
|
Textualize__textual
|
src/textual/drivers/win32.py
|
{
"start": 1679,
"end": 2015
}
|
class ____(Structure):
"""https://docs.microsoft.com/en-us/windows/console/key-event-record-str"""
_fields_ = [
("bKeyDown", BOOL),
("wRepeatCount", WORD),
("wVirtualKeyCode", WORD),
("wVirtualScanCode", WORD),
("uChar", uChar),
("dwControlKeyState", DWORD),
]
|
KEY_EVENT_RECORD
|
python
|
django__django
|
tests/from_db_value/models.py
|
{
"start": 47,
"end": 99
}
|
class ____(decimal.Decimal):
currency = "USD"
|
Cash
|
python
|
bokeh__bokeh
|
src/bokeh/plotting/contour.py
|
{
"start": 2168,
"end": 2378
}
|
class ____:
''' Coordinates for all filled polygons over a whole sequence of contour levels.
'''
xs: list[list[list[np.ndarray]]]
ys: list[list[list[np.ndarray]]]
@dataclass(frozen=True)
|
FillCoords
|
python
|
pytransitions__transitions
|
transitions/extensions/asyncio.py
|
{
"start": 12606,
"end": 25847
}
|
class ____(Machine):
"""Machine manages states, transitions and models. In case it is initialized without a specific model
    (or specifically no model), it will also act as a model itself. Machine also takes care of decorating
    models with convenience functions related to added transitions and states during runtime.
Attributes:
states (OrderedDict): Collection of all registered states.
events (dict): Collection of transitions ordered by trigger/event.
models (list): List of models attached to the machine.
initial (str): Name of the initial state for new models.
prepare_event (list): Callbacks executed when an event is triggered.
before_state_change (list): Callbacks executed after condition checks but before transition is conducted.
Callbacks will be executed BEFORE the custom callbacks assigned to the transition.
after_state_change (list): Callbacks executed after the transition has been conducted.
Callbacks will be executed AFTER the custom callbacks assigned to the transition.
finalize_event (list): Callbacks will be executed after all transitions callbacks have been executed.
Callbacks mentioned here will also be called if a transition or condition check raised an error.
on_exception: A callable called when an event raises an exception. If not set,
the Exception will be raised instead.
queued (bool or str): Whether transitions in callbacks should be executed immediately (False) or sequentially.
send_event (bool): When True, any arguments passed to trigger methods will be wrapped in an EventData
object, allowing indirect and encapsulated access to data. When False, all positional and keyword
arguments will be passed directly to all callback methods.
auto_transitions (bool): When True (default), every state will automatically have an associated
to_{state}() convenience trigger in the base model.
ignore_invalid_triggers (bool): When True, any calls to trigger methods that are not valid for the
present state (e.g., calling an a_to_b() trigger when the current state is c) will be silently
ignored rather than raising an invalid transition exception.
name (str): Name of the ``Machine`` instance mainly used for easier log message distinction.
"""
state_cls = AsyncState
transition_cls = AsyncTransition
event_cls = AsyncEvent
async_tasks = {}
protected_tasks = []
current_context = contextvars.ContextVar('current_context', default=None)
def __init__(self, model=Machine.self_literal, states=None, initial='initial', transitions=None,
send_event=False, auto_transitions=True,
ordered_transitions=False, ignore_invalid_triggers=None,
before_state_change=None, after_state_change=None, name=None,
queued=False, prepare_event=None, finalize_event=None, model_attribute='state',
model_override=False, on_exception=None, on_final=None, **kwargs):
super().__init__(model=None, states=states, initial=initial, transitions=transitions,
send_event=send_event, auto_transitions=auto_transitions,
ordered_transitions=ordered_transitions, ignore_invalid_triggers=ignore_invalid_triggers,
before_state_change=before_state_change, after_state_change=after_state_change, name=name,
queued=bool(queued), prepare_event=prepare_event, finalize_event=finalize_event,
model_attribute=model_attribute, model_override=model_override,
on_exception=on_exception, on_final=on_final, **kwargs)
self._transition_queue_dict = _DictionaryMock(self._transition_queue) if queued is True else {}
self._queued = queued
for model in listify(model):
self.add_model(model)
def add_model(self, model, initial=None):
super().add_model(model, initial)
if self.has_queue == 'model':
for mod in listify(model):
self._transition_queue_dict[id(self) if mod is self.self_literal else id(mod)] = deque()
async def dispatch(self, trigger, *args, **kwargs):
"""Trigger an event on all models assigned to the machine.
Args:
trigger (str): Event name
*args (list): List of arguments passed to the event trigger
**kwargs (dict): Dictionary of keyword arguments passed to the event trigger
Returns:
                bool: The truth value of all triggers combined with AND.
"""
results = await self.await_all([partial(getattr(model, trigger), *args, **kwargs) for model in self.models])
return all(results)
async def callbacks(self, funcs, event_data):
"""Triggers a list of callbacks"""
await self.await_all([partial(event_data.machine.callback, func, event_data) for func in funcs])
async def callback(self, func, event_data):
"""Trigger a callback function with passed event_data parameters. In case func is a string,
the callable will be resolved from the passed model in event_data. This function is not intended to
be called directly but through state and transition callback definitions.
Args:
func (string, callable): The callback function.
1. First, if the func is callable, just call it
2. Second, we try to import string assuming it is a path to a func
3. Fallback to a model attribute
event_data (EventData): An EventData instance to pass to the
callback (if event sending is enabled) or to extract arguments
from (if event sending is disabled).
"""
func = self.resolve_callable(func, event_data)
res = func(event_data) if self.send_event else func(*event_data.args, **event_data.kwargs)
if inspect.isawaitable(res):
await res
@staticmethod
async def await_all(callables):
"""
Executes callables without parameters in parallel and collects their results.
Args:
callables (list): A list of callable functions
Returns:
list: A list of results. Using asyncio the list will be in the same order as the passed callables.
"""
return await asyncio.gather(*[func() for func in callables])
async def switch_model_context(self, model):
warnings.warn("Please replace 'AsyncMachine.switch_model_context' with "
"'AsyncMachine.cancel_running_transitions'.", category=DeprecationWarning)
await self.cancel_running_transitions(model)
async def cancel_running_transitions(self, model, msg=None):
"""
This method is called by an `AsyncTransition` when all conditional tests have passed
and the transition will happen. This requires already running tasks to be cancelled.
Args:
model (object): The currently processed model
msg (str): Optional message to pass to a running task's cancel request (deprecated).
"""
if msg is not None:
warnings.warn(
"When you call cancel_running_transitions with a custom message "
"transitions will re-raise all raised CancelledError. "
"Make sure to catch them in your code. "
"The parameter 'msg' will likely be removed in a future release.", category=DeprecationWarning)
for running_task in self.async_tasks.get(id(model), []):
if self.current_context.get() == running_task or running_task in self.protected_tasks:
continue
if running_task.done() is False:
_LOGGER.debug("Cancel running tasks...")
running_task.cancel(msg or CANCELLED_MSG)
async def process_context(self, func, model):
"""
This function is called by an `AsyncEvent` to make callbacks processed in Event._trigger cancellable.
        Using asyncio this will result in a try/except block catching asyncio.CancelledError.
Args:
func (partial): The partial of Event._trigger with all parameters already assigned
model (object): The currently processed model
Returns:
bool: returns the success state of the triggered event
"""
if self.current_context.get() is None:
token = self.current_context.set(asyncio.current_task())
if id(model) in self.async_tasks:
self.async_tasks[id(model)].append(asyncio.current_task())
else:
self.async_tasks[id(model)] = [asyncio.current_task()]
try:
res = await self._process_async(func, model)
except asyncio.CancelledError as err:
# raise CancelledError only if the task was not cancelled by internal processes
# we indicate internal cancellation by passing CANCELLED_MSG to cancel()
if CANCELLED_MSG not in err.args and sys.version_info >= (3, 11):
_LOGGER.debug("%sExternal cancellation of task. Raise CancelledError...", self.name)
raise
res = False
finally:
self.async_tasks[id(model)].remove(asyncio.current_task())
self.current_context.reset(token)
if len(self.async_tasks[id(model)]) == 0:
del self.async_tasks[id(model)]
else:
res = await self._process_async(func, model)
return res
def remove_model(self, model):
"""Remove a model from the state machine. The model will still contain all previously added triggers
and callbacks, but will not receive updates when states or transitions are added to the Machine.
If an event queue is used, all queued events of that model will be removed."""
models = listify(model)
if self.has_queue == 'model':
for mod in models:
del self._transition_queue_dict[id(mod)]
self.models.remove(mod)
else:
for mod in models:
self.models.remove(mod)
if len(self._transition_queue) > 0:
queue = self._transition_queue
new_queue = [queue.popleft()] + [e for e in queue if e.args[0].model not in models]
self._transition_queue.clear()
self._transition_queue.extend(new_queue)
async def _can_trigger(self, model, trigger, *args, **kwargs):
state = self.get_model_state(model)
event_data = AsyncEventData(state, AsyncEvent(name=trigger, machine=self), self, model, args, kwargs)
for trigger_name in self.get_triggers(state):
if trigger_name != trigger:
continue
for transition in self.events[trigger_name].transitions[state.name]:
try:
_ = self.get_state(transition.dest) if transition.dest is not None else transition.source
except ValueError:
continue
event_data.transition = transition
try:
await self.callbacks(self.prepare_event, event_data)
await self.callbacks(transition.prepare, event_data)
if all(await self.await_all([partial(c.check, event_data) for c in transition.conditions])):
return True
except BaseException as err:
event_data.error = err
if self.on_exception:
await self.callbacks(self.on_exception, event_data)
else:
raise
return False
def _process(self, trigger):
raise RuntimeError("AsyncMachine should not call `Machine._process`. Use `Machine._process_async` instead.")
async def _process_async(self, trigger, model):
# default processing
if not self.has_queue:
if not self._transition_queue:
# if trigger raises an Error, it has to be handled by the Machine.process caller
return await trigger()
raise MachineError("Attempt to process events synchronously while transition queue is not empty!")
self._transition_queue_dict[id(model)].append(trigger)
# another entry in the queue implies a running transition; skip immediate execution
if len(self._transition_queue_dict[id(model)]) > 1:
return True
while self._transition_queue_dict[id(model)]:
try:
await self._transition_queue_dict[id(model)][0]()
except BaseException:
# if a transition raises an exception, clear queue and delegate exception handling
self._transition_queue_dict[id(model)].clear()
raise
try:
self._transition_queue_dict[id(model)].popleft()
except KeyError:
return True
return True
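# A minimal, hedged usage sketch (state and trigger names are illustrative only):
# import asyncio
# machine = AsyncMachine(states=["idle", "busy"], initial="idle",
#                        transitions=[{"trigger": "work", "source": "idle", "dest": "busy"}])
# asyncio.run(machine.work())  # awaits the async trigger and returns True on success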
|
AsyncMachine
|
python
|
django-import-export__django-import-export
|
tests/core/admin.py
|
{
"start": 2317,
"end": 2401
}
|
class ____(ExportActionModelAdmin):
pass
@admin.register(Author)
|
UUIDCategoryAdmin
|
python
|
altair-viz__altair
|
altair/utils/server.py
|
{
"start": 572,
"end": 709
}
|
class ____:
def makefile(self, *args, **kwargs):
return IO(b"GET /")
def sendall(self, response):
pass
|
MockRequest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/two-sum-ii-input-array-is-sorted.py
|
{
"start": 29,
"end": 373
}
|
class ____(object):
def twoSum(self, nums, target):
start, end = 0, len(nums) - 1
while start != end:
sum = nums[start] + nums[end]
if sum > target:
end -= 1
elif sum < target:
start += 1
else:
return [start + 1, end + 1]
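# Quick sanity check of the two-pointer approach above (1-indexed result, per the problem statement):
assert Solution().twoSum([2, 7, 11, 15], 9) == [1, 2]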
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/3458. Select K Disjoint Special Substrings/3458.py
|
{
"start": 0,
"end": 870
}
|
class ____:
def maxSubstringLength(self, s: str, k: int) -> bool:
n = len(s)
first = [n] * 26
last = [-1] * 26
# dp[i] := the maximum disjoint special substrings for the first i letters
dp = [0] * (n + 1)
seenOrder = []
for i, c in enumerate(s):
a = ord(c) - ord('a')
if first[a] == n:
first[a] = i
seenOrder.append(c)
last[a] = i
for c in seenOrder:
a = ord(c) - ord('a')
for j in range(first[a], last[a]):
b = ord(s[j]) - ord('a')
first[a] = min(first[a], first[b])
last[a] = max(last[a], last[b])
for i, c in enumerate(s):
a = ord(c) - ord('a')
if last[a] != i or (first[a] == 0 and i == n - 1):
dp[i + 1] = dp[i]
else: # Start a new special substring.
dp[i + 1] = max(dp[i], 1 + dp[first[a]])
return dp[n] >= k
|
Solution
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
|
{
"start": 120231,
"end": 124516
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("pipeline_job.PipelineJobHook"))
@mock.patch("google.cloud.aiplatform_v1.types.PipelineJob.to_dict")
def test_execute(self, to_dict_mock, mock_hook):
op = RunPipelineJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
display_name=DISPLAY_NAME,
template_path=TEST_TEMPLATE_PATH,
job_id=TEST_PIPELINE_JOB_ID,
pipeline_root="",
parameter_values={},
input_artifacts={},
enable_caching=False,
encryption_spec_key_name="",
labels={},
failure_policy="",
service_account="",
network="",
create_request_timeout=None,
experiment=None,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.submit_pipeline_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
display_name=DISPLAY_NAME,
template_path=TEST_TEMPLATE_PATH,
job_id=TEST_PIPELINE_JOB_ID,
pipeline_root="",
parameter_values={},
input_artifacts={},
enable_caching=False,
encryption_spec_key_name="",
labels={},
failure_policy="",
service_account="",
network="",
create_request_timeout=None,
experiment=None,
)
@mock.patch(VERTEX_AI_PATH.format("pipeline_job.PipelineJobHook"))
def test_execute_enters_deferred_state(self, mock_hook):
task = RunPipelineJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
display_name=DISPLAY_NAME,
template_path=TEST_TEMPLATE_PATH,
job_id=TEST_PIPELINE_JOB_ID,
deferrable=True,
)
mock_hook.return_value.exists.return_value = False
with pytest.raises(TaskDeferred) as exc:
task.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
assert isinstance(exc.value.trigger, RunPipelineJobTrigger), "Trigger is not a RunPipelineJobTrigger"
@mock.patch(VERTEX_AI_PATH.format("pipeline_job.PipelineJobHook"))
def test_execute_complete_success(self, mock_hook):
task = RunPipelineJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
display_name=DISPLAY_NAME,
template_path=TEST_TEMPLATE_PATH,
job_id=TEST_PIPELINE_JOB_ID,
deferrable=True,
)
expected_pipeline_job = expected_result = {
"name": f"projects/{GCP_PROJECT}/locations/{GCP_LOCATION}/pipelineJobs/{TEST_PIPELINE_JOB_ID}",
}
mock_hook.return_value.exists.return_value = False
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
actual_result = task.execute_complete(
context=mock_context, event={"status": "success", "message": "", "job": expected_pipeline_job}
)
assert actual_result == expected_result
def test_execute_complete_error_status_raises_exception(self):
task = RunPipelineJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
display_name=DISPLAY_NAME,
template_path=TEST_TEMPLATE_PATH,
job_id=TEST_PIPELINE_JOB_ID,
deferrable=True,
)
with pytest.raises(AirflowException):
task.execute_complete(
context=None, event={"status": "error", "message": "test message", "job": None}
)
|
TestVertexAIRunPipelineJobOperator
|
python
|
pydantic__pydantic
|
tests/test_forward_ref.py
|
{
"start": 16411,
"end": 17036
}
|
class ____(BaseModel):
foo_user: FooUser
user: User
@field_serializer('user')
def serialize_user(self, v):
return f'User({v.y})'
"""
)
m = module.Model(foo_user={'x': 'user1'}, user={'y': 'user2'})
# TODO: How can we replicate this custom-encoder functionality without affecting the serialization of `User`?
assert m.model_dump_json() == '{"foo_user":{"x":"user1"},"user":"User(user2)"}'
def test_pep585_self_referencing_generics(create_module):
module = create_module(
# language=Python
"""
from __future__ import annotations
from pydantic import BaseModel
|
Model
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 997211,
"end": 997987
}
|
class ____(sgqlc.types.relay.Connection):
"""The connection type for TeamDiscussionComment."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("TeamDiscussionCommentEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("TeamDiscussionComment"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
TeamDiscussionCommentConnection
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-nvidia-triton/llama_index/llms/nvidia_triton/utils.py
|
{
"start": 1877,
"end": 3076
}
|
class ____(Queue):
"""A Generator that provides the inference results from an LLM."""
def __init__(
self,
client: "GrpcTritonClient",
request_id: str,
force_batch: bool,
model_name: str,
max_tokens: int,
) -> None:
"""Instantiate the generator class."""
super().__init__()
self._client = client
self.request_id = request_id
self._batch = force_batch
self._model_name = model_name
self._max_tokens = max_tokens
self._counter = 0
def __iter__(self) -> "StreamingResponseGenerator":
"""Return self as a generator."""
return self
def __next__(self) -> str:
"""Return the next retrieved token."""
val = self.get()
if val is None or val in STOP_WORDS or self._counter == self._max_tokens - 1:
self._stop_stream()
raise StopIteration
self._counter += 1
return val
def _stop_stream(self) -> None:
"""Drain and shutdown the Triton stream."""
self._client.stop_stream(
self._model_name, self.request_id, signal=not self._batch
)
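# Hedged usage sketch (the Triton client construction and model name are assumptions,
# not part of the original source):
# gen = StreamingResponseGenerator(client, request_id="1", force_batch=False,
#                                  model_name="ensemble", max_tokens=256)
# text = "".join(gen)  # drains tokens until a stop word, None, or the token limit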
|
StreamingResponseGenerator
|
python
|
django__django
|
tests/modeladmin/models.py
|
{
"start": 548,
"end": 983
}
|
class ____(models.Model):
main_band = models.ForeignKey(Band, models.CASCADE, related_name="main_concerts")
opening_band = models.ForeignKey(
Band, models.CASCADE, related_name="opening_concerts", blank=True
)
day = models.CharField(max_length=3, choices=((1, "Fri"), (2, "Sat")))
transport = models.CharField(
max_length=100, choices=((1, "Plane"), (2, "Train"), (3, "Bus")), blank=True
)
|
Concert
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-non-overlapping-substrings.py
|
{
"start": 1207,
"end": 2446
}
|
class ____(object):
def maxNumOfSubstrings(self, s):
"""
:type s: str
:rtype: List[str]
"""
def find_right_from_left(s, first, last, left):
right, i = last[ord(s[left])-ord('a')], left
while i <= right:
if first[ord(s[i])-ord('a')] < left:
return -1
right = max(right, last[ord(s[i])-ord('a')])
i += 1
return right
first, last = [float("inf")]*26, [float("-inf")]*26
for i, c in enumerate(s):
first[ord(c)-ord('a')] = min(first[ord(c)-ord('a')], i)
last[ord(c)-ord('a')] = max(last[ord(c)-ord('a')], i)
intervals = []
for c in xrange(len(first)):
if first[c] == float("inf"):
continue
left, right = first[c], find_right_from_left(s, first, last, first[c])
if right != -1:
intervals.append((right, left))
intervals.sort() # Time: O(26log26)
result, prev = [], -1
for right, left in intervals:
if left <= prev:
continue
result.append(s[left:right+1])
prev = right
return result
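# Illustrative check (LeetCode 1520 sample): Solution2().maxNumOfSubstrings("adefaddaccc")
# greedily keeps the intervals with the smallest right endpoints and returns ["e", "f", "ccc"].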
|
Solution2
|
python
|
miyuchina__mistletoe
|
mistletoe/span_token.py
|
{
"start": 8941,
"end": 9545
}
|
class ____(SpanToken):
"""
Span-level HTML token.
This is an inline token without children.
Attributes:
content (str): the raw HTML content.
"""
pattern = re.compile('|'.join([_open_tag, _closing_tag, _comment,
_instruction, _declaration, _cdata]),
re.DOTALL)
parse_inner = False
parse_group = 0
HTMLSpan = HtmlSpan
"""
Deprecated name of the `HtmlSpan` class.
"""
# Note: The following XWiki tokens are based on the XWiki Syntax 2.0 (or above; 1.0 was deprecated years ago already).
|
HtmlSpan
|
python
|
getsentry__sentry
|
src/sentry/auth/providers/saml2/activedirectory/provider.py
|
{
"start": 80,
"end": 197
}
|
class ____(GenericSAML2Provider):
name = "Active Directory"
key = "active-directory"
|
ActiveDirectorySAML2Provider
|
python
|
xlwings__xlwings
|
xlwings/_xlwindows.py
|
{
"start": 22427,
"end": 24202
}
|
class ____(base_classes.Books):
def __init__(self, xl, app):
self.xl = xl
self.app = app
@property
def api(self):
return self.xl
@property
def active(self):
return Book(self.xl.Application.ActiveWorkbook)
def __call__(self, name_or_index):
try:
return Book(xl=self.xl(name_or_index))
except pywintypes.com_error:
raise KeyError(name_or_index)
def __len__(self):
return self.xl.Count
def add(self):
return Book(xl=self.xl.Add())
def open(
self,
fullname,
update_links=None,
read_only=None,
format=None,
password=None,
write_res_password=None,
ignore_read_only_recommended=None,
origin=None,
delimiter=None,
editable=None,
notify=None,
converter=None,
add_to_mru=None,
local=None,
corrupt_load=None,
):
# update_links: According to VBA docs, only constants 0 and 3 are supported
if update_links:
update_links = UpdateLinks.xlUpdateLinksAlways
# Workbooks.Open params are position only on pywin32
return Book(
xl=self.xl.Open(
fullname,
update_links,
read_only,
format,
password,
write_res_password,
ignore_read_only_recommended,
origin,
delimiter,
editable,
notify,
converter,
add_to_mru,
local,
corrupt_load,
)
)
def __iter__(self):
for xl in self.xl:
yield Book(xl=xl)
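# Hedged usage sketch (assumes a local Excel installation; the file path is hypothetical):
# import xlwings as xw
# app = xw.App(visible=False)
# book = app.books.open(r"C:\data\report.xlsx", read_only=True)
# print(len(app.books))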
|
Books
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-google-sheets/unit_tests/integration/conftest/request_builder.py
|
{
"start": 304,
"end": 1636
}
|
class ____:
@classmethod
def get_account_endpoint(cls) -> RequestBuilder:
return cls(resource="values:batchGet")
def __init__(self, resource: str = None) -> None:
self._spreadsheet_id = None
self._query_params = {}
self._body = None
self.resource = resource
def with_include_grid_data(self, include_grid_data: bool) -> RequestBuilder:
self._query_params["includeGridData"] = "true" if include_grid_data else "false"
return self
def with_alt(self, alt: str) -> RequestBuilder:
self._query_params["alt"] = alt
return self
def with_ranges(self, ranges: str) -> RequestBuilder:
self._query_params["ranges"] = ranges
return self
def with_major_dimension(self, dimension: str) -> RequestBuilder:
self._query_params["majorDimension"] = dimension
return self
def with_spreadsheet_id(self, spreadsheet_id: str) -> RequestBuilder:
self._spreadsheet_id = spreadsheet_id
return self
def build(self) -> HttpRequest:
endpoint = f"/{self.resource}" if self.resource else ""
return HttpRequest(
url=f"{GOOGLE_SHEETS_BASE_URL}/{self._spreadsheet_id}{endpoint}",
query_params=self._query_params,
body=self._body,
)
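# The with_* setters above all return `self`, so a batchGet request can be assembled
# fluently; a hedged sketch (the spreadsheet id and range are placeholders):
# request = (
#     RequestBuilder.get_account_endpoint()
#     .with_spreadsheet_id("a_spreadsheet_id")
#     .with_ranges("Sheet1!A1:B2")
#     .with_major_dimension("ROWS")
#     .with_alt("json")
#     .build()
# )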
|
RequestBuilder
|
python
|
dateutil__dateutil
|
src/dateutil/tz/tz.py
|
{
"start": 5055,
"end": 8940
}
|
class ____(_tzinfo):
"""
A :class:`tzinfo` subclass built around the ``time`` timezone functions.
"""
def __init__(self):
super(tzlocal, self).__init__()
self._std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
self._dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
self._dst_offset = self._std_offset
self._dst_saved = self._dst_offset - self._std_offset
self._hasdst = bool(self._dst_saved)
self._tznames = tuple(time.tzname)
def utcoffset(self, dt):
if dt is None and self._hasdst:
return None
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if dt is None and self._hasdst:
return None
if self._isdst(dt):
return self._dst_offset - self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._tznames[self._isdst(dt)]
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
naive_dst = self._naive_is_dst(dt)
return (not naive_dst and
(naive_dst != self._naive_is_dst(dt - self._dst_saved)))
def _naive_is_dst(self, dt):
timestamp = _datetime_to_timestamp(dt)
return time.localtime(timestamp + time.timezone).tm_isdst
def _isdst(self, dt, fold_naive=True):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
# >>> import tz, datetime
# >>> t = tz.tzlocal()
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
#
# Here is a more stable implementation:
#
if not self._hasdst:
return False
# Check for ambiguous times:
dstval = self._naive_is_dst(dt)
fold = getattr(dt, 'fold', None)
if self.is_ambiguous(dt):
if fold is not None:
return not self._fold(dt)
else:
return True
return dstval
def __eq__(self, other):
if isinstance(other, tzlocal):
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
elif isinstance(other, tzutc):
return (not self._hasdst and
self._tznames[0] in {'UTC', 'GMT'} and
self._std_offset == ZERO)
elif isinstance(other, tzoffset):
return (not self._hasdst and
self._tznames[0] == other._name and
self._std_offset == other._offset)
else:
return NotImplemented
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
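# Hedged usage sketch (results depend on the host machine's local zone settings):
# from dateutil import tz
# import datetime
# local = tz.tzlocal()
# datetime.datetime(2020, 7, 1, 12, tzinfo=local).tzname()  # e.g. 'CEST' or 'EDT'
# local.is_ambiguous(some_naive_dt)  # True only during a fall-back (fold) hour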
|
tzlocal
|
python
|
pytest-dev__pytest
|
src/_pytest/config/__init__.py
|
{
"start": 3298,
"end": 7103
}
|
class ____(Exception):
def __init__(
self,
path: pathlib.Path,
*,
cause: Exception,
) -> None:
self.path = path
self.cause = cause
def __str__(self) -> str:
return f"{type(self.cause).__name__}: {self.cause} (from {self.path})"
def filter_traceback_for_conftest_import_failure(
entry: _pytest._code.TracebackEntry,
) -> bool:
"""Filter tracebacks entries which point to pytest internals or importlib.
Make a special case for importlib because we use it to import test modules and conftest files
in _pytest.pathlib.import_path.
"""
return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep)
def print_conftest_import_error(e: ConftestImportFailure, file: TextIO) -> None:
exc_info = ExceptionInfo.from_exception(e.cause)
tw = TerminalWriter(file)
tw.line(f"ImportError while loading conftest '{e.path}'.", red=True)
exc_info.traceback = exc_info.traceback.filter(
filter_traceback_for_conftest_import_failure
)
exc_repr = (
exc_info.getrepr(style="short", chain=False)
if exc_info.traceback
else exc_info.exconly()
)
formatted_tb = str(exc_repr)
for line in formatted_tb.splitlines():
tw.line(line.rstrip(), red=True)
def print_usage_error(e: UsageError, file: TextIO) -> None:
tw = TerminalWriter(file)
for msg in e.args:
tw.line(f"ERROR: {msg}\n", red=True)
def main(
args: list[str] | os.PathLike[str] | None = None,
plugins: Sequence[str | _PluggyPlugin] | None = None,
) -> int | ExitCode:
"""Perform an in-process test run.
:param args:
List of command line arguments. If `None` or not given, defaults to reading
arguments directly from the process command line (:data:`sys.argv`).
:param plugins: List of plugin objects to be auto-registered during initialization.
:returns: An exit code.
"""
# Handle a single `--version` argument early to avoid starting up the entire pytest infrastructure.
new_args = sys.argv[1:] if args is None else args
if isinstance(new_args, Sequence) and new_args.count("--version") == 1:
sys.stdout.write(f"pytest {__version__}\n")
return ExitCode.OK
old_pytest_version = os.environ.get("PYTEST_VERSION")
try:
os.environ["PYTEST_VERSION"] = __version__
try:
config = _prepareconfig(new_args, plugins)
except ConftestImportFailure as e:
print_conftest_import_error(e, file=sys.stderr)
return ExitCode.USAGE_ERROR
try:
ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config)
try:
return ExitCode(ret)
except ValueError:
return ret
finally:
config._ensure_unconfigure()
except UsageError as e:
print_usage_error(e, file=sys.stderr)
return ExitCode.USAGE_ERROR
finally:
if old_pytest_version is None:
os.environ.pop("PYTEST_VERSION", None)
else:
os.environ["PYTEST_VERSION"] = old_pytest_version
def console_main() -> int:
"""The CLI entry point of pytest.
This function is not meant for programmable use; use `main()` instead.
"""
# https://docs.python.org/3/library/signal.html#note-on-sigpipe
try:
code = main()
sys.stdout.flush()
return code
except BrokenPipeError:
# Python flushes standard streams on exit; redirect remaining output
# to devnull to avoid another BrokenPipeError at shutdown
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
return 1 # Python exits with error code 1 on EPIPE
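# Hedged note: the same entry point can be driven programmatically, e.g.
# import pytest
# exit_code = pytest.main(["-q", "--maxfail=1"])  # returns an ExitCode, like console_main()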
|
ConftestImportFailure
|
python
|
davidhalter__jedi
|
jedi/inference/value/instance.py
|
{
"start": 18087,
"end": 19062
}
|
class ____(TreeNameDefinition):
"""
This name calculates the parent_context lazily.
"""
def __init__(self, instance, class_context, tree_name):
self._instance = instance
self.class_context = class_context
self.tree_name = tree_name
@property
def parent_context(self):
return self._instance.create_instance_context(self.class_context, self.tree_name)
def get_defining_qualified_value(self):
return self._instance
def infer(self):
stmt = search_ancestor(self.tree_name, 'expr_stmt')
if stmt is not None:
if stmt.children[1].type == "annassign":
from jedi.inference.gradual.annotation import infer_annotation
values = infer_annotation(
self.parent_context, stmt.children[1].children[1]
).execute_annotation()
if values:
return values
return super().infer()
|
SelfName
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_merge.py
|
{
"start": 28397,
"end": 28618
}
|
class ____(Merge):
def _lower(self):
# This is cheap and avoids shuffling unnecessary data
right = DropDuplicatesBlockwise(self.right)
return Merge(self.left, right, *self.operands[2:])
|
SemiMerge
|
python
|
pypa__pip
|
src/pip/_internal/utils/temp_dir.py
|
{
"start": 6597,
"end": 9307
}
|
class ____(TempDirectory):
"""Helper class that creates a temporary directory adjacent to a real one.
Attributes:
original
The original directory to create a temp directory for.
path
After calling create() or entering, contains the full
path to the temporary directory.
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
"""
# The characters that may be used to name the temp directory
# We always prepend a ~ and then rotate through these until
# a usable name is found.
# pkg_resources raises a different error for .dist-info folder
# with leading '-' and invalid metadata
LEADING_CHARS = "-~.=%0123456789"
def __init__(self, original: str, delete: bool | None = None) -> None:
self.original = original.rstrip("/\\")
super().__init__(delete=delete)
@classmethod
def _generate_names(cls, name: str) -> Generator[str, None, None]:
"""Generates a series of temporary names.
The algorithm replaces the leading characters in the name
with ones that are valid filesystem characters, but are not
valid package names (for both Python and pip definitions of
package).
"""
for i in range(1, len(name)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i - 1
):
new_name = "~" + "".join(candidate) + name[i:]
if new_name != name:
yield new_name
# If we make it this far, we will have to make a longer name
for i in range(len(cls.LEADING_CHARS)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i
):
new_name = "~" + "".join(candidate) + name
if new_name != name:
yield new_name
def _create(self, kind: str) -> str:
root, name = os.path.split(self.original)
for candidate in self._generate_names(name):
path = os.path.join(root, candidate)
try:
os.mkdir(path)
except OSError as ex:
# Continue if the name exists already
if ex.errno != errno.EEXIST:
raise
else:
path = os.path.realpath(path)
break
else:
# Final fallback on the default behavior.
path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
logger.debug("Created temporary directory: %s", path)
return path
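    # Illustrative behaviour of _generate_names, derived from the code above:
    # list(itertools.islice(AdjacentTempDirectory._generate_names("foo"), 3))
    # -> ['~oo', '~-o', '~~o']   (candidates are never equal to the original name)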
|
AdjacentTempDirectory
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 23845,
"end": 26540
}
|
class ____(Request):
"""
Create a new queue
    :param name: Queue name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
"""
_service = "queues"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"name": {
"description": "Queue name Unique within the company.",
"type": "string",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["name"],
"type": "object",
}
def __init__(
self, name: str, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.tags = tags
self.system_tags = system_tags
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
|
CreateRequest
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/utils/config.py
|
{
"start": 32964,
"end": 33434
}
|
class ____:
"""
Configuration for the CUDA stream pool.
Parameters
----------
pool_size
The size of the CUDA stream pool.
flags
The flags to use for the CUDA stream pool.
"""
pool_size: int = 16
flags: CudaStreamFlags = CudaStreamFlags.NON_BLOCKING
def build(self) -> CudaStreamPool:
return CudaStreamPool(
pool_size=self.pool_size,
flags=self.flags,
)
|
CUDAStreamPoolConfig
|
python
|
RaRe-Technologies__gensim
|
gensim/models/ldaseqmodel.py
|
{
"start": 28864,
"end": 48810
}
|
class ____(utils.SaveLoad):
"""Encapsulate the inner State Space Language Model for DTM.
Some important attributes of this class:
* `obs` is a matrix containing the document to topic ratios.
* `e_log_prob` is a matrix containing the topic to word ratios.
* `mean` contains the mean values to be used for inference for each word for a time slice.
* `variance` contains the variance values to be used for inference of word in a time slice.
    * `fwd_mean` and `fwd_variance` are the forward posterior values for the mean and the variance.
* `zeta` is an extra variational parameter with a value for each time slice.
"""
def __init__(self, vocab_len=None, num_time_slices=None, num_topics=None, obs_variance=0.5, chain_variance=0.005):
self.vocab_len = vocab_len
self.num_time_slices = num_time_slices
self.obs_variance = obs_variance
self.chain_variance = chain_variance
self.num_topics = num_topics
# setting up matrices
self.obs = np.zeros((vocab_len, num_time_slices))
self.e_log_prob = np.zeros((vocab_len, num_time_slices))
self.mean = np.zeros((vocab_len, num_time_slices + 1))
self.fwd_mean = np.zeros((vocab_len, num_time_slices + 1))
self.fwd_variance = np.zeros((vocab_len, num_time_slices + 1))
self.variance = np.zeros((vocab_len, num_time_slices + 1))
self.zeta = np.zeros(num_time_slices)
# the following are class variables which are to be integrated during Document Influence Model
self.m_update_coeff = None
self.mean_t = None
self.variance_t = None
self.influence_sum_lgl = None
self.w_phi_l = None
self.w_phi_sum = None
self.w_phi_l_sq = None
self.m_update_coeff_g = None
def update_zeta(self):
"""Update the Zeta variational parameter.
        Zeta is described in the appendix and is equal to the sum of exp(mean[word] + variance[word] / 2)
        over all words, computed separately for every time slice. It is the value of the variational
        parameter zeta which maximizes the lower bound.
Returns
-------
list of float
The updated zeta values for each time slice.
"""
for j, val in enumerate(self.zeta):
self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
return self.zeta
def compute_post_variance(self, word, chain_variance):
r"""Get the variance, based on the
`Variational Kalman Filtering approach for Approximate Inference (section 3.1)
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
        This method accepts the word to compute variance for and returns the `variance` and the
        posterior approximation `fwd_variance`.
Notes
-----
        This function essentially computes Var[\beta_{t,w}] for t = 1:T.
        .. math::
            fwd\_variance[t] \equiv E((\beta_{t,w} - mean_{t,w})^2 | \beta_{1:t}) =
            (obs\_variance / (fwd\_variance[t - 1] + chain\_variance + obs\_variance)) *
            (fwd\_variance[t - 1] + chain\_variance)
        .. math::
            variance[t] \equiv E((\beta_{t,w} - mean_{t,w})^2 | \beta_{1:T}) =
            ((fwd\_variance[t] / (fwd\_variance[t] + chain\_variance))^2 *
            (variance[t + 1] - chain\_variance)) +
            ((1 - (fwd\_variance[t] / (fwd\_variance[t] + chain\_variance))^2) * fwd\_variance[t])
Parameters
----------
word: int
The word's ID.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The first returned value is the variance of each word in each time slice, the second value is the
inferred posterior variance for the same pairs.
"""
INIT_VARIANCE_CONST = 1000
T = self.num_time_slices
variance = self.variance[word]
fwd_variance = self.fwd_variance[word]
# forward pass. Set initial variance very high
fwd_variance[0] = chain_variance * INIT_VARIANCE_CONST
for t in range(1, T + 1):
if self.obs_variance:
c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
else:
c = 0
fwd_variance[t] = c * (fwd_variance[t - 1] + chain_variance)
# backward pass
variance[T] = fwd_variance[T]
for t in range(T - 1, -1, -1):
if fwd_variance[t] > 0.0:
c = np.power((fwd_variance[t] / (fwd_variance[t] + chain_variance)), 2)
else:
c = 0
variance[t] = (c * (variance[t + 1] - chain_variance)) + ((1 - c) * fwd_variance[t])
return variance, fwd_variance
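    # Illustrative numbers for the forward recursion above (derived from the code, not the paper):
    # with obs_variance = 0.5 and chain_variance = 0.005, fwd_variance[0] = 5.0 and
    # c = 0.5 / (5.0 + 0.005 + 0.5) ~= 0.091, so fwd_variance[1] = c * 5.005 ~= 0.455,
    # shrinking quickly toward a small steady-state variance.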
def compute_post_mean(self, word, chain_variance):
"""Get the mean, based on the `Variational Kalman Filtering approach for Approximate Inference (section 3.1)
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
Notes
-----
This function essentially computes E[\beta_{t,w}] for t = 1:T.
        .. math::
            fwd\_mean[t] \equiv E(\beta_{t,w} | \beta_{1:t})
            = c_t * fwd\_mean[t - 1] + (1 - c_t) * obs[t - 1],
            \quad c_t = obs\_variance / (fwd\_variance[t - 1] + chain\_variance + obs\_variance)
        .. math::
            mean[t] \equiv E(\beta_{t,w} | \beta_{1:T})
            = d_t * fwd\_mean[t] + (1 - d_t) * mean[t + 1],
            \quad d_t = chain\_variance / (fwd\_variance[t] + chain\_variance)
Parameters
----------
word: int
The word's ID.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
Returns
-------
(numpy.ndarray, numpy.ndarray)
The first returned value is the mean of each word in each time slice, the second value is the
inferred posterior mean for the same pairs.
"""
T = self.num_time_slices
obs = self.obs[word]
fwd_variance = self.fwd_variance[word]
mean = self.mean[word]
fwd_mean = self.fwd_mean[word]
# forward
fwd_mean[0] = 0
for t in range(1, T + 1):
c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]
# backward pass
mean[T] = fwd_mean[T]
for t in range(T - 1, -1, -1):
if chain_variance == 0.0:
c = 0.0
else:
c = chain_variance / (fwd_variance[t] + chain_variance)
mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
return mean, fwd_mean
def compute_expected_log_prob(self):
"""Compute the expected log probability given values of m.
        The appendix describes the expectation of log-probabilities in equation 5 of the DTM paper;
        the implementation below is the result of solving that equation and follows the original
        Blei DTM code.
Returns
-------
numpy.ndarray of float
The expected value for the log probabilities for each word and time slice.
"""
for (w, t), val in np.ndenumerate(self.e_log_prob):
self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
return self.e_log_prob
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
"""Initialize the State Space Language Model with LDA sufficient statistics.
Called for each topic-chain and initializes initial mean, variance and Topic-Word probabilities
for the first time-slice.
Parameters
----------
obs_variance : float, optional
Observed variance used to approximate the true and forward variance.
chain_variance : float
Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
sstats : numpy.ndarray
Sufficient statistics of the LDA model. Corresponds to matrix beta in the linked paper for time slice 0,
expected shape (`self.vocab_len`, `num_topics`).
"""
W = self.vocab_len
T = self.num_time_slices
log_norm_counts = np.copy(sstats)
log_norm_counts /= sum(log_norm_counts)
log_norm_counts += 1.0 / W
log_norm_counts /= sum(log_norm_counts)
log_norm_counts = np.log(log_norm_counts)
# setting variational observations to transformed counts
self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)
# set variational parameters
self.obs_variance = obs_variance
self.chain_variance = chain_variance
# compute post variance, mean
for w in range(W):
self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)
self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)
self.zeta = self.update_zeta()
self.e_log_prob = self.compute_expected_log_prob()
def fit_sslm(self, sstats):
"""Fits variational distribution.
This is essentially the m-step.
Maximizes the approximation of the true posterior for a particular topic using the provided sufficient
statistics. Updates the values using :meth:`~gensim.models.ldaseqmodel.sslm.update_obs` and
:meth:`~gensim.models.ldaseqmodel.sslm.compute_expected_log_prob`.
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the
current time slice, expected shape (`self.vocab_len`, `num_topics`).
Returns
-------
float
The lower bound for the true posterior achieved using the fitted approximate distribution.
"""
W = self.vocab_len
bound = 0
old_bound = 0
sslm_fit_threshold = 1e-6
sslm_max_iter = 2
converged = sslm_fit_threshold + 1
# computing variance, fwd_variance
self.variance, self.fwd_variance = \
(np.array(x) for x in zip(*(self.compute_post_variance(w, self.chain_variance) for w in range(W))))
# column sum of sstats
totals = sstats.sum(axis=0)
iter_ = 0
model = "DTM"
if model == "DTM":
bound = self.compute_bound(sstats, totals)
if model == "DIM":
bound = self.compute_bound_fixed(sstats, totals)
logger.info("initial sslm bound is %f", bound)
while converged > sslm_fit_threshold and iter_ < sslm_max_iter:
iter_ += 1
old_bound = bound
self.obs, self.zeta = self.update_obs(sstats, totals)
if model == "DTM":
bound = self.compute_bound(sstats, totals)
if model == "DIM":
bound = self.compute_bound_fixed(sstats, totals)
converged = np.fabs((bound - old_bound) / old_bound)
logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, converged)
self.e_log_prob = self.compute_expected_log_prob()
return bound
def compute_bound(self, sstats, totals):
"""Compute the maximized lower bound achieved for the log probability of the true posterior.
Uses the formula presented in the appendix of the DTM paper (formula no. 5).
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
time slice, expected shape (`self.vocab_len`, `num_topics`).
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
Returns
-------
float
The maximized lower bound.
"""
w = self.vocab_len
t = self.num_time_slices
term_1 = 0
term_2 = 0
term_3 = 0
val = 0
ent = 0
chain_variance = self.chain_variance
# computing mean, fwd_mean
self.mean, self.fwd_mean = \
(np.array(x) for x in zip(*(self.compute_post_mean(w, self.chain_variance) for w in range(w))))
self.zeta = self.update_zeta()
val = sum(self.variance[w][0] - self.variance[w][t] for w in range(w)) / 2 * chain_variance
logger.info("Computing bound, all times")
for t in range(1, t + 1):
term_1 = 0.0
term_2 = 0.0
ent = 0.0
for w in range(w):
m = self.mean[w][t]
prev_m = self.mean[w][t - 1]
v = self.variance[w][t]
# w_phi_l is only used in Document Influence Model; the values are always zero in this case
# w_phi_l = sslm.w_phi_l[w][t - 1]
# exp_i = np.exp(-prev_m)
# term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -
# (v / chain_variance) - np.log(chain_variance)
term_1 += \
(np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)
term_2 += sstats[w][t - 1] * m
ent += np.log(v) / 2 # note the 2pi's cancel with term1 (see doc)
term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])
val += term_2 + term_3 + ent - term_1
return val
def update_obs(self, sstats, totals):
"""Optimize the bound with respect to the observed variables.
TODO:
This is by far the slowest function in the whole algorithm.
Replacing or improving the performance of this would greatly speed things up.
Parameters
----------
sstats : numpy.ndarray
Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
time slice, expected shape (`self.vocab_len`, `num_topics`).
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
Returns
-------
(numpy.ndarray of float, numpy.ndarray of float)
The updated optimized values for obs and the zeta variational parameter.
"""
OBS_NORM_CUTOFF = 2
STEP_SIZE = 0.01
TOL = 1e-3
W = self.vocab_len
T = self.num_time_slices
runs = 0
mean_deriv_mtx = np.zeros((T, T + 1))
norm_cutoff_obs = None
for w in range(W):
w_counts = sstats[w]
counts_norm = 0
# now we find L2 norm of w_counts
for i in range(len(w_counts)):
counts_norm += w_counts[i] * w_counts[i]
counts_norm = np.sqrt(counts_norm)
if counts_norm < OBS_NORM_CUTOFF and norm_cutoff_obs is not None:
obs = self.obs[w]
norm_cutoff_obs = np.copy(obs)
else:
if counts_norm < OBS_NORM_CUTOFF:
w_counts = np.zeros(len(w_counts))
# TODO: apply lambda function
for t in range(T):
mean_deriv_mtx[t] = self.compute_mean_deriv(w, t, mean_deriv_mtx[t])
deriv = np.zeros(T)
args = self, w_counts, totals, mean_deriv_mtx, w, deriv
obs = self.obs[w]
model = "DTM"
if model == "DTM":
# slowest part of method
obs = optimize.fmin_cg(
f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0
)
if model == "DIM":
pass
runs += 1
if counts_norm < OBS_NORM_CUTOFF:
norm_cutoff_obs = obs
self.obs[w] = obs
self.zeta = self.update_zeta()
return self.obs, self.zeta
def compute_mean_deriv(self, word, time, deriv):
"""Helper functions for optimizing a function.
Compute the derivative of:
.. :math::
E[\beta_{t,w}]/d obs_{s,w} for t = 1:T.
Parameters
----------
word : int
The word's ID.
time : int
The time slice.
deriv : list of float
Derivative for each time slice.
Returns
-------
list of float
Mean derivative for each time slice.
"""
T = self.num_time_slices
fwd_variance = self.variance[word]
deriv[0] = 0
# forward pass
for t in range(1, T + 1):
if self.obs_variance > 0.0:
w = self.obs_variance / (fwd_variance[t - 1] + self.chain_variance + self.obs_variance)
else:
w = 0.0
val = w * deriv[t - 1]
if time == t - 1:
val += (1 - w)
deriv[t] = val
for t in range(T - 1, -1, -1):
if self.chain_variance == 0.0:
w = 0.0
else:
w = self.chain_variance / (fwd_variance[t] + self.chain_variance)
deriv[t] = w * deriv[t] + (1 - w) * deriv[t + 1]
return deriv
def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
"""Derivation of obs which is used in derivative function `df_obs` while optimizing.
Parameters
----------
word : int
The word's ID.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : numpy.ndarray
Matrix of mean derivatives, one row per time slice.
deriv : list of float
Output array that receives the derivative for each time slice.
Returns
-------
list of float
Derivative of the bound with respect to obs, one value per time slice.
"""
# flag
init_mult = 1000
T = self.num_time_slices
mean = self.mean[word]
variance = self.variance[word]
# only used for DIM mode
# w_phi_l = self.w_phi_l[word]
# m_update_coeff = self.m_update_coeff[word]
# temp_vector holds temporary zeta values
self.temp_vect = np.zeros(T)
for u in range(T):
self.temp_vect[u] = np.exp(mean[u + 1] + variance[u + 1] / 2)
for t in range(T):
mean_deriv = mean_deriv_mtx[t]
term1 = 0
term2 = 0
term3 = 0
term4 = 0
for u in range(1, T + 1):
mean_u = mean[u]
mean_u_prev = mean[u - 1]
dmean_u = mean_deriv[u]
dmean_u_prev = mean_deriv[u - 1]
term1 += (mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)
term2 += (word_counts[u - 1] - (totals[u - 1] * self.temp_vect[u - 1] / self.zeta[u - 1])) * dmean_u
model = "DTM"
if model == "DIM":
# do some stuff
pass
if self.chain_variance:
term1 = - (term1 / self.chain_variance)
term1 = term1 - (mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance)
else:
term1 = 0.0
deriv[t] = term1 + term2 + term3 + term4
return deriv
|
sslm
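The forward/backward recursion in `compute_post_mean` above is the heart of this row's snippet. As a reading aid, here is a minimal standalone sketch of that recursion over plain NumPy arrays; the function name and argument handling are ours, not gensim's, and `fwd_variance` is assumed to be precomputed with length `T + 1`.
```python
# Minimal standalone sketch (ours, not gensim API) of the forward/backward
# recursion in compute_post_mean above; assumes fwd_variance has length T + 1.
import numpy as np
def smooth_posterior_mean(obs, fwd_variance, obs_variance, chain_variance):
    T = len(obs)
    fwd_mean = np.zeros(T + 1)
    mean = np.zeros(T + 1)
    # forward pass: blend the previous filtered mean with the new observation
    for t in range(1, T + 1):
        c = obs_variance / (fwd_variance[t - 1] + chain_variance + obs_variance)
        fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]
    # backward pass: smooth each filtered mean toward the future smoothed mean
    mean[T] = fwd_mean[T]
    for t in range(T - 1, -1, -1):
        c = 0.0 if chain_variance == 0.0 else chain_variance / (fwd_variance[t] + chain_variance)
        mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
    return mean, fwd_mean
```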
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 245862,
"end": 248174
}
|
class ____(GeneratedAirbyteSource):
class AuthenticateViaHarvestOAuth:
@public
def __init__(
self,
client_id: str,
client_secret: str,
refresh_token: str,
auth_type: Optional[str] = None,
):
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class AuthenticateWithPersonalAccessToken:
@public
def __init__(self, api_token: str, auth_type: Optional[str] = None):
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.api_token = check.str_param(api_token, "api_token")
@public
def __init__(
self,
name: str,
account_id: str,
replication_start_date: str,
credentials: Union[
"HarvestSource.AuthenticateViaHarvestOAuth",
"HarvestSource.AuthenticateWithPersonalAccessToken",
],
):
"""Airbyte Source for Harvest.
Documentation can be found at https://docs.airbyte.com/integrations/sources/harvest
Args:
name (str): The name of the destination.
account_id (str): Harvest account ID. Required for all Harvest requests in pair with Personal Access Token
replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
credentials (Union[HarvestSource.AuthenticateViaHarvestOAuth, HarvestSource.AuthenticateWithPersonalAccessToken]): Choose how to authenticate to Harvest.
"""
self.account_id = check.str_param(account_id, "account_id")
self.replication_start_date = check.str_param(
replication_start_date, "replication_start_date"
)
self.credentials = check.inst_param(
credentials,
"credentials",
(
HarvestSource.AuthenticateViaHarvestOAuth,
HarvestSource.AuthenticateWithPersonalAccessToken,
),
)
super().__init__("Harvest", name)
|
HarvestSource
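A hedged construction sketch for the generated source above, using the personal-access-token variant of the credentials union; every value is a placeholder, not a working credential.
```python
# Hedged construction sketch for the generated HarvestSource above; all values
# are placeholders, not working credentials.
from dagster_airbyte.managed.generated.sources import HarvestSource
harvest_source = HarvestSource(
    name="my_harvest_source",
    account_id="123456",
    replication_start_date="2021-01-01T00:00:00Z",
    credentials=HarvestSource.AuthenticateWithPersonalAccessToken(
        api_token="placeholder-token",
    ),
)
```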
|
python
|
scikit-image__scikit-image
|
src/skimage/_shared/utils.py
|
{
"start": 16210,
"end": 18239
}
|
class ____:
"""Class to indicate a failed transform estimation.
The ``from_estimate`` class method of each transform type may return an
instance of this class to indicate some failure in the estimation process.
Parameters
----------
message : str
Message indicating reason for failed estimation.
Attributes
----------
message : str
Message above.
Raises
------
FailedEstimationAccessError
Exception raised for missing attributes or if the instance is used as a
callable.
"""
error_cls = FailedEstimationAccessError
hint = (
"You can check for a failed estimation by truth testing the returned "
"object. For failed estimations, `bool(estimation_result)` will be `False`. "
"E.g.\n\n"
" if not estimation_result:\n"
" raise RuntimeError(f'Failed estimation: {estimation_result}')"
)
def __init__(self, message):
self.message = message
def __bool__(self):
return False
def __repr__(self):
return f"{type(self).__name__}({self.message!r})"
def __str__(self):
return self.message
def __call__(self, *args, **kwargs):
msg = (
f'{type(self).__name__} is not callable. {self.message}\n\n'
f'Hint: {self.hint}'
)
raise self.error_cls(msg)
def __getattr__(self, name):
msg = (
f'{type(self).__name__} has no attribute {name!r}. {self.message}\n\n'
f'Hint: {self.hint}'
)
raise self.error_cls(msg)
@contextmanager
def _ignore_deprecated_estimate_warning():
"""Filter warnings about the deprecated `estimate` method.
Use either as decorator or context manager.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
category=FutureWarning,
message="`estimate` is deprecated",
module="skimage",
)
yield
|
FailedEstimation
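A small sketch of the truth-testing behaviour the docstring and hint above describe; note that `skimage._shared.utils` is a private module, so the import is for illustration only.
```python
# Sketch of the truth-testing behaviour described above; skimage._shared.utils
# is a private module, so this import is for illustration only.
from skimage._shared.utils import FailedEstimation
result = FailedEstimation("not enough point correspondences")
assert not result            # bool(result) is False for a failed estimation
print(result)                # prints the failure message
# Attribute access or calling result(...) raises FailedEstimationAccessError.
```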
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/monitored_session.py
|
{
"start": 38115,
"end": 41522
}
|
class ____(_MonitoredSession):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
Initialization: At creation time the monitored session does the following things
in the given order:
* calls `hook.begin()` for each given hook
* finalizes the graph via `scaffold.finalize()`
* create session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
* calls `hook.after_create_session()`
Run: When `run()` is called, the monitored session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns result of `session.run()` asked by user
* if `AbortedError` or `UnavailableError` occurs, it recovers or
reinitializes the session before executing the run() call again
Exit: At `close()`, the monitored session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
* suppresses `OutOfRange` error which indicates that all inputs have been
processed if the monitored_session is used as a context
How to set `tf.compat.v1.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
* In distributed setting for a non-chief worker, you can use following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
Note: This is not a `tf.compat.v1.Session`. For example, it cannot do
the following:
* it cannot be set as default session.
* it cannot be sent to saver.save.
* it cannot be sent to tf.train.start_queue_runners.
@compatibility(TF2)
This API is not compatible with eager execution and `tf.function`. To migrate
to TF2, rewrite the code to be compatible with eager execution. Check the
[migration
guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls)
on replacing `Session.run` calls. In Keras, session hooks can be replaced by
Callbacks e.g. [logging hook notebook](
https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb)
For more details please read [Better
performance with tf.function](https://www.tensorflow.org/guide/function).
@end_compatibility
Args:
session_creator: A factory object to create the session. Typically a
`ChiefSessionCreator`, which is the default one.
hooks: An iterable of `SessionRunHook` objects.
Returns:
A MonitoredSession object.
"""
def __init__(self,
session_creator=None,
hooks=None,
stop_grace_period_secs=120):
super(MonitoredSession, self).__init__(
session_creator,
hooks,
should_recover=True,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export(v1=['train.SingularMonitoredSession'])
|
MonitoredSession
|
python
|
getsentry__sentry
|
src/sentry/apidocs/parameters.py
|
{
"start": 30193,
"end": 30431
}
|
class ____:
DASHBOARD_ID = OpenApiParameter(
name="dashboard_id",
location="path",
required=True,
type=int,
description="""The ID of the dashboard you'd like to retrieve.""",
)
|
DashboardParams
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/commands/show.py
|
{
"start": 918,
"end": 1920
}
|
class ____(Command):
"""
Show information about one or more installed packages.
The output is in RFC-compliant mail header format.
"""
usage = """
%prog [options] <package> ..."""
ignore_require_venv = True
def add_options(self) -> None:
self.cmd_opts.add_option(
"-f",
"--files",
dest="files",
action="store_true",
default=False,
help="Show the full list of installed files for each package.",
)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: List[str]) -> int:
if not args:
logger.warning("ERROR: Please provide a package name or names.")
return ERROR
query = args
results = search_packages_info(query)
if not print_results(
results, list_files=options.files, verbose=options.verbose
):
return ERROR
return SUCCESS
|
ShowCommand
|
python
|
catalyst-team__catalyst
|
catalyst/engines/torch.py
|
{
"start": 1234,
"end": 5867
}
|
class ____(Engine):
"""Distributed multi-GPU-based engine.
Args:
*args: args for Accelerator.__init__
address: master node (rank 0)'s address,
should be either the IP address or the hostname
of node 0, for single node multi-proc training, can simply be 127.0.0.1
port: master node (rank 0)'s free port that needs to be used for communication
during distributed training
world_size: the number of processes to use for distributed training.
Should be less or equal to the number of GPUs
workers_dist_rank: the rank of the first process to run on the node.
It should be a number between `number of initialized processes`
and `world_size - 1`; the other processes on the node will have ranks
`# of initialized processes + 1`, `# of initialized processes + 2`, ...,
`# of initialized processes + num_node_workers - 1`
num_node_workers: the number of processes to launch on the node.
For GPU training, this is recommended to be set to the number of GPUs
on the current node so that each process can be bound to a single GPU
process_group_kwargs: parameters for `torch.distributed.init_process_group`.
More info here:
https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group # noqa: E501, W505
**kwargs: kwargs for Accelerator.__init__
"""
def __init__(
self,
*args,
address: str = "127.0.0.1",
port: Union[str, int] = 2112,
world_size: Optional[int] = None,
workers_dist_rank: int = 0,
num_node_workers: Optional[int] = None,
process_group_kwargs: Dict[str, Any] = None,
**kwargs
):
"""Init."""
self._address = os.environ.get("MASTER_ADDR", address)
self._port = os.environ.get("MASTER_PORT", port)
self._num_local_workers = num_node_workers or torch.cuda.device_count() or 1
self._workers_global_rank = workers_dist_rank
self._world_size = world_size or self._num_local_workers
self._process_group_kwargs = process_group_kwargs or {}
self._args = args
self._kwargs = kwargs
def spawn(self, fn: Callable, *args, **kwargs):
"""Spawns processes with specified ``fn`` and ``args``/``kwargs``.
Args:
fn (function): Function is called as the entrypoint of the
spawned process. This function must be defined at the top
level of a module so it can be pickled and spawned. This
is a requirement imposed by multiprocessing.
The function is called as ``fn(i, *args)``, where ``i`` is
the process index and ``args`` is the passed through tuple
of arguments.
*args: Arguments passed to spawn method.
**kwargs: Keyword-arguments passed to spawn method.
Returns:
wrapped function (if needed).
"""
return mp.spawn(
fn,
args=(self._world_size,),
nprocs=self._num_local_workers,
join=True,
)
def setup(self, local_rank: int, world_size: int):
"""Initialize DDP variables and processes if required.
Args:
local_rank: process rank. Default is `-1`.
world_size: number of devices in the network to expect for training.
Default is `1`.
"""
process_group_kwargs = {
"backend": "nccl",
"world_size": world_size,
**self._process_group_kwargs,
}
global_rank = self._workers_global_rank + local_rank
os.environ["MASTER_ADDR"] = str(self._address)
os.environ["MASTER_PORT"] = str(self._port)
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(global_rank)
os.environ["LOCAL_RANK"] = str(local_rank)
dist.init_process_group(**process_group_kwargs)
super().__init__(self, *self._args, **self._kwargs)
def cleanup(self):
"""Cleans DDP variables and processes."""
dist.destroy_process_group()
def mean_reduce_ddp_metrics(self, metrics: Dict) -> Dict:
"""Syncs ``metrics`` over ``world_size`` in the distributed mode."""
metrics = {
k: mean_reduce(
torch.tensor(v, device=self.device),
world_size=self.state.num_processes,
)
for k, v in metrics.items()
}
return metrics
|
DistributedDataParallelEngine
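What `setup()` above boils down to, written as a plain `torch.distributed` sketch for a single node; the helper name and defaults are ours, and the NCCL backend is assumed as in the engine's defaults.
```python
# Illustrative single-node sketch (names and defaults ours) of what setup()
# above does for each spawned worker.
import os
import torch.distributed as dist
def init_worker(local_rank, world_size, address="127.0.0.1", port=2112):
    os.environ["MASTER_ADDR"] = address
    os.environ["MASTER_PORT"] = str(port)
    os.environ["WORLD_SIZE"] = str(world_size)
    os.environ["RANK"] = str(local_rank)  # single node: global rank == local rank
    os.environ["LOCAL_RANK"] = str(local_rank)
    dist.init_process_group(backend="nccl", world_size=world_size, rank=local_rank)
# ... training ...
# dist.destroy_process_group()  # mirrors cleanup() above
```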
|
python
|
Lightning-AI__lightning
|
tests/tests_fabric/utilities/test_registry.py
|
{
"start": 238,
"end": 2105
}
|
class ____:
"""A callback in another library that gets registered through entry points."""
pass
def test_load_external_callbacks():
"""Test that the connector collects Callback instances from factories registered through entry points."""
def factory_no_callback():
return []
def factory_one_callback():
return ExternalCallback()
def factory_one_callback_list():
return [ExternalCallback()]
def factory_multiple_callbacks_list():
return [ExternalCallback(), ExternalCallback()]
with _make_entry_point_query_mock(factory_no_callback):
callbacks = _load_external_callbacks("lightning.pytorch.callbacks_factory")
assert callbacks == []
with _make_entry_point_query_mock(factory_one_callback):
callbacks = _load_external_callbacks("lightning.pytorch.callbacks_factory")
assert isinstance(callbacks[0], ExternalCallback)
with _make_entry_point_query_mock(factory_one_callback_list):
callbacks = _load_external_callbacks("lightning.pytorch.callbacks_factory")
assert isinstance(callbacks[0], ExternalCallback)
with _make_entry_point_query_mock(factory_multiple_callbacks_list):
callbacks = _load_external_callbacks("lightning.pytorch.callbacks_factory")
assert isinstance(callbacks[0], ExternalCallback)
assert isinstance(callbacks[1], ExternalCallback)
@contextlib.contextmanager
def _make_entry_point_query_mock(callback_factory):
query_mock = MagicMock()
entry_point = Mock()
entry_point.name = "mocked"
entry_point.load.return_value = callback_factory
if _PYTHON_GREATER_EQUAL_3_10_0:
query_mock.return_value = [entry_point]
else:
query_mock().get.return_value = [entry_point]
with mock.patch("lightning.fabric.utilities.registry.entry_points", query_mock):
yield
|
ExternalCallback
|
python
|
joke2k__faker
|
tests/providers/test_phone_number.py
|
{
"start": 5859,
"end": 6660
}
|
class ____:
"""Test de_CH phone number provider methods"""
pattern: Pattern = re.compile(r"(\+41|0) ?(?P<dialing_code>\d{2}) \d{3} \d{2} \d{2}")
def test_phone_number(self, faker, num_samples):
for _ in range(num_samples):
phone_number = faker.phone_number()
assert self.pattern.fullmatch(phone_number)
assert self.pattern.match(phone_number).group("dialing_code") in DeChPhoneNumberProvider.landline_codes
def test_cellphone_number(self, faker, num_samples):
for _ in range(num_samples):
cellphone_number = faker.cellphone_number()
assert self.pattern.fullmatch(cellphone_number)
assert self.pattern.match(cellphone_number).group("dialing_code") in DeChPhoneNumberProvider.dialing_codes
|
TestDeCh
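For a quick sanity check of the regular expression used above, here it is run against a made-up number; this only exercises the regex itself, not Faker's dialing-code lists.
```python
# Sanity check of the de_CH pattern above against a fabricated number.
import re
pattern = re.compile(r"(\+41|0) ?(?P<dialing_code>\d{2}) \d{3} \d{2} \d{2}")
match = pattern.fullmatch("+41 79 123 45 67")  # fabricated example number
assert match is not None and match.group("dialing_code") == "79"
```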
|
python
|
PyCQA__pycodestyle
|
pycodestyle.py
|
{
"start": 67932,
"end": 80804
}
|
class ____:
"""Load a Python source file, tokenize it, check coding style."""
def __init__(self, filename=None, lines=None,
options=None, report=None, **kwargs):
if options is None:
options = StyleGuide(kwargs).options
else:
assert not kwargs
self._io_error = None
self._physical_checks = options.physical_checks
self._logical_checks = options.logical_checks
self._ast_checks = options.ast_checks
self.max_line_length = options.max_line_length
self.max_doc_length = options.max_doc_length
self.indent_size = options.indent_size
self.fstring_start = self.tstring_start = 0
self.multiline = False # in a multiline string?
self.hang_closing = options.hang_closing
self.indent_size = options.indent_size
self.verbose = options.verbose
self.filename = filename
# Dictionary where a checker can store its custom state.
self._checker_states = {}
if filename is None:
self.filename = 'stdin'
self.lines = lines or []
elif filename == '-':
self.filename = 'stdin'
self.lines = stdin_get_value().splitlines(True)
elif lines is None:
try:
self.lines = readlines(filename)
except OSError:
(exc_type, exc) = sys.exc_info()[:2]
self._io_error = f'{exc_type.__name__}: {exc}'
self.lines = []
else:
self.lines = lines
if self.lines:
ord0 = ord(self.lines[0][0])
if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
if ord0 == 0xfeff:
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == '\xef\xbb\xbf':
self.lines[0] = self.lines[0][3:]
self.report = report or options.report
self.report_error = self.report.error
self.noqa = False
def report_invalid_syntax(self):
"""Check if the syntax is valid."""
(exc_type, exc) = sys.exc_info()[:2]
if len(exc.args) > 1:
offset = exc.args[1]
if len(offset) > 2:
offset = offset[1:3]
else:
offset = (1, 0)
self.report_error(offset[0], offset[1] or 0,
f'E901 {exc_type.__name__}: {exc.args[0]}',
self.report_invalid_syntax)
def readline(self):
"""Get the next line from the input buffer."""
if self.line_number >= self.total_lines:
return ''
line = self.lines[self.line_number]
self.line_number += 1
if self.indent_char is None and line[:1] in WHITESPACE:
self.indent_char = line[0]
return line
def run_check(self, check, argument_names):
"""Run a check plugin."""
arguments = [getattr(self, name) for name in argument_names]
return check(*arguments)
def init_checker_state(self, name, argument_names):
"""Prepare custom state for the specific checker plugin."""
if 'checker_state' in argument_names:
self.checker_state = self._checker_states.setdefault(name, {})
def check_physical(self, line):
"""Run all physical checks on a raw input line."""
self.physical_line = line
for name, check, argument_names in self._physical_checks:
self.init_checker_state(name, argument_names)
result = self.run_check(check, argument_names)
if result is not None:
(offset, text) = result
self.report_error(self.line_number, offset, text, check)
if text[:4] == 'E101':
self.indent_char = line[0]
def build_tokens_line(self):
"""Build a logical line from tokens."""
logical = []
comments = []
length = 0
prev_row = prev_col = mapping = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mute_string(text)
elif token_type in {FSTRING_MIDDLE, TSTRING_MIDDLE}: # pragma: >=3.12 cover # noqa: E501
# fstring tokens are "unescaped" braces -- re-escape!
brace_count = text.count('{') + text.count('}')
text = 'x' * (len(text) + brace_count)
end = (end[0], end[1] + brace_count)
if prev_row:
(start_row, start_col) = start
if prev_row != start_row: # different row
prev_text = self.lines[prev_row - 1][prev_col - 1]
if prev_text == ',' or (prev_text not in '{[(' and
text not in '}])'):
text = ' ' + text
elif prev_col != start_col: # different column
text = line[prev_col:start_col] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(prev_row, prev_col) = end
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
return mapping
def check_logical(self):
"""Build a line from tokens and run all logical checks on it."""
self.report.increment_logical_line()
mapping = self.build_tokens_line()
if not mapping:
return
mapping_offsets = [offset for offset, _ in mapping]
(start_row, start_col) = mapping[0][1]
start_line = self.lines[start_row - 1]
self.indent_level = expand_indent(start_line[:start_col])
if self.blank_before < self.blank_lines:
self.blank_before = self.blank_lines
if self.verbose >= 2:
print(self.logical_line[:80].rstrip())
for name, check, argument_names in self._logical_checks:
if self.verbose >= 4:
print(' ' + name)
self.init_checker_state(name, argument_names)
for offset, text in self.run_check(check, argument_names) or ():
if not isinstance(offset, tuple):
# As mappings are ordered, bisecting is a fast way
# to find a given offset in them.
token_offset, pos = mapping[bisect.bisect_left(
mapping_offsets, offset)]
offset = (pos[0], pos[1] + offset - token_offset)
self.report_error(offset[0], offset[1], text, check)
if self.logical_line:
self.previous_indent_level = self.indent_level
self.previous_logical = self.logical_line
if not self.indent_level:
self.previous_unindented_logical_line = self.logical_line
self.blank_lines = 0
self.tokens = []
def check_ast(self):
"""Build the file's AST and run all AST checks."""
try:
tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
except (ValueError, SyntaxError, TypeError):
return self.report_invalid_syntax()
for name, cls, __ in self._ast_checks:
checker = cls(tree, self.filename)
for lineno, offset, text, check in checker.run():
if not self.lines or not noqa(self.lines[lineno - 1]):
self.report_error(lineno, offset, text, check)
def generate_tokens(self):
"""Tokenize file, run physical line checks and yield tokens."""
if self._io_error:
self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
tokengen = tokenize.generate_tokens(self.readline)
try:
prev_physical = ''
for token in tokengen:
if token[2][0] > self.total_lines:
return
self.noqa = token[4] and noqa(token[4])
self.maybe_check_physical(token, prev_physical)
yield token
prev_physical = token[4]
except (SyntaxError, tokenize.TokenError):
self.report_invalid_syntax()
def maybe_check_physical(self, token, prev_physical):
"""If appropriate for token, check current physical line(s)."""
# Called after every token, but act only on end of line.
if token.type == FSTRING_START: # pragma: >=3.12 cover
self.fstring_start = token.start[0]
elif token.type == TSTRING_START: # pragma: >=3.14 cover
self.tstring_start = token.start[0]
# a newline token ends a single physical line.
elif _is_eol_token(token):
# if the file does not end with a newline, the NEWLINE
# token is inserted by the parser, but it does not contain
# the previous physical line in `token[4]`
if token.line == '':
self.check_physical(prev_physical)
else:
self.check_physical(token.line)
elif (
token.type == tokenize.STRING and '\n' in token.string or
token.type == FSTRING_END or
token.type == TSTRING_END
):
# Less obviously, a string that contains newlines is a
# multiline string, either triple-quoted or with internal
# newlines backslash-escaped. Check every physical line in
# the string *except* for the last one: its newline is
# outside of the multiline string, so we consider it a
# regular physical line, and will check it like any other
# physical line.
#
# Subtleties:
# - we don't *completely* ignore the last line; if it
# contains the magical "# noqa" comment, we disable all
# physical checks for the entire multiline string
# - have to wind self.line_number back because initially it
# points to the last line of the string, and we want
# check_physical() to give accurate feedback
if noqa(token.line):
return
if token.type == FSTRING_END: # pragma: >=3.12 cover
start = self.fstring_start
elif token.type == TSTRING_END: # pragma: >=3.12 cover
start = self.tstring_start
else:
start = token.start[0]
end = token.end[0]
self.multiline = True
self.line_number = start
for line_number in range(start, end):
self.check_physical(self.lines[line_number - 1] + '\n')
self.line_number += 1
self.multiline = False
def check_all(self, expected=None, line_offset=0):
"""Run all checks on the input file."""
self.report.init_file(self.filename, self.lines, expected, line_offset)
self.total_lines = len(self.lines)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.previous_unindented_logical_line = ''
self.tokens = []
self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
token_type, text = token[0:2]
if self.verbose >= 3:
if token[2][0] == token[3][0]:
pos = '[{}:{}]'.format(token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], text))
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in '}])':
parens -= 1
elif not parens:
if token_type in NEWLINE:
if token_type == tokenize.NEWLINE:
self.check_logical()
self.blank_before = 0
elif len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
del self.tokens[0]
else:
self.check_logical()
if self.tokens:
self.check_physical(self.lines[-1])
self.check_logical()
return self.report.get_file_results()
|
Checker
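A hedged usage sketch of the `Checker` above, driving it with in-memory lines as its `__init__` allows; the exact report behaviour depends on the configured `StyleGuide` options.
```python
# Hedged usage sketch: drive the Checker above with in-memory lines, as its
# __init__ allows; violations are printed and a count is returned.
import pycodestyle
checker = pycodestyle.Checker(filename="example.py", lines=["import os,sys\n"])
error_count = checker.check_all()  # flags E401 (multiple imports on one line), among others
```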
|
python
|
tornadoweb__tornado
|
tornado/test/web_test.py
|
{
"start": 85492,
"end": 86021
}
|
class ____(WebTestCase):
def get_handlers(self):
return [("/foo", RequestHandler)]
def get_app_kwargs(self):
class Custom404Handler(RequestHandler):
def get(self):
self.set_status(404)
self.write("custom 404 response")
return dict(default_handler_class=Custom404Handler)
def test_404(self):
response = self.fetch("/")
self.assertEqual(response.code, 404)
self.assertEqual(response.body, b"custom 404 response")
|
Custom404Test
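The pattern exercised by this test, reduced to a minimal Tornado sketch; the handler and route names here are ours.
```python
# Minimal sketch (handler/route names ours) of the pattern under test:
# an application-wide custom 404 via Tornado's default_handler_class setting.
import tornado.web
class NotFoundHandler(tornado.web.RequestHandler):
    def get(self):
        self.set_status(404)
        self.write("custom 404 response")
app = tornado.web.Application(
    [(r"/foo", tornado.web.RequestHandler)],
    default_handler_class=NotFoundHandler,
)
```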
|
python
|
pytorch__pytorch
|
benchmarks/tensorexpr/reduction.py
|
{
"start": 7666,
"end": 8286
}
|
class ____(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def default_configs():
parent_config = DynamicReduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
parent_config = super().config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_dynamic_outer"
benchmark.register_benchmark_class(DynamicReduce2DInnerBench)
benchmark.register_benchmark_class(DynamicReduce2DOuterBench)
|
DynamicReduce2DOuterBench
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/models.py
|
{
"start": 3635,
"end": 5104
}
|
class ____(models.Model):
"""
Project to project relationship.
This is used for subprojects.
Terminology: We should say main project and subproject.
Saying "child" and "parent" only has internal, technical value.
"""
parent = models.ForeignKey(
"projects.Project",
verbose_name=_("Main project"),
related_name="subprojects",
on_delete=models.CASCADE,
)
child = models.ForeignKey(
"projects.Project",
verbose_name=_("Subproject"),
related_name="superprojects",
on_delete=models.CASCADE,
)
alias = models.SlugField(
_("Alias"),
max_length=255,
null=True,
blank=True,
db_index=False,
)
objects = ChildRelatedProjectQuerySet.as_manager()
def save(self, *args, **kwargs):
if not self.alias:
self.alias = self.child.slug
super().save(*args, **kwargs)
# HACK
def get_absolute_url(self):
return Resolver().resolve_version(project=self.child)
@cached_property
def subproject_prefix(self):
"""
Returns the path prefix of the subproject.
This normally is ``/projects/<subproject-alias>/``,
but if the project has a custom subproject prefix,
that will be used.
"""
prefix = self.parent.custom_subproject_prefix or "/projects/"
return unsafe_join_url_path(prefix, self.alias, "/")
|
ProjectRelationship
|
python
|
PrefectHQ__prefect
|
tests/test_serializers.py
|
{
"start": 5716,
"end": 13171
}
|
class ____:
@pytest.mark.parametrize("data", SERIALIZER_TEST_CASES)
def test_simple_roundtrip(self, data: Any):
serializer = JSONSerializer()
serialized = serializer.dumps(data)
assert serializer.loads(serialized) == data
@pytest.mark.parametrize("data", EXCEPTION_TEST_CASES)
def test_exception_roundtrip(self, data: Any):
serializer = JSONSerializer()
serialized = serializer.dumps(data)
assert exceptions_equal(serializer.loads(serialized), data)
@pytest.mark.parametrize(
"data",
[
complex_str.encode("utf-8"),
complex_str.encode("ASCII"),
complex_str.encode("latin_1"),
[complex_str.encode("utf-8")],
{"key": complex_str.encode("ASCII")},
],
)
def test_simple_roundtrip_with_complex_bytes(self, data: Any):
serializer = JSONSerializer()
serialized = serializer.dumps(data)
assert serializer.loads(serialized) == data
def test_allows_orjson(self):
# orjson does not support hooks
serializer = JSONSerializer(
jsonlib="orjson", object_encoder=None, object_decoder=None
)
serialized = serializer.dumps("test")
assert serializer.loads(serialized) == "test"
def test_uses_alternative_json_library(self, monkeypatch: pytest.MonkeyPatch):
dumps_mock = MagicMock()
loads_mock = MagicMock()
monkeypatch.setattr("orjson.dumps", dumps_mock)
monkeypatch.setattr("orjson.loads", loads_mock)
serializer = JSONSerializer(jsonlib="orjson")
serializer.dumps("test")
serializer.loads(b"test")
dumps_mock.assert_called_once_with("test", default=prefect_json_object_encoder)
loads_mock.assert_called_once_with(
"test", object_hook=prefect_json_object_decoder
)
def test_json_serializer_does_not_consume_iobase_objects(self):
serializer = JSONSerializer()
string_io_content = "hello world from unit test"
string_io = io.StringIO(string_io_content)
data_with_stream = {"my_stream": string_io, "other_data": 123}
string_io.seek(0)
assert string_io.tell() == 0, "Initial seek(0) failed"
serialized_data = serializer.dumps(data_with_stream)
assert string_io.tell() == 0, "Stream pointer moved after dumps()"
assert string_io.read() == string_io_content, (
"Stream content changed or was consumed after dumps()"
)
string_io.seek(0)
deserialized_data = json.loads(serialized_data.decode())
deserialized_stream_placeholder: dict[str, Any] = deserialized_data.get(
"my_stream"
)
assert isinstance(deserialized_stream_placeholder, dict), (
f"Deserialized 'my_stream' should be a dict placeholder, "
f"but got {type(deserialized_stream_placeholder)}"
)
assert deserialized_stream_placeholder.get("__class__") == to_qualified_name(
io.StringIO
), (
f"Placeholder __class__ ('{deserialized_stream_placeholder.get('__class__')}') "
f"does not match expected ('{to_qualified_name(io.StringIO)}')"
)
placeholder_data_string = deserialized_stream_placeholder.get("data")
assert isinstance(placeholder_data_string, str), (
f"Placeholder data field should be a string, "
f"but got {type(placeholder_data_string)}"
)
expected_placeholder_prefix = "<Prefect IOStream Placeholder:"
expected_placeholder_type_info = f"type={string_io.__class__.__name__}"
expected_placeholder_repr_info = f"repr={repr(string_io)}"
expected_placeholder_suffix = "(original content not read)>"
assert expected_placeholder_prefix in placeholder_data_string, (
f"Placeholder prefix '{expected_placeholder_prefix}' missing in placeholder string: {placeholder_data_string}"
)
assert expected_placeholder_type_info in placeholder_data_string, (
f"Expected type info '{expected_placeholder_type_info}' not in placeholder string: {placeholder_data_string}"
)
assert expected_placeholder_repr_info in placeholder_data_string, (
f"Expected repr info '{expected_placeholder_repr_info}' not in placeholder string: {placeholder_data_string}"
)
assert expected_placeholder_suffix in placeholder_data_string, (
f"Placeholder suffix '{expected_placeholder_suffix}' missing in placeholder string: {placeholder_data_string}"
)
assert deserialized_data.get("other_data") == 123, "Other data was altered"
def test_allows_custom_encoder(self, monkeypatch: pytest.MonkeyPatch):
fake_object_encoder = MagicMock(return_value="foobar!")
prefect_object_encoder = MagicMock()
monkeypatch.setattr(
"prefect.serializers.fake_object_encoder",
fake_object_encoder,
raising=False,
)
monkeypatch.setattr(
"prefect.serializers.prefect_json_object_encoder",
prefect_object_encoder,
)
serializer = JSONSerializer(
object_encoder="prefect.serializers.fake_object_encoder"
)
# Encoder hooks are only called for unsupported objects
obj = uuid.uuid4()
result = serializer.dumps(obj)
assert result == b'"foobar!"'
prefect_object_encoder.assert_not_called()
fake_object_encoder.assert_called_once_with(obj)
def test_allows_custom_decoder(self, monkeypatch: pytest.MonkeyPatch):
fake_object_decoder = MagicMock(return_value="test")
prefect_object_decoder = MagicMock()
monkeypatch.setattr(
"prefect.serializers.fake_object_decoder",
fake_object_decoder,
raising=False,
)
monkeypatch.setattr(
"prefect.serializers.prefect_json_object_decoder",
prefect_object_decoder,
)
serializer = JSONSerializer(
object_decoder="prefect.serializers.fake_object_decoder"
)
# Decoder hooks are only called for dicts
assert serializer.loads(json.dumps({"foo": "bar"}).encode()) == "test"
fake_object_decoder.assert_called_once_with({"foo": "bar"})
prefect_object_decoder.assert_not_called()
def test_allows_custom_kwargs(self, monkeypatch: pytest.MonkeyPatch):
dumps_mock = MagicMock()
loads_mock = MagicMock()
monkeypatch.setattr("json.dumps", dumps_mock)
monkeypatch.setattr("json.loads", loads_mock)
serializer = JSONSerializer(
dumps_kwargs={"foo": "bar"}, loads_kwargs={"bar": "foo"}
)
serializer.dumps("test")
serializer.loads(b"test")
dumps_mock.assert_called_once_with(
"test", default=prefect_json_object_encoder, foo="bar"
)
loads_mock.assert_called_once_with(
"test", object_hook=prefect_json_object_decoder, bar="foo"
)
def test_does_not_allow_object_hook_collision(self):
with pytest.raises(ValidationError):
JSONSerializer(loads_kwargs={"object_hook": "foo"})
def test_does_not_allow_default_collision(self):
with pytest.raises(ValidationError):
JSONSerializer(dumps_kwargs={"default": "foo"})
|
TestJSONSerializer
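A minimal round-trip sketch of the serializer exercised above, assuming a stock Prefect install.
```python
# Round-trip sketch of the serializer exercised above (stock Prefect install assumed).
from prefect.serializers import JSONSerializer
serializer = JSONSerializer()
payload = {"answer": 42, "tags": ["a", "b"]}
assert serializer.loads(serializer.dumps(payload)) == payload
```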
|
python
|
pytorch__pytorch
|
torch/distributed/_tools/ilp_utils.py
|
{
"start": 594,
"end": 2080
}
|
class ____(TypedDict):
fqn: str
# per-module params
param_per_module: int
# per-module grads
grad_per_module: int
# total accumulated gradients up to and including this module
grad_total: int
# per module fw activation size (excluding input and output)
act_fw_per_module: int
# per module bw activation size during peak_bw
act_bw_per_module: int
# per module activation grad size during peak_bw
act_grad_per_module: int
# total activation size up to but excluding the current module
# includes input of the current module (i.e., output of previous module)
act_total: int
# Inputs to the module
input_per_module: int
# Outputs of the module
output_per_module: int
# Total fw run-time of the module
fw_runtime_per_module: float
# Total bw run-time of the module
bw_runtime_per_module: float
# Is this module a leaf module
is_leaf: bool
# Total ac run-time of the module
sac_runtime: float
# Total ac_memory for the module
sac_memory: int
# Number of piecewise-linear functions used for approximating ac tradeoff curve
n_segments: int
# Slopes of the piecewise-linear functions
slopes: list[float]
# Intercepts of the piecewise-linear functions
intercepts: list[float]
# X breakpoints of the piecewise-linear functions
breakpoints: list[float]
# Original trade-off curves
tradeoff_curve: OrderedDict[float, float]
|
ModStats
|
python
|
huggingface__transformers
|
src/transformers/models/llama/tokenization_llama.py
|
{
"start": 1661,
"end": 7565
}
|
class ____(TokenizersBackend):
"""
Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.
Notably, it uses ByteFallback and no normalization.
```python
>>> from transformers import LlamaTokenizer
>>> tokenizer = LlamaTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
>>> tokenizer.encode("Hello this is a test")
[1, 15043, 445, 338, 263, 1243]
```
If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
values of the first token and final token of an encoded sequence will not be correct). For more details, check out
the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
extra spaces.
unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
The end of sequence token.
add_bos_token (`bool`, *optional*, defaults to `True`):
Whether or not to add an `bos_token` at the start of sequences.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an `eos_token` at the end of sequences.
use_default_system_prompt (`bool`, *optional*, defaults to `False`):
Whether or not the default system prompt for Llama should be used
add_prefix_space (`bool`, *optional*):
Whether or not the tokenizer should automatically add a prefix space
"""
vocab_files_names = VOCAB_FILES_NAMES
padding_side = "left"
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
clean_up_tokenization_spaces=False,
unk_token="<unk>",
bos_token="<s>",
eos_token="</s>",
add_bos_token=True,
add_eos_token=False,
use_default_system_prompt=False,
legacy=False,
add_prefix_space=None,
vocab=None,
merges=None,
**kwargs,
):
self.add_prefix_space = add_prefix_space if add_prefix_space is not None else True
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
str(unk_token): 0,
str(bos_token): 1,
str(eos_token): 2,
}
special_tokens = {str(eos_token), str(bos_token), str(unk_token)}
filtered_vocab = {t: i for t, i in self._vocab.items() if t not in special_tokens}
if merges is not None:
self._merges = [tuple(merge) if isinstance(merge, list) else merge for merge in merges]
else:
self._merges = generate_merges(filtered_vocab)
self._tokenizer = Tokenizer(
BPE(vocab=self._vocab, merges=self._merges, fuse_unk=True, byte_fallback=True, dropout=None)
)
self._tokenizer.normalizer = None
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(
replacement="▁", prepend_scheme=_get_prepend_scheme(self.add_prefix_space, self), split=False
)
sequence = [
decoders.Replace("▁", " "),
decoders.ByteFallback(),
decoders.Fuse(),
]
if self.add_prefix_space:
sequence += [decoders.Strip(content=" ", left=1)]
self._tokenizer.decoder = decoders.Sequence(sequence)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
use_default_system_prompt=use_default_system_prompt,
add_prefix_space=add_prefix_space,
**kwargs,
)
self._add_bos_token = add_bos_token
self._add_eos_token = add_eos_token
self.use_default_system_prompt = use_default_system_prompt
self._post_init()
def _post_init(self):
"""Post-initialization setup that needs to run after _tokenizer is set."""
# Only set pre_tokenizer/normalizer for Llama-3 style tokenizers (use Sequence)
pre_tok = self._tokenizer.pre_tokenizer
if pre_tok is None or type(pre_tok).__name__ != "Sequence":
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(
replacement="▁", prepend_scheme="first", split=False
)
self._tokenizer.normalizer = None
self.add_tokens([AddedToken(token, special=True) for token in self.all_special_tokens])
super()._post_init()
self.update_post_processor()
__all__ = ["LlamaTokenizer", "LlamaTokenizerFast"]
# Backward alias
LlamaTokenizerFast = LlamaTokenizer
|
LlamaTokenizer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/utils/data_utils.py
|
{
"start": 12884,
"end": 16922
}
|
class ____(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`.
The method `__getitem__` should return a complete batch.
Notes:
`Sequence` is a safer way to do multiprocessing. This structure guarantees
that the network will only train once
on each sample per epoch, which is not the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Args:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
Number of batches in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
Create a generator that iterates over the Sequence.
for item in (self[i] for i in range(len(self))):
yield item
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Args:
seq: `Sequence` instance.
Yields:
Batches of data from the `Sequence`.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_WORKER_IDS = set()
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with _FORCE_THREADPOOL_LOCK:
global _FORCE_THREADPOOL
old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
out = f(*args, **kwargs)
_FORCE_THREADPOOL = old_force_threadpool
return out
return wrapped
def get_pool_class(use_multiprocessing):
global _FORCE_THREADPOOL
if not use_multiprocessing or _FORCE_THREADPOOL:
return multiprocessing.dummy.Pool # ThreadPool
return multiprocessing.Pool
def get_worker_id_queue():
"""Lazily create the queue to track worker ids."""
global _WORKER_ID_QUEUE
if _WORKER_ID_QUEUE is None:
_WORKER_ID_QUEUE = multiprocessing.Queue()
return _WORKER_ID_QUEUE
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Args:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
|
Sequence
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-changing-keys.py
|
{
"start": 38,
"end": 238
}
|
class ____(object):
def countKeyChanges(self, s):
"""
:type s: str
:rtype: int
"""
return sum(s[i].lower() != s[i+1].lower() for i in xrange(len(s)-1))
|
Solution
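A worked check of the one-liner above, written with Python 3's `range` rather than the snippet's `xrange`.
```python
# Worked check of the one-liner above, using Python 3's range instead of xrange.
s = "aAbBcC"
# pairs: aA (same key), Ab (change), bB (same), Bc (change), cC (same) -> 2 changes
assert sum(s[i].lower() != s[i + 1].lower() for i in range(len(s) - 1)) == 2
```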
|
python
|
eventlet__eventlet
|
eventlet/green/http/client.py
|
{
"start": 57274,
"end": 57442
}
|
class ____(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
|
HTTPException
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_table03.py
|
{
"start": 315,
"end": 1708
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_table03.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<a:pPr"]}
def test_create_file(self):
"""Test XlsxWriter chart axis table properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [108636032, 108643840]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_table(
{
"vertical": False,
"horizontal": False,
"outline": False,
"show_keys": True,
"font": {"bold": True, "italic": True, "color": "red", "baseline": -1},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django__django
|
tests/db_functions/text/test_reverse.py
|
{
"start": 250,
"end": 2367
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.john = Author.objects.create(name="John Smith", alias="smithj")
cls.elena = Author.objects.create(name="Élena Jordan", alias="elena")
cls.python = Author.objects.create(name="パイソン")
def test_null(self):
author = Author.objects.annotate(backward=Reverse("alias")).get(
pk=self.python.pk
)
self.assertEqual(
author.backward,
"" if connection.features.interprets_empty_strings_as_nulls else None,
)
def test_basic(self):
authors = Author.objects.annotate(
backward=Reverse("name"),
constant=Reverse(Value("static string")),
)
self.assertQuerySetEqual(
authors,
[
("John Smith", "htimS nhoJ", "gnirts citats"),
("Élena Jordan", "nadroJ anelÉ", "gnirts citats"),
("パイソン", "ンソイパ", "gnirts citats"),
],
lambda a: (a.name, a.backward, a.constant),
ordered=False,
)
def test_transform(self):
with register_lookup(CharField, Reverse):
authors = Author.objects.all()
self.assertCountEqual(
authors.filter(name__reverse=self.john.name[::-1]), [self.john]
)
self.assertCountEqual(
authors.exclude(name__reverse=self.john.name[::-1]),
[self.elena, self.python],
)
def test_expressions(self):
author = Author.objects.annotate(backward=Reverse(Trim("name"))).get(
pk=self.john.pk
)
self.assertEqual(author.backward, self.john.name[::-1])
with register_lookup(CharField, Reverse), register_lookup(CharField, Length):
authors = Author.objects.all()
self.assertCountEqual(
authors.filter(name__reverse__length__gt=7), [self.john, self.elena]
)
self.assertCountEqual(
authors.exclude(name__reverse__length__gt=7), [self.python]
)
|
ReverseTests
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_rbd_volume_source.py
|
{
"start": 383,
"end": 10731
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'image': 'str',
'keyring': 'str',
'monitors': 'list[str]',
'pool': 'str',
'read_only': 'bool',
'secret_ref': 'V1LocalObjectReference',
'user': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'image': 'image',
'keyring': 'keyring',
'monitors': 'monitors',
'pool': 'pool',
'read_only': 'readOnly',
'secret_ref': 'secretRef',
'user': 'user'
}
def __init__(self, fs_type=None, image=None, keyring=None, monitors=None, pool=None, read_only=None, secret_ref=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1RBDVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._image = None
self._keyring = None
self._monitors = None
self._pool = None
self._read_only = None
self._secret_ref = None
self._user = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
self.image = image
if keyring is not None:
self.keyring = keyring
self.monitors = monitors
if pool is not None:
self.pool = pool
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
if user is not None:
self.user = user
@property
def fs_type(self):
"""Gets the fs_type of this V1RBDVolumeSource. # noqa: E501
fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
:return: The fs_type of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1RBDVolumeSource.
fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
:param fs_type: The fs_type of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def image(self):
"""Gets the image of this V1RBDVolumeSource. # noqa: E501
image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The image of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1RBDVolumeSource.
image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param image: The image of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and image is None: # noqa: E501
raise ValueError("Invalid value for `image`, must not be `None`") # noqa: E501
self._image = image
@property
def keyring(self):
"""Gets the keyring of this V1RBDVolumeSource. # noqa: E501
keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The keyring of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._keyring
@keyring.setter
def keyring(self, keyring):
"""Sets the keyring of this V1RBDVolumeSource.
keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param keyring: The keyring of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._keyring = keyring
@property
def monitors(self):
"""Gets the monitors of this V1RBDVolumeSource. # noqa: E501
monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The monitors of this V1RBDVolumeSource. # noqa: E501
:rtype: list[str]
"""
return self._monitors
@monitors.setter
def monitors(self, monitors):
"""Sets the monitors of this V1RBDVolumeSource.
monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param monitors: The monitors of this V1RBDVolumeSource. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and monitors is None: # noqa: E501
raise ValueError("Invalid value for `monitors`, must not be `None`") # noqa: E501
self._monitors = monitors
@property
def pool(self):
"""Gets the pool of this V1RBDVolumeSource. # noqa: E501
pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The pool of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1RBDVolumeSource.
pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param pool: The pool of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._pool = pool
@property
def read_only(self):
"""Gets the read_only of this V1RBDVolumeSource. # noqa: E501
readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The read_only of this V1RBDVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1RBDVolumeSource.
readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param read_only: The read_only of this V1RBDVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""Gets the secret_ref of this V1RBDVolumeSource. # noqa: E501
:return: The secret_ref of this V1RBDVolumeSource. # noqa: E501
:rtype: V1LocalObjectReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1RBDVolumeSource.
:param secret_ref: The secret_ref of this V1RBDVolumeSource. # noqa: E501
:type: V1LocalObjectReference
"""
self._secret_ref = secret_ref
@property
def user(self):
"""Gets the user of this V1RBDVolumeSource. # noqa: E501
user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The user of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1RBDVolumeSource.
user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param user: The user of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1RBDVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1RBDVolumeSource):
return True
return self.to_dict() != other.to_dict()
|
V1RBDVolumeSource
|
python
|
scipy__scipy
|
scipy/stats/tests/test_distributions.py
|
{
"start": 51085,
"end": 54095
}
|
class ____:
def setup_method(self):
self.rng = np.random.default_rng(7672986002)
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50), random_state=self.rng)
assert_(np.all(vals >= 0))
assert_(np.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75, random_state=self.rng)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3, random_state=self.rng)
assert_(isinstance(val, np.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_rvs_9313(self):
# previously, RVS were converted to `np.int32` on some platforms,
# causing overflow for moderately large integer output (gh-9313).
# Check that this is resolved to the extent possible w/ `np.int64`.
rvs = stats.geom.rvs(np.exp(-35), size=5, random_state=self.rng)
assert rvs.dtype == np.int64
assert np.all(rvs > np.iinfo(np.int32).max)
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
def test_ppf_underflow(self):
# this should not underflow
assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
def test_entropy_gh18226(self):
# gh-18226 reported that `geom.entropy` produced a warning and
# inaccurate output for small p. Check that this is resolved.
h = stats.geom(0.0146).entropy()
assert_allclose(h, 5.219397961962308, rtol=1e-15)
def test_rvs_gh18372(self):
# gh-18372 reported that `geom.rvs` could produce negative numbers,
# with `RandomState` PRNG, but the support is positive integers.
# Check that this is resolved.
random_state = np.random.RandomState(294582935)
assert (stats.geom.rvs(1e-30, size=10, random_state=random_state) > 0).all()
|
TestGeom
|
python
|
ray-project__ray
|
python/ray/autoscaler/local/coordinator_server.py
|
{
"start": 2545,
"end": 4471
}
|
class ____(threading.Thread):
"""Initializes HTTPServer and serves CoordinatorSenderNodeProvider forever.
It handles requests from the remote CoordinatorSenderNodeProvider. The
requests are forwarded to LocalNodeProvider function calls.
"""
def __init__(self, list_of_node_ips, host, port):
"""Initialize HTTPServer and serve forever by invoking self.run()."""
logger.info(
"Running on prem coordinator server on address " + build_address(host, port)
)
threading.Thread.__init__(self)
self._port = port
self._list_of_node_ips = list_of_node_ips
address = (host, self._port)
config = {"list_of_node_ips": list_of_node_ips}
self._server = HTTPServer(
address,
runner_handler(LocalNodeProvider(config, cluster_name=None)),
)
self.start()
def run(self):
self._server.serve_forever()
def shutdown(self):
"""Shutdown the underlying server."""
self._server.shutdown()
self._server.server_close()
def main():
parser = argparse.ArgumentParser(
description="Please provide a list of node ips and port."
)
parser.add_argument(
"--ips", required=True, help="Comma separated list of node ips."
)
parser.add_argument(
"--host",
type=str,
required=False,
help="The Host on which the coordinator listens.",
)
parser.add_argument(
"--port",
type=int,
required=True,
help="The port on which the coordinator listens.",
)
args = parser.parse_args()
host = args.host or socket.gethostbyname(socket.gethostname())
list_of_node_ips = args.ips.split(",")
OnPremCoordinatorServer(
list_of_node_ips=list_of_node_ips,
host=host,
port=args.port,
)
if __name__ == "__main__":
main()
|
OnPremCoordinatorServer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/error_utils.py
|
{
"start": 4376,
"end": 4663
}
|
class ____(KeyError):
def __init__(self, message, original_key):
super(MultilineMessageKeyError, self).__init__(original_key)
self.__message = message
def __str__(self):
return self.__message
MultilineMessageKeyError.__name__ = KeyError.__name__
|
MultilineMessageKeyError
|
python
|
sqlalchemy__sqlalchemy
|
test/base/test_except.py
|
{
"start": 13768,
"end": 17300
}
|
class ____(Exception):
def __init__(self, msg):
self.msg = msg
def __eq__(self, other):
return isinstance(other, EqException) and other.msg == self.msg
ALL_EXC = [
(
[sa_exceptions.SQLAlchemyError],
[lambda cls: cls(1, 2, code="42")],
),
([sa_exceptions.ObjectNotExecutableError], [lambda cls: cls("xx")]),
(
[sa_exceptions.EmulatedDBAPIException],
[lambda cls: cls("xx", EqException("original"))],
),
(
[
sa_exceptions.ArgumentError,
sa_exceptions.DuplicateColumnError,
sa_exceptions.ConstraintColumnNotFoundError,
sa_exceptions.NoSuchModuleError,
sa_exceptions.NoForeignKeysError,
sa_exceptions.AmbiguousForeignKeysError,
sa_exceptions.CompileError,
sa_exceptions.IdentifierError,
sa_exceptions.DisconnectionError,
sa_exceptions.InvalidatePoolError,
sa_exceptions.TimeoutError,
sa_exceptions.InvalidRequestError,
sa_exceptions.IllegalStateChangeError,
sa_exceptions.NoInspectionAvailable,
sa_exceptions.PendingRollbackError,
sa_exceptions.ResourceClosedError,
sa_exceptions.NoSuchColumnError,
sa_exceptions.NoResultFound,
sa_exceptions.MultipleResultsFound,
sa_exceptions.NoReferenceError,
sa_exceptions.AwaitRequired,
sa_exceptions.MissingGreenlet,
sa_exceptions.NoSuchTableError,
sa_exceptions.UnreflectableTableError,
sa_exceptions.UnboundExecutionError,
],
[lambda cls: cls("foo", code="42")],
),
(
[sa_exceptions.CircularDependencyError],
[
lambda cls: cls("msg", ["cycles"], "edges"),
lambda cls: cls("msg", ["cycles"], "edges", "xx", "zz"),
],
),
(
[sa_exceptions.UnsupportedCompilationError],
[lambda cls: cls("cmp", "el"), lambda cls: cls("cmp", "el", "msg")],
),
(
[sa_exceptions.NoReferencedTableError],
[lambda cls: cls("msg", "tbl")],
),
(
[sa_exceptions.NoReferencedColumnError],
[lambda cls: cls("msg", "tbl", "col")],
),
(
[sa_exceptions.StatementError],
[
lambda cls: cls("msg", "stmt", (), "orig"),
lambda cls: cls("msg", "stmt", (), "orig", True, "99", True),
details,
],
),
(
[
sa_exceptions.DBAPIError,
sa_exceptions.InterfaceError,
sa_exceptions.DatabaseError,
sa_exceptions.DataError,
sa_exceptions.OperationalError,
sa_exceptions.IntegrityError,
sa_exceptions.InternalError,
sa_exceptions.ProgrammingError,
sa_exceptions.NotSupportedError,
],
[
lambda cls: cls("stmt", (), "orig"),
lambda cls: cls("stmt", (), "orig", True, True, "99", True),
details,
],
),
(
[
sa_exceptions.SADeprecationWarning,
sa_exceptions.Base20DeprecationWarning,
sa_exceptions.LegacyAPIWarning,
sa_exceptions.MovedIn20Warning,
sa_exceptions.SAWarning,
],
[lambda cls: cls("foo", code="42")],
),
([sa_exceptions.SAPendingDeprecationWarning], [lambda cls: cls(1, 2, 3)]),
([sa_exceptions.SATestSuiteWarning], [lambda cls: cls()]),
]
|
EqException
|
python
|
huggingface__transformers
|
src/transformers/models/blip/modeling_blip_text.py
|
{
"start": 15072,
"end": 18745
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache:
# The model acts as encoder decoder but is not an encoder decoder. So we cast all cache objects to
# `EncoderDecoderCache` type assuming that the incoming cache is from `self_attention`
if isinstance(past_key_values, DynamicCache):
past_key_values = EncoderDecoderCache(past_key_values, DynamicCache(config=self.config))
elif past_key_values is None:
past_key_values = EncoderDecoderCache(
DynamicCache(config=self.config), DynamicCache(config=self.config)
)
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_values,
output_attentions,
cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
past_key_values,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->BlipText
|
BlipTextEncoder
|
python
|
django__django
|
tests/model_fields/tests.py
|
{
"start": 15957,
"end": 17187
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.foo1 = Foo.objects.create(a="a", d="12.34")
cls.foo2 = Foo.objects.create(a="b", d="12.34")
cls.bar1 = Bar.objects.create(a=cls.foo1, b="b")
cls.bar2 = Bar.objects.create(a=cls.foo2, b="a")
cls.field = Bar._meta.get_field("a")
def assertChoicesEqual(self, choices, objs):
self.assertCountEqual(choices, [(obj.pk, str(obj)) for obj in objs])
def test_get_choices(self):
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, limit_choices_to={"a": "a"}),
[self.foo1],
)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, limit_choices_to={}),
[self.foo1, self.foo2],
)
def test_get_choices_reverse_related_field(self):
field = self.field.remote_field
self.assertChoicesEqual(
field.get_choices(include_blank=False, limit_choices_to={"b": "b"}),
[self.bar1],
)
self.assertChoicesEqual(
field.get_choices(include_blank=False, limit_choices_to={}),
[self.bar1, self.bar2],
)
|
GetChoicesLimitChoicesToTests
|
python
|
pola-rs__polars
|
py-polars/src/polars/io/iceberg/dataset.py
|
{
"start": 18005,
"end": 19553
}
|
class ____(_ResolvedScanDataBase):
"""Resolved parameters for a native Iceberg scan."""
sources: list[str]
projected_iceberg_schema: pyiceberg.schema.Schema
column_mapping: pa.Schema
default_values: dict[int, pl.Series | str]
deletion_files: dict[int, list[str]]
min_max_statistics: pl.DataFrame | None
# This is here for test purposes, as the `min_max_statistics` on this
# dataclass contain coalesced values from `default_values`, a test may
# access the statistics loader directly to inspect the values before
# coalescing.
statistics_loader: IcebergStatisticsLoader | None
storage_options: dict[str, str] | None
# (physical, deleted)
row_count: tuple[int, int] | None
_snapshot_id_key: str
def to_lazyframe(self) -> pl.LazyFrame:
from polars.io.parquet.functions import scan_parquet
return scan_parquet(
self.sources,
cast_options=ScanCastOptions._default_iceberg(),
missing_columns="insert",
extra_columns="ignore",
storage_options=self.storage_options,
_column_mapping=("iceberg-column-mapping", self.column_mapping),
_default_values=("iceberg", self.default_values),
_deletion_files=("iceberg-position-delete", self.deletion_files),
_table_statistics=self.min_max_statistics,
_row_count=self.row_count,
)
@property
def snapshot_id_key(self) -> str:
return self._snapshot_id_key
@dataclass
|
_NativeIcebergScanData
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess18.py
|
{
"start": 319,
"end": 663
}
|
class ____(Generic[_T]):
thing: _T
def __getitem__(self, key: str) -> _T:
return self.thing
def __getattr__(self, key: str) -> _T:
return self.thing
c1: CollectionThing[Descriptor] = CollectionThing()
reveal_type(c1["key"], expected_text="Descriptor")
reveal_type(c1.key, expected_text="Descriptor")
|
CollectionThing
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/lite.py
|
{
"start": 70729,
"end": 76773
}
|
class ____(TFLiteConverterBaseV2):
"""Converts the given frozen graph into TensorFlow Lite model."""
def __init__(self, funcs, trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteFrozenGraphConverterV2, self).__init__()
self._funcs = funcs
self._trackable_obj = trackable_obj
self.experimental_lower_to_saved_model = True
@convert_phase(
Component.PREPARE_TF_MODEL, SubComponent.FREEZE_CONCRETE_FUNCTION
)
def _freeze_concrete_function(self):
"""Convert the given ConcreteFunction to frozen graph.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
frozen_func: The frozen ConcreteFunction.
Raises:
ValueError: none or multiple ConcreteFunctions provided.
"""
if len(self._funcs) == 0: # pylint: disable=g-explicit-length-test
raise ValueError("No ConcreteFunction is specified.")
if len(self._funcs) > 1:
raise ValueError(
"This converter can only convert a single "
"ConcreteFunction. Converting multiple functions is "
"under development."
)
frozen_func, graph_def = (
_convert_to_constants.convert_variables_to_constants_v2_as_graph(
self._funcs[0], lower_control_flow=False
)
)
input_tensors = [
tensor
for tensor in frozen_func.inputs
if tensor.dtype != _dtypes.resource
]
output_tensors = frozen_func.outputs
return graph_def, input_tensors, output_tensors, frozen_func
@convert_phase(
Component.PREPARE_TF_MODEL,
SubComponent.CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL,
)
def _convert_concrete_functions_to_saved_model(self, output_dir):
"""Save concrete functions to the SavedModel format.
Args:
output_dir: The output directory to save the SavedModel.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
"""
if len(self._funcs) == 0: # pylint: disable=g-explicit-length-test
raise ValueError("No ConcreteFunction is specified.")
if not self.experimental_lower_to_saved_model:
return None, None, None
# Without the provided trackable obj, it is not able to serialize the given
# concrete functions as a saved model format. Also when trackable obj is
# a function, use the original concrete function conversion pipeline.
if not self._trackable_obj or isinstance(
self._trackable_obj,
(_function.ConcreteFunction, _def_function.Function),
):
return None, None, None
signatures = {}
signature_keys = []
try:
if len(self._funcs) == 1:
signatures[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
self._funcs[0]
)
signature_keys = [
_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
else:
for func in self._funcs:
signatures[func.graph.name] = func
signature_keys.append(func.graph.name)
_save.save(
self._trackable_obj,
output_dir,
signatures=signatures,
options=_save_options.SaveOptions(save_debug_info=True),
)
except Exception: # pylint: disable=broad-except
# When storing the given concrete function to a saved model is failed,
# let's use original concrete function conversion pipeline.
return None, None, None
self.saved_model_dir = output_dir
self._saved_model_tags = set([_tag_constants.SERVING])
self._saved_model_exported_names = signature_keys
self._parse_saved_model_args(always_enable_saved_model_import=True)
if self.saved_model_dir:
graph_def, input_tensors, output_tensors = self._load_saved_model(
self.saved_model_dir, self._saved_model_tags
)
self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
return graph_def, input_tensors, output_tensors
return None, None, None
def _convert_as_saved_model(self):
"""Converts the given concrete functions as a saved model format.
Returns:
The converted data in serialized format.
"""
temp_dir = tempfile.mkdtemp()
try:
graph_def, input_tensors, _ = (
self._convert_concrete_functions_to_saved_model(temp_dir)
)
if self.saved_model_dir:
self._validate_inputs(graph_def, input_tensors)
return self._convert_from_saved_model(graph_def)
finally:
shutil.rmtree(temp_dir, True)
return None
@_export_metrics
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
if self.experimental_lower_to_saved_model:
saved_model_convert_result = self._convert_as_saved_model()
if saved_model_convert_result:
return saved_model_convert_result
graph_def, input_tensors, output_tensors, frozen_func = (
self._freeze_concrete_function()
)
graph_def = self._optimize_tf_model(
graph_def, input_tensors, output_tensors, frozen_func
)
return super(TFLiteFrozenGraphConverterV2, self).convert(
graph_def, input_tensors, output_tensors
)
|
TFLiteFrozenGraphConverterV2
|
python
|
scikit-learn__scikit-learn
|
sklearn/_loss/link.py
|
{
"start": 5197,
"end": 5593
}
|
class ____(BaseLink):
"""Half the logit link function g(x)=1/2 * logit(x).
Used for the exponential loss.
"""
interval_y_pred = Interval(0, 1, False, False)
def link(self, y_pred, out=None):
out = logit(y_pred, out=out)
out *= 0.5
return out
def inverse(self, raw_prediction, out=None):
return expit(2 * raw_prediction, out)
|
HalfLogitLink
|
python
|
pypa__setuptools
|
setuptools/_vendor/typeguard/_memo.py
|
{
"start": 130,
"end": 1303
}
|
class ____:
"""
Contains information necessary for type checkers to do their work.
.. attribute:: globals
:type: dict[str, Any]
Dictionary of global variables to use for resolving forward references.
.. attribute:: locals
:type: dict[str, Any]
Dictionary of local variables to use for resolving forward references.
.. attribute:: self_type
:type: type | None
When running type checks within an instance method or class method, this is the
class object that the first argument (usually named ``self`` or ``cls``) refers
to.
.. attribute:: config
:type: TypeCheckConfiguration
Contains the configuration for a particular set of type checking operations.
"""
__slots__ = "globals", "locals", "self_type", "config"
def __init__(
self,
globals: dict[str, Any],
locals: dict[str, Any],
*,
self_type: type | None = None,
config: TypeCheckConfiguration = global_config,
):
self.globals = globals
self.locals = locals
self.self_type = self_type
self.config = config
|
TypeCheckMemo
|
python
|
walkccc__LeetCode
|
solutions/1408. String Matching in an Array/1408-2.py
|
{
"start": 0,
"end": 84
}
|
class ____:
def __init__(self):
self.children: dict[str, TrieNode] = {}
|
TrieNode
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/suite/test_sequence.py
|
{
"start": 6040,
"end": 9704
}
|
class ____(fixtures.TablesTest):
run_deletes = None
__requires__ = ("sequences",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
normalize_sequence(config, Sequence("user_id_seq", metadata=metadata))
normalize_sequence(
config,
Sequence(
"other_seq",
metadata=metadata,
nomaxvalue=True,
nominvalue=True,
),
)
if testing.requires.schemas.enabled:
normalize_sequence(
config,
Sequence(
"user_id_seq", schema=config.test_schema, metadata=metadata
),
)
normalize_sequence(
config,
Sequence(
"schema_seq", schema=config.test_schema, metadata=metadata
),
)
Table(
"user_id_table",
metadata,
Column("id", Integer, primary_key=True),
)
def test_has_sequence(self, connection):
eq_(inspect(connection).has_sequence("user_id_seq"), True)
def test_has_sequence_cache(self, connection, metadata):
insp = inspect(connection)
eq_(insp.has_sequence("user_id_seq"), True)
ss = normalize_sequence(config, Sequence("new_seq", metadata=metadata))
eq_(insp.has_sequence("new_seq"), False)
ss.create(connection)
try:
eq_(insp.has_sequence("new_seq"), False)
insp.clear_cache()
eq_(insp.has_sequence("new_seq"), True)
finally:
ss.drop(connection)
def test_has_sequence_other_object(self, connection):
eq_(inspect(connection).has_sequence("user_id_table"), False)
@testing.requires.schemas
def test_has_sequence_schema(self, connection):
eq_(
inspect(connection).has_sequence(
"user_id_seq", schema=config.test_schema
),
True,
)
def test_has_sequence_neg(self, connection):
eq_(inspect(connection).has_sequence("some_sequence"), False)
@testing.requires.schemas
def test_has_sequence_schemas_neg(self, connection):
eq_(
inspect(connection).has_sequence(
"some_sequence", schema=config.test_schema
),
False,
)
@testing.requires.schemas
def test_has_sequence_default_not_in_remote(self, connection):
eq_(
inspect(connection).has_sequence(
"other_sequence", schema=config.test_schema
),
False,
)
@testing.requires.schemas
def test_has_sequence_remote_not_in_default(self, connection):
eq_(inspect(connection).has_sequence("schema_seq"), False)
def test_get_sequence_names(self, connection):
exp = {"other_seq", "user_id_seq"}
res = set(inspect(connection).get_sequence_names())
is_true(res.intersection(exp) == exp)
is_true("schema_seq" not in res)
@testing.requires.schemas
def test_get_sequence_names_no_sequence_schema(self, connection):
eq_(
inspect(connection).get_sequence_names(
schema=config.test_schema_2
),
[],
)
@testing.requires.schemas
def test_get_sequence_names_sequences_schema(self, connection):
eq_(
sorted(
inspect(connection).get_sequence_names(
schema=config.test_schema
)
),
["schema_seq", "user_id_seq"],
)
|
HasSequenceTest
|
python
|
numpy__numpy
|
numpy/matrixlib/tests/test_defmatrix.py
|
{
"start": 6906,
"end": 8601
}
|
class ____:
def test_basic(self):
import numpy.linalg as linalg
A = np.array([[1., 2.], [3., 4.]])
mA = matrix(A)
B = np.identity(2)
for i in range(6):
assert_(np.allclose((mA ** i).A, B))
B = np.dot(B, A)
Ainv = linalg.inv(A)
B = np.identity(2)
for i in range(6):
assert_(np.allclose((mA ** -i).A, B))
B = np.dot(B, Ainv)
assert_(np.allclose((mA * mA).A, np.dot(A, A)))
assert_(np.allclose((mA + mA).A, (A + A)))
assert_(np.allclose((3 * mA).A, (3 * A)))
mA2 = matrix(A)
mA2 *= 3
assert_(np.allclose(mA2.A, 3 * A))
def test_pow(self):
"""Test raising a matrix to an integer power works as expected."""
m = matrix("1. 2.; 3. 4.")
m2 = m.copy()
m2 **= 2
mi = m.copy()
mi **= -1
m4 = m2.copy()
m4 **= 2
assert_array_almost_equal(m2, m**2)
assert_array_almost_equal(m4, np.dot(m2, m2))
assert_array_almost_equal(np.dot(mi, m), np.eye(2))
def test_scalar_type_pow(self):
m = matrix([[1, 2], [3, 4]])
for scalar_t in [np.int8, np.uint8]:
two = scalar_t(2)
assert_array_almost_equal(m ** 2, m ** two)
def test_notimplemented(self):
'''Check that 'not implemented' operations produce a failure.'''
A = matrix([[1., 2.],
[3., 4.]])
# __rpow__
with assert_raises(TypeError):
1.0**A
# __mul__ with something not a list, ndarray, tuple, or scalar
with assert_raises(TypeError):
A * object()
|
TestAlgebra
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/io/json.py
|
{
"start": 2811,
"end": 5168
}
|
class ____(BaseIO):
fname = "__test__.json"
params = [
["split", "columns", "index", "values", "records"],
["df", "df_date_idx", "df_td_int_ts", "df_int_floats", "df_int_float_str"],
]
param_names = ["orient", "frame"]
def setup(self, orient, frame):
N = 10**5
ncols = 5
index = date_range("20000101", periods=N, freq="h")
timedeltas = timedelta_range(start=1, periods=N, freq="s")
datetimes = date_range(start=1, periods=N, freq="s")
ints = np.random.randint(100000000, size=N)
longints = sys.maxsize * np.random.randint(100000000, size=N)
floats = np.random.randn(N)
strings = Index([f"i-{i}" for i in range(N)], dtype=object)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame(
{
"td_1": timedeltas,
"td_2": timedeltas,
"int_1": ints,
"int_2": ints,
"ts_1": datetimes,
"ts_2": datetimes,
},
index=index,
)
self.df_int_floats = DataFrame(
{
"int_1": ints,
"int_2": ints,
"int_3": ints,
"float_1": floats,
"float_2": floats,
"float_3": floats,
},
index=index,
)
self.df_int_float_str = DataFrame(
{
"int_1": ints,
"int_2": ints,
"float_1": floats,
"float_2": floats,
"str_1": strings,
"str_2": strings,
},
index=index,
)
self.df_longint_float_str = DataFrame(
{
"longint_1": longints,
"longint_2": longints,
"float_1": floats,
"float_2": floats,
"str_1": strings,
"str_2": strings,
},
index=index,
)
def time_to_json(self, orient, frame):
getattr(self, frame).to_json(self.fname, orient=orient)
def peakmem_to_json(self, orient, frame):
getattr(self, frame).to_json(self.fname, orient=orient)
|
ToJSON
|
python
|
PyCQA__pylint
|
tests/utils/unittest_ast_walker.py
|
{
"start": 478,
"end": 2877
}
|
class ____:
class MockLinter:
def __init__(self, msgs: dict[str, bool]) -> None:
self._msgs = msgs
def is_message_enabled(self, msgid: str) -> bool:
return self._msgs.get(msgid, True)
class Checker(BaseChecker):
# pylint: disable-next=super-init-not-called
def __init__(self) -> None:
self.called: set[str] = set()
@only_required_for_messages("first-message")
def visit_module(
self, module: nodes.Module # pylint: disable=unused-argument
) -> None:
self.called.add("module")
@only_required_for_messages("second-message")
def visit_call(self, module: nodes.Call) -> None:
raise NotImplementedError
@only_required_for_messages("second-message", "third-message")
def visit_assignname(
self, module: nodes.AssignName # pylint: disable=unused-argument
) -> None:
self.called.add("assignname")
@only_required_for_messages("second-message")
def leave_assignname(self, module: nodes.AssignName) -> None:
raise NotImplementedError
def test_only_required_for_messages(self) -> None:
linter = self.MockLinter(
{"first-message": True, "second-message": False, "third-message": True}
)
walker = ASTWalker(linter) # type: ignore[arg-type]
checker = self.Checker()
walker.add_checker(checker)
walker.walk(astroid.parse("x = func()"))
assert {"module", "assignname"} == checker.called
def test_deprecated_methods(self) -> None:
class Checker(BaseChecker):
# pylint: disable-next=super-init-not-called
def __init__(self) -> None:
self.called = False
@only_required_for_messages("first-message")
def visit_assname(
self, node: nodes.AssignName # pylint: disable=unused-argument
) -> None:
self.called = True
linter = self.MockLinter({"first-message": True})
walker = ASTWalker(linter) # type: ignore[arg-type]
checker = Checker()
walker.add_checker(checker)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
walker.walk(astroid.parse("x = 1"))
assert not checker.called
|
TestASTWalker
|