| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
ray-project__ray
|
release/release_logs/fetch_release_logs.py
|
{
"start": 1803,
"end": 1990
}
|
class ____:
id: str
number: int
commit: str
job_dict_list: List[Dict]
pipeline: str = BUILDKITE_PIPELINE
organization: str = BUILDKITE_ORGANIZATION
@dataclass
|
Build
|
python
|
cherrypy__cherrypy
|
cherrypy/test/test_tools.py
|
{
"start": 16246,
"end": 16785
}
|
class ____(unittest.TestCase):
def test_login_screen_returns_bytes(self):
"""
login_screen must return bytes even if unicode parameters are passed.
Issue 1132 revealed that login_screen would return unicode if the
username and password were unicode.
"""
sa = cherrypy.lib.cptools.SessionAuth()
res = sa.login_screen(
None,
username=str('nobody'),
password=str('anypass'),
)
self.assertTrue(isinstance(res, bytes))
|
SessionAuthTest
|
python
|
coleifer__peewee
|
playhouse/dataset.py
|
{
"start": 6044,
"end": 10523
}
|
class ____(object):
def __init__(self, dataset, name, model_class):
self.dataset = dataset
self.name = name
if model_class is None:
model_class = self._create_model()
model_class.create_table()
self.dataset._models[name] = model_class
@property
def model_class(self):
return self.dataset._models[self.name]
def __repr__(self):
return '<Table: %s>' % self.name
def __len__(self):
return self.find().count()
def __iter__(self):
return iter(self.find().iterator())
def _create_model(self):
class Meta:
table_name = self.name
return type(
str(self.name),
(self.dataset._base_model,),
{'Meta': Meta})
def create_index(self, columns, unique=False):
index = ModelIndex(self.model_class, columns, unique=unique)
self.model_class.add_index(index)
self.dataset._database.execute(index)
def _guess_field_type(self, value):
if isinstance(value, basestring):
return TextField
if isinstance(value, (datetime.date, datetime.datetime)):
return DateTimeField
elif value is True or value is False:
return BooleanField
elif isinstance(value, int):
return IntegerField
elif isinstance(value, float):
return FloatField
elif isinstance(value, Decimal):
return DecimalField
return TextField
@property
def columns(self):
return [f.name for f in self.model_class._meta.sorted_fields]
def _migrate_new_columns(self, data):
new_keys = set(data) - set(self.model_class._meta.fields)
new_keys -= set(self.model_class._meta.columns)
if new_keys:
operations = []
for key in new_keys:
field_class = self._guess_field_type(data[key])
field = field_class(null=True)
operations.append(
self.dataset._migrator.add_column(self.name, key, field))
field.bind(self.model_class, key)
migrate(*operations)
self.dataset.update_cache(self.name)
def __getitem__(self, item):
try:
return self.model_class[item]
except self.model_class.DoesNotExist:
pass
def __setitem__(self, item, value):
if not isinstance(value, dict):
raise ValueError('Table.__setitem__() value must be a dict')
pk = self.model_class._meta.primary_key
value[pk.name] = item
try:
with self.dataset.transaction() as txn:
self.insert(**value)
except IntegrityError:
self.dataset.update_cache(self.name)
self.update(columns=[pk.name], **value)
def __delitem__(self, item):
del self.model_class[item]
def insert(self, **data):
self._migrate_new_columns(data)
return self.model_class.insert(**data).execute()
def _apply_where(self, query, filters, conjunction=None):
conjunction = conjunction or operator.and_
if filters:
expressions = [
(self.model_class._meta.fields[column] == value)
for column, value in filters.items()]
query = query.where(reduce(conjunction, expressions))
return query
def update(self, columns=None, conjunction=None, **data):
self._migrate_new_columns(data)
filters = {}
if columns:
for column in columns:
filters[column] = data.pop(column)
return self._apply_where(
self.model_class.update(**data),
filters,
conjunction).execute()
def _query(self, **query):
return self._apply_where(self.model_class.select(), query)
def find(self, **query):
return self._query(**query).dicts()
def find_one(self, **query):
try:
return self.find(**query).get()
except self.model_class.DoesNotExist:
return None
def all(self):
return self.find()
def delete(self, **query):
return self._apply_where(self.model_class.delete(), query).execute()
def freeze(self, *args, **kwargs):
return self.dataset.freeze(self.all(), *args, **kwargs)
def thaw(self, *args, **kwargs):
return self.dataset.thaw(self.name, *args, **kwargs)
|
Table
|
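The `Table` wrapper above is what `playhouse.dataset.DataSet` hands back for each database table. A minimal usage sketch, assuming an in-memory SQLite database (table and column names are illustrative):

```python
from playhouse.dataset import DataSet

db = DataSet('sqlite:///:memory:')   # tables/columns are created on demand
users = db['users']                  # returns the Table wrapper above

users.insert(name='Huey', age=3)
users.insert(name='Mickey', age=5, job='driver')  # new column -> auto-migration

print(users.find_one(name='Huey'))   # e.g. {'id': 1, 'name': 'Huey', 'age': 3, 'job': None}
print(len(users))                    # 2
```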
python
|
facebookresearch__faiss
|
tests/test_ivflib.py
|
{
"start": 4470,
"end": 5564
}
|
class ____(unittest.TestCase):
"""Test in case of nprobe > nlist."""
def test_small_data(self):
d = 20
# nlist = (2^4)^2 = 256
index = faiss.index_factory(d, 'IMI2x4,Flat')
# When nprobe >= nlist, it is equivalent to an IndexFlat.
rs = np.random.RandomState(123)
xt = rs.rand(100, d).astype('float32')
xb = rs.rand(1000, d).astype('float32')
index.train(xt)
index.add(xb)
index.nprobe = 2048
k = 5
xq = rs.rand(10, d).astype('float32')
# test kNN search
D, I = index.search(xq, k)
ref_D, ref_I = faiss.knn(xq, xb, k)
assert np.all(D == ref_D)
assert np.all(I == ref_I)
# test range search
thresh = 0.1 # *squared* distance
lims, D, I = index.range_search(xq, thresh)
ref_index = faiss.IndexFlat(d)
ref_index.add(xb)
ref_lims, ref_D, ref_I = ref_index.range_search(xq, thresh)
assert np.all(lims == ref_lims)
assert np.all(D == ref_D)
assert np.all(I == ref_I)
|
TestSmallData
|
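The test above uses `faiss.knn` as its brute-force reference, which is valid because setting `nprobe >= nlist` makes the IMI index scan every inverted list. A standalone sketch of just that reference equivalence (sizes are arbitrary):

```python
import numpy as np
import faiss

rs = np.random.RandomState(0)
xb = rs.rand(100, 8).astype('float32')
xq = rs.rand(3, 8).astype('float32')

D, I = faiss.knn(xq, xb, 2)     # brute-force k-NN reference
index = faiss.IndexFlat(8)      # exhaustive L2 index
index.add(xb)
D2, I2 = index.search(xq, 2)
assert np.allclose(D, D2) and np.array_equal(I, I2)
```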
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_config/source.py
|
{
"start": 2108,
"end": 3147
}
|
class ____(ScalarUnion):
def __init__(self):
super().__init__(
scalar_type=bool,
non_scalar_schema=Selector({"env": str}),
_key="BoolSourceType",
)
def post_process(self, value):
check.param_invariant(isinstance(value, (dict, bool)), "value", "Should be pre-validated")
if not isinstance(value, dict):
return value
check.invariant(len(value) == 1, "Selector should have one entry")
key, cfg = next(iter(value.items()))
check.invariant(key == "env", "Only valid key is env")
value = _ensure_env_variable(cfg)
try:
return get_boolean_string_value(value)
except ValueError as e:
raise PostProcessingError(
f'Value "{value}" stored in env variable "{cfg}" cannot be coerced into an bool.'
) from e
StringSource: StringSourceType = StringSourceType()
IntSource: IntSourceType = IntSourceType()
BoolSource: BoolSourceType = BoolSourceType()
|
BoolSourceType
|
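`BoolSource`, instantiated at the end of the snippet, lets a config field accept either a literal boolean or an `{"env": ...}` selector that is resolved and coerced at post-processing time. A hedged sketch of typical use in a config schema (op and field names are illustrative):

```python
from dagster import job, op, BoolSource

@op(config_schema={"verbose": BoolSource})
def greet(context):
    # May be configured as True/False, or as {"env": "GREET_VERBOSE"},
    # in which case the env var's string value is coerced to a bool.
    if context.op_config["verbose"]:
        context.log.info("hello!")

@job
def greet_job():
    greet()
```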
python
|
PyCQA__pylint
|
tests/functional/n/not_context_manager.py
|
{
"start": 415,
"end": 551
}
|
class ____(Manager):
pass
with AnotherManager():
pass
# Tests message for class that doesn't implement the protocol
|
AnotherManager
|
python
|
spyder-ide__spyder
|
spyder/plugins/outlineexplorer/widgets.py
|
{
"start": 5730,
"end": 6024
}
|
class ____(QTreeWidgetItem):
def clear(self):
self.takeChildren()
def append_children(self, index, node):
self.insertChild(index, node)
node.parent = self
def remove_children(self, node):
self.removeChild(node)
node.parent = None
|
BaseTreeItem
|
python
|
neetcode-gh__leetcode
|
python/0658-find-k-closest-elements.py
|
{
"start": 953,
"end": 1271
}
|
class ____:
def findClosestElements(self, arr: List[int], k: int, x: int) -> List[int]:
l, r = 0, len(arr) - k
while l < r:
m = (l + r) // 2
if x - arr[m] > arr[m + k] - x:
l = m + 1
else:
r = m
return arr[l : l + k]
|
Solution
|
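The binary search above runs over window *start* positions in `[0, len(arr) - k]`: if the element just past the window is closer to `x` than the window's left edge, the window shifts right. A quick check, assuming the masked class is in scope as `Solution`:

```python
sol = Solution()
print(sol.findClosestElements([1, 2, 3, 4, 5], k=4, x=3))   # [1, 2, 3, 4]
print(sol.findClosestElements([1, 2, 3, 4, 5], k=4, x=-1))  # [1, 2, 3, 4]
```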
python
|
django__django
|
django/db/models/functions/datetime.py
|
{
"start": 6285,
"end": 7367
}
|
class ____(Func):
template = "CURRENT_TIMESTAMP"
output_field = DateTimeField()
def as_postgresql(self, compiler, connection, **extra_context):
# PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the
# transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with
# other databases.
return self.as_sql(
compiler, connection, template="STATEMENT_TIMESTAMP()", **extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, template="CURRENT_TIMESTAMP(6)", **extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="STRFTIME('%%%%Y-%%%%m-%%%%d %%%%H:%%%%M:%%%%f', 'NOW')",
**extra_context,
)
def as_oracle(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, template="LOCALTIMESTAMP", **extra_context
)
|
Now
|
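`Now()` can be used anywhere a query expression fits, so the database clock (rather than Python's) drives the comparison. A typical use, with a hypothetical `Article` model:

```python
from django.db.models.functions import Now

# Articles whose publish timestamp is in the past, per the DB server's clock.
Article.objects.filter(published__lte=Now())

# Or attach the database's current timestamp to each row.
Article.objects.annotate(db_now=Now())
```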
python
|
huggingface__transformers
|
src/transformers/models/idefics2/modeling_idefics2.py
|
{
"start": 12794,
"end": 13499
}
|
class ____(nn.Module):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
output_size: int,
hidden_act: str,
):
super().__init__()
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
# Copied from transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead with Siglip->Idefics2
|
Idefics2MLP
|
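The module is a SwiGLU-style gated MLP: the activated gate projection multiplies the parallel up projection before the down projection maps back to `output_size`. A shape-check sketch, assuming the class above is in scope (with `torch.nn` and `ACT2FN` available, as in the transformers source):

```python
import torch

mlp = Idefics2MLP(hidden_size=64, intermediate_size=256,
                  output_size=64, hidden_act="silu")
x = torch.randn(2, 10, 64)   # (batch, seq_len, hidden)
print(mlp(x).shape)          # torch.Size([2, 10, 64])
```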
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/openid_connect/views.py
|
{
"start": 472,
"end": 4176
}
|
class ____(OAuth2Adapter):
def __init__(self, request, provider_id):
self.provider_id = provider_id
super().__init__(request)
@property
def openid_config(self):
if not hasattr(self, "_openid_config"):
server_url = self.get_provider().server_url
resp = get_adapter().get_requests_session().get(server_url)
resp.raise_for_status()
self._openid_config = resp.json()
return self._openid_config
@property
def basic_auth(self):
token_auth_method = self.get_provider().app.settings.get("token_auth_method")
if token_auth_method:
return token_auth_method == "client_secret_basic" # nosec
methods = self.openid_config.get("token_endpoint_auth_methods_supported", [])
# Basic auth is problematic, especially when client ID contains a colon.
return "client_secret_post" not in methods and "client_secret_basic" in methods
@property
def access_token_url(self):
return self.openid_config["token_endpoint"]
@property
def authorize_url(self):
return self.openid_config["authorization_endpoint"]
@property
def profile_url(self):
return self.openid_config["userinfo_endpoint"]
def complete_login(self, request, app, token: SocialToken, **kwargs):
id_token_str = kwargs["response"].get("id_token")
fetch_userinfo = app.settings.get("fetch_userinfo", True)
data = {}
if fetch_userinfo or (not id_token_str):
data["userinfo"] = self._fetch_user_info(token.token)
if id_token_str:
data["id_token"] = self._decode_id_token(app, id_token_str)
return self.get_provider().sociallogin_from_response(request, data)
def _fetch_user_info(self, access_token: str) -> dict:
response = (
get_adapter()
.get_requests_session()
.get(self.profile_url, headers={"Authorization": "Bearer " + access_token})
)
response.raise_for_status()
return response.json()
def _decode_id_token(self, app: SocialApp, id_token: str) -> dict:
"""
If the token was received by direct communication protected by
TLS between this library and Google, we are allowed to skip checking the
token signature according to the OpenID Connect Core 1.0 specification.
https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
"""
verify_signature = not self.did_fetch_access_token
return jwtkit.verify_and_decode(
credential=id_token,
keys_url=self.openid_config["jwks_uri"],
issuer=self.openid_config["issuer"],
audience=app.client_id,
lookup_kid=jwtkit.lookup_kid_jwk,
verify_signature=verify_signature,
)
def get_callback_url(self, request, app):
callback_url = reverse(
"openid_connect_callback", kwargs={"provider_id": self.provider_id}
)
protocol = self.redirect_uri_protocol
return build_absolute_uri(request, callback_url, protocol)
@login_not_required
def login(request, provider_id):
try:
view = OAuth2LoginView.adapter_view(
OpenIDConnectOAuth2Adapter(request, provider_id)
)
return view(request)
except SocialApp.DoesNotExist:
raise Http404
@login_not_required
def callback(request, provider_id):
try:
view = OAuth2CallbackView.adapter_view(
OpenIDConnectOAuth2Adapter(request, provider_id)
)
return view(request)
except SocialApp.DoesNotExist:
raise Http404
|
OpenIDConnectOAuth2Adapter
|
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 11624,
"end": 12522
}
|
class ____(object):
"""*
jina gRPC service to expose Endpoints from Executors.
"""
@staticmethod
def endpoint_discovery(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/jina.JinaDiscoverEndpointsRPC/endpoint_discovery',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
jina__pb2.EndpointsProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
|
JinaDiscoverEndpointsRPC
|
python
|
apache__thrift
|
lib/py/test/test_socket.py
|
{
"start": 1054,
"end": 3780
}
|
class ____(unittest.TestCase):
def test_failed_connection_raises_exception(self):
sock = TSocket(host="localhost", port=60606) # unused port
with self.assertRaises(TTransportException) as ctx:
sock.open()
exc = ctx.exception
self.assertEqual(exc.type, TTransportException.NOT_OPEN)
self.assertIn("Could not connect to any of", exc.message)
self.assertIsNotNone(exc.inner)
self.assertIn("Connection refused", str(exc.inner))
def test_socket_readtimeout_exception(self):
acc = ServerAcceptor(TServerSocket(port=0))
acc.start()
sock = TSocket(host="localhost", port=acc.port)
sock.open()
sock.setTimeout(1)
sock.write(b"sleep")
with self.assertRaises(TTransportException) as ctx:
sock.read(5)
exc = ctx.exception
self.assertEqual(exc.message, "read timeout")
acc.client.close() # this also blocks until the other thread is done
acc.close()
sock.close()
def test_isOpen_checks_for_readability(self):
# https://docs.python.org/3/library/socket.html#notes-on-socket-timeouts
# https://docs.python.org/3/library/socket.html#socket.socket.settimeout
timeouts = [
None, # blocking mode
0, # non-blocking mode
1.0, # timeout mode
]
for timeout in timeouts:
acc = ServerAcceptor(TServerSocket(port=0))
acc.start()
sock = TSocket(host="localhost", port=acc.port)
self.assertFalse(sock.isOpen())
sock.open()
sock.setTimeout(timeout)
# the socket shows as open immediately after connecting
self.assertTrue(sock.isOpen())
# and remains open during usage
sock.write(b"hello")
self.assertTrue(sock.isOpen())
while True:
try:
sock.read(5)
except TTransportException as exc:
if exc.inner.errno == errno.EAGAIN:
# try again when we're in non-blocking mode
continue
raise
break
self.assertTrue(sock.isOpen())
# once the server side closes, it no longer shows open
acc.client.close() # this also blocks until the other thread is done
acc.close()
self.assertIsNotNone(sock.handle)
self.assertFalse(sock.isOpen())
# after isOpen() returned False the socket should be closed (THRIFT-5813)
self.assertIsNone(sock.handle)
if __name__ == "__main__":
unittest.main()
|
TSocketTest
|
python
|
wandb__wandb
|
wandb/automations/_filters/run_metrics.py
|
{
"start": 11821,
"end": 12547
}
|
class ____(BaseMetricOperand):
"""Represents a single metric value when defining metric event filters."""
name: str
# Allow conversion of a single-value metric into an aggregated expression.
def max(self, window: int) -> MetricAgg:
return MetricAgg(name=self.name, agg=Agg.MAX, window=window)
def min(self, window: int) -> MetricAgg:
return MetricAgg(name=self.name, agg=Agg.MIN, window=window)
def avg(self, window: int) -> MetricAgg:
return MetricAgg(name=self.name, agg=Agg.AVG, window=window)
# Aliased method for users familiar with e.g. torch/tf/numpy/pandas/polars/etc.
def mean(self, window: int) -> MetricAgg:
return self.avg(window=window)
|
MetricVal
|
python
|
Textualize__textual
|
src/textual/command.py
|
{
"start": 12010,
"end": 13097
}
|
class ____(Option):
"""Class that holds a hit in the [`CommandList`][textual.command.CommandList]."""
def __init__(
self,
prompt: VisualType,
hit: DiscoveryHit | Hit,
id: str | None = None,
disabled: bool = False,
) -> None:
"""Initialise the option.
Args:
prompt: The prompt for the option.
hit: The details of the hit associated with the option.
id: The optional ID for the option.
disabled: The initial enabled/disabled state. Enabled by default.
"""
super().__init__(prompt, id, disabled)
self.hit = hit
"""The details of the hit associated with the option."""
def __hash__(self) -> int:
return id(self)
def __lt__(self, other: object) -> bool:
if isinstance(other, Command):
return self.hit < other.hit
return NotImplemented
def __eq__(self, other: object) -> bool:
if isinstance(other, Command):
return self.hit == other.hit
return NotImplemented
|
Command
|
python
|
pypa__setuptools
|
setuptools/_vendor/wheel/cli/convert.py
|
{
"start": 2680,
"end": 3054
}
|
class ____(metaclass=ABCMeta):
name: str
version: str
pyver: str = "py2.py3"
abi: str = "none"
platform: str = "any"
metadata: Message
@property
def dist_info_dir(self) -> str:
return f"{self.name}-{self.version}.dist-info"
@abstractmethod
def generate_contents(self) -> Iterator[tuple[str, bytes]]:
pass
|
ConvertSource
|
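Subclasses of the abstract source above only need to supply the identifying attributes and a `generate_contents` iterator of `(arcname, data)` pairs. A hypothetical in-memory subclass, assuming `ConvertSource` and `Message` are in scope as in the snippet:

```python
class InMemorySource(ConvertSource):
    def __init__(self, name: str, version: str, files: dict[str, bytes]):
        self.name = name
        self.version = version
        self.metadata = Message()
        self._files = files

    def generate_contents(self):
        # Yields (archive name, file bytes) pairs for the wheel writer.
        yield from self._files.items()

src = InMemorySource("demo", "1.0", {"demo/__init__.py": b""})
print(src.dist_info_dir)   # demo-1.0.dist-info
```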
python
|
pytorch__pytorch
|
torch/optim/adadelta.py
|
{
"start": 508,
"end": 16873
}
|
class ____(Optimizer):
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1.0,
rho: float = 0.9,
eps: float = 1e-6,
weight_decay: float = 0,
foreach: Optional[bool] = None,
*,
capturable: bool = False,
maximize: bool = False,
differentiable: bool = False,
) -> None:
if isinstance(lr, Tensor) and lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= rho <= 1.0:
raise ValueError(f"Invalid rho value: {rho}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = {
"lr": lr,
"rho": rho,
"eps": eps,
"weight_decay": weight_decay,
"maximize": maximize,
"capturable": capturable,
"foreach": foreach,
"differentiable": differentiable,
}
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("foreach", None)
group.setdefault("maximize", False)
group.setdefault("differentiable", False)
group.setdefault("capturable", False)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
p_state["step"] = (
torch.tensor(
step_val, dtype=_get_scalar_dtype(), device=p.device
)
if group["capturable"]
else torch.tensor(step_val, dtype=_get_scalar_dtype())
)
def _init_group(
self,
group: dict[str, Any],
params_with_grad: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
acc_deltas: list[Tensor],
state_steps: list[Tensor],
):
has_complex = False
p: Tensor
for p in group["params"]:
if p.grad is None:
continue
has_complex |= torch.is_complex(p)
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError("Adadelta does not support sparse gradients")
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state["step"] = (
torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
if group["capturable"]
else torch.zeros((), dtype=_get_scalar_dtype())
)
state["square_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
state["acc_delta"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
square_avgs.append(state["square_avg"])
acc_deltas.append(state["acc_delta"])
state_steps.append(state["step"])
return has_complex
@_use_grad_for_differentiable
def step(self, closure=None):
"""Perform a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad: list[Tensor] = []
grads: list[Tensor] = []
square_avgs: list[Tensor] = []
acc_deltas: list[Tensor] = []
state_steps: list[Tensor] = []
(
lr,
rho,
eps,
weight_decay,
foreach,
maximize,
differentiable,
capturable,
) = (
group["lr"],
group["rho"],
group["eps"],
group["weight_decay"],
group["foreach"],
group["maximize"],
group["differentiable"],
group["capturable"],
)
has_complex = self._init_group(
group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
)
adadelta(
params_with_grad,
grads,
square_avgs,
acc_deltas,
state_steps,
lr=lr,
rho=rho,
eps=eps,
weight_decay=weight_decay,
foreach=foreach,
maximize=maximize,
differentiable=differentiable,
capturable=capturable,
has_complex=has_complex,
)
return loss
Adadelta.__doc__ = (
r"""Implements Adadelta algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)},
\: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)},
\: \lambda \text{ (weight decay)} \\
&\textbf{initialize} : v_0 \leftarrow 0 \: \text{ (square avg)},
\: u_0 \leftarrow 0 \: \text{ (accumulate variables)} \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}if \: \lambda \neq 0 \\
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
&\hspace{5mm} v_t \leftarrow v_{t-1} \rho + g^2_t (1 - \rho) \\
&\hspace{5mm}\Delta x_t \leftarrow \frac{\sqrt{u_{t-1} +
\epsilon }}{ \sqrt{v_t + \epsilon} }g_t \hspace{21mm} \\
&\hspace{5mm} u_t \leftarrow u_{t-1} \rho +
\Delta x^2_t (1 - \rho) \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \Delta x_t \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.
"""
+ rf"""
Args:
{_params_doc}
    lr (float, Tensor, optional): coefficient that scales delta before it is applied
        to the parameters (default: 1.0)
rho (float, optional): coefficient used for computing a running average
of squared gradients (default: 0.9). A higher value of `rho` will
result in a slower average, which can be helpful for preventing
oscillations in the learning process.
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-6).
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
{_foreach_doc}
{_capturable_doc}
{_maximize_doc}
{_differentiable_doc}
.. _ADADELTA\: An Adaptive Learning Rate Method:
https://arxiv.org/abs/1212.5701
"""
)
def _single_tensor_adadelta(
params: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
acc_deltas: list[Tensor],
state_steps: list[Tensor],
*,
lr: float,
rho: float,
eps: float,
weight_decay: float,
maximize: bool,
differentiable: bool,
capturable: bool,
has_complex: bool,
) -> None:
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch.compiler.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices(
supports_xla=False
)
if not all(
p.device.type == step.device.type
and p.device.type in capturable_supported_devices
for p, step in zip(params, state_steps, strict=True)
):
raise AssertionError(
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
)
if not torch.jit.is_scripting():
lr = _to_scalar(lr)
for param, grad, square_avg, acc_delta, step in zip(
params, grads, square_avgs, acc_deltas, state_steps, strict=True
):
step += 1
grad = grad if not maximize else -grad
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
if torch.is_complex(param):
square_avg = torch.view_as_real(square_avg)
acc_delta = torch.view_as_real(acc_delta)
grad = torch.view_as_real(grad)
square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
std = square_avg.add(eps).sqrt_()
delta = acc_delta.add(eps).sqrt_()
if differentiable:
delta = delta.clone()
delta.div_(std).mul_(grad)
acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)
if torch.is_complex(param):
delta = torch.view_as_complex(delta)
param.add_(delta, alpha=-lr)
def _multi_tensor_adadelta(
params: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
acc_deltas: list[Tensor],
state_steps: list[Tensor],
*,
lr: float,
rho: float,
eps: float,
weight_decay: float,
maximize: bool,
differentiable: bool,
capturable: bool,
has_complex: bool,
) -> None:
if differentiable:
raise AssertionError("_foreach ops don't support autograd")
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch.compiler.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices(
supports_xla=False
)
if not all(
p.device.type == step.device.type
and p.device.type in capturable_supported_devices
for p, step in zip(params, state_steps, strict=True)
):
raise AssertionError(
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
)
if len(params) == 0:
return
lr = _to_scalar(lr)
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, square_avgs, acc_deltas, state_steps] # type: ignore[list-item]
)
for (
device_params_,
device_grads_,
device_square_avgs_,
device_acc_deltas_,
device_state_steps_,
), _ in grouped_tensors.values():
device_params = cast(list[Tensor], device_params_)
device_grads = cast(list[Tensor], device_grads_)
device_square_avgs = cast(list[Tensor], device_square_avgs_)
device_acc_deltas = cast(list[Tensor], device_acc_deltas_)
device_state_steps = cast(list[Tensor], device_state_steps_)
if has_complex:
_view_as_real(
device_params, device_grads, device_square_avgs, device_acc_deltas
)
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
torch._foreach_add_(
device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
)
else:
torch._foreach_add_(device_state_steps, 1)
if maximize:
device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment]
if weight_decay != 0:
# Reuse the intermediate memory (device_grads) already allocated for maximize
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
device_grads = torch._foreach_add( # type: ignore[assignment]
device_grads, device_params, alpha=weight_decay
)
torch._foreach_mul_(device_square_avgs, rho)
torch._foreach_addcmul_(
device_square_avgs, device_grads, device_grads, value=1 - rho
)
std = torch._foreach_add(device_square_avgs, eps)
torch._foreach_sqrt_(std)
deltas = torch._foreach_add(device_acc_deltas, eps)
torch._foreach_sqrt_(deltas)
torch._foreach_div_(deltas, std)
torch._foreach_mul_(deltas, device_grads)
torch._foreach_mul_(device_acc_deltas, rho)
torch._foreach_addcmul_(device_acc_deltas, deltas, deltas, value=1 - rho)
# If LR is a tensor, the else branch will internally call item()
# which will cause silent incorrectness if we are capturing
if capturable and isinstance(lr, torch.Tensor):
torch._foreach_mul_(deltas, -lr)
torch._foreach_add_(device_params, deltas)
else:
torch._foreach_add_(device_params, deltas, alpha=-lr)
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adadelta)
def adadelta(
params: list[Tensor],
grads: list[Tensor],
square_avgs: list[Tensor],
acc_deltas: list[Tensor],
state_steps: list[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
capturable: bool = False,
foreach: Optional[bool] = None,
differentiable: bool = False,
has_complex: bool = False,
*,
lr: float,
rho: float,
eps: float,
weight_decay: float,
maximize: bool,
) -> None:
r"""Functional API that performs Adadelta algorithm computation.
See :class:`~torch.optim.Adadelta` for details.
"""
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
if not torch.compiler.is_compiling() and not all(
isinstance(t, torch.Tensor) for t in state_steps
):
raise RuntimeError(
"API has changed, `state_steps` argument must contain a list of singleton tensors"
)
# We still respect when the user inputs False for foreach.
if foreach is None:
_, foreach = _default_to_fused_or_foreach(
params, differentiable, use_fused=False
)
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_adadelta
else:
func = _single_tensor_adadelta
func(
params,
grads,
square_avgs,
acc_deltas,
state_steps,
lr=lr,
rho=rho,
eps=eps,
weight_decay=weight_decay,
maximize=maximize,
differentiable=differentiable,
capturable=capturable,
has_complex=has_complex,
)
|
Adadelta
|
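From the user's side the class behaves like any other `torch.optim` optimizer; the arguments below mirror the constructor defaults above (the model is illustrative):

```python
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9, eps=1e-6)

for _ in range(3):
    opt.zero_grad()
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    opt.step()
```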
python
|
numpy__numpy
|
numpy/polynomial/tests/test_laguerre.py
|
{
"start": 915,
"end": 3265
}
|
class ____:
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = lag.lagadd([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagsub(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = lag.lagsub([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagmulx(self):
assert_equal(lag.lagmulx([0]), [0])
assert_equal(lag.lagmulx([1]), [1, -1])
for i in range(1, 5):
ser = [0] * i + [1]
tgt = [0] * (i - 1) + [-i, 2 * i + 1, -(i + 1)]
assert_almost_equal(lag.lagmulx(ser), tgt)
def test_lagmul(self):
# check values of result
for i in range(5):
pol1 = [0] * i + [1]
val1 = lag.lagval(self.x, pol1)
for j in range(5):
msg = f"At i={i}, j={j}"
pol2 = [0] * j + [1]
val2 = lag.lagval(self.x, pol2)
pol3 = lag.lagmul(pol1, pol2)
val3 = lag.lagval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1 * val2, err_msg=msg)
def test_lagdiv(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
ci = [0] * i + [1]
cj = [0] * j + [1]
tgt = lag.lagadd(ci, cj)
quo, rem = lag.lagdiv(tgt, ci)
res = lag.lagadd(lag.lagmul(quo, ci), rem)
assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagpow(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(lag.lagmul, [c] * j, np.array([1]))
res = lag.lagpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
|
TestArithmetic
|
python
|
ray-project__ray
|
python/ray/_common/tests/test_ray_option_utils.py
|
{
"start": 6684,
"end": 7333
}
|
class ____:
def test_simple_update(self):
original = {"num_cpus": 1, "name": "a"}
new = {"num_cpus": 2, "num_gpus": 1}
updated = update_options(original, new)
assert updated == {"num_cpus": 2, "name": "a", "num_gpus": 1}
def test_update_with_empty_new(self):
original = {"num_cpus": 1}
updated = update_options(original, {})
assert updated == original
def test_update_empty_original(self):
new = {"num_cpus": 1}
updated = update_options({}, new)
assert updated == new
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
TestUpdateOptions
|
python
|
walkccc__LeetCode
|
solutions/17. Letter Combinations of a Phone Number/17-2.py
|
{
"start": 0,
"end": 396
}
|
class ____:
def letterCombinations(self, digits: str) -> list[str]:
if not digits:
return []
ans = ['']
digitToLetters = ['', '', 'abc', 'def', 'ghi',
'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
for d in digits:
temp = []
for s in ans:
for c in digitToLetters[int(d)]:
temp.append(s + c)
ans = temp
return ans
|
Solution
|
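Each digit multiplies the running list of partial strings by that digit's letters, so the result size is the product of the per-digit letter counts. Assuming the masked class is in scope as `Solution`:

```python
sol = Solution()
print(sol.letterCombinations("23"))
# ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
print(sol.letterCombinations(""))  # []
```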
python
|
jina-ai__jina
|
tests/integration/inspect_deployments_flow/test_inspect_deployments_flow.py
|
{
"start": 491,
"end": 3288
}
|
class ____(DummyEvaluator1):
tag = 3
docs = DocumentArray([x for x in random_docs(1)])
params = ['HANG', 'COLLECT', 'REMOVE']
def validate(ids, expect):
assert len(ids) > 0
for j in ids:
tmp_dir = os.environ.get('TEST_EVAL_FLOW_TMPDIR')
fname = f'{tmp_dir}/{j}.txt'
assert os.path.exists(fname) == expect
if expect:
with open(fname, encoding='utf-8') as fp:
assert fp.read() != ''
@pytest.fixture
def temp_folder(tmpdir):
os.environ['TEST_EVAL_FLOW_TMPDIR'] = str(tmpdir)
yield
del os.environ['TEST_EVAL_FLOW_TMPDIR']
@pytest.mark.parametrize('inspect', params)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc'])
def test_flow1(inspect, protocol, temp_folder):
f = Flow(protocol=protocol, inspect=inspect).add(
uses=DummyEvaluator1,
env={'TEST_EVAL_FLOW_TMPDIR': os.environ.get('TEST_EVAL_FLOW_TMPDIR')},
)
with f:
res = f.post(on='/index', inputs=docs)
assert len(res) > 0
@pytest.mark.parametrize('inspect', params)
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_flow2(inspect, protocol, temp_folder):
f = (
Flow(protocol=protocol, inspect=inspect)
.add()
.inspect(
uses=DummyEvaluator1,
env={'TEST_EVAL_FLOW_TMPDIR': os.environ.get('TEST_EVAL_FLOW_TMPDIR')},
)
)
with f:
res = f.index(docs)
assert len(res) > 0
validate([1], expect=f.args.inspect.is_keep)
@pytest.mark.parametrize('inspect', params)
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_flow3(inspect, protocol, temp_folder):
env = {'TEST_EVAL_FLOW_TMPDIR': os.environ.get('TEST_EVAL_FLOW_TMPDIR')}
f = (
Flow(protocol=protocol, inspect=inspect)
.add(name='p1')
.inspect(uses='DummyEvaluator1', env=env)
.add(name='p2', needs='gateway')
.needs(['p1', 'p2'])
.inspect(uses='DummyEvaluator2', env=env)
)
with f:
res = f.index(docs)
assert len(res) > 0
validate([1, 2], expect=f.args.inspect.is_keep)
@pytest.mark.parametrize('inspect', params)
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_flow4(inspect, protocol, temp_folder):
env = {'TEST_EVAL_FLOW_TMPDIR': os.environ.get('TEST_EVAL_FLOW_TMPDIR')}
f = (
Flow(protocol=protocol, inspect=inspect)
.add()
.inspect(uses='DummyEvaluator1', env=env)
.add()
.inspect(uses='DummyEvaluator2', env=env)
.add()
.inspect(uses='DummyEvaluator3', env=env)
.plot(build=True)
)
with f:
res = f.index(docs)
assert len(res) > 0
validate([1, 2, 3], expect=f.args.inspect.is_keep)
|
DummyEvaluator3
|
python
|
pytransitions__transitions
|
tests/test_threading.py
|
{
"start": 716,
"end": 5455
}
|
class ____(TestTransitions):
def setUp(self):
self.machine_cls = LockedMachine # type: Type[LockedMachine]
self.stuff = Stuff(machine_cls=self.machine_cls)
self.stuff.heavy_processing = heavy_processing
self.stuff.machine.add_transition('forward', 'A', 'B', before='heavy_processing')
def tearDown(self):
pass
def test_thread_access(self):
thread = Thread(target=self.stuff.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
self.assertTrue(self.stuff.is_B())
def test_parallel_access(self):
thread = Thread(target=self.stuff.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
self.stuff.to_C()
# if 'forward' has not been locked, it is still running
# we have to wait to be sure it is done
time.sleep(1)
self.assertEqual(self.stuff.state, "C")
def test_parallel_deep(self):
self.stuff.machine.add_transition('deep', source='*', dest='C', after='to_D')
thread = Thread(target=self.stuff.deep)
thread.start()
time.sleep(0.01)
self.stuff.to_C()
time.sleep(1)
self.assertEqual(self.stuff.state, "C")
def test_conditional_access(self):
self.stuff.heavy_checking = heavy_checking # checking takes 1s and returns False
self.stuff.machine.add_transition('advance', 'A', 'B', conditions='heavy_checking')
self.stuff.machine.add_transition('advance', 'A', 'D')
t = Thread(target=self.stuff.advance)
t.start()
time.sleep(0.1)
logger.info('Check if state transition done...')
# Thread will release lock before Transition is finished
res = self.stuff.is_D()
self.assertTrue(res)
def test_pickle(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
# go to non initial state B
self.stuff.to_B()
# pickle Stuff model
dump = pickle.dumps(self.stuff)
self.assertIsNotNone(dump)
stuff2 = pickle.loads(dump)
self.assertTrue(stuff2.is_B())
# check if machines of stuff and stuff2 are truly separated
stuff2.to_A()
self.stuff.to_C()
self.assertTrue(stuff2.is_A())
thread = Thread(target=stuff2.forward)
thread.start()
# give thread some time to start
time.sleep(0.01)
# both objects should be in different states
# and also not share locks
begin = time.time()
# stuff should not be locked and execute fast
self.assertTrue(self.stuff.is_C())
fast = time.time()
# stuff2 should be locked and take about 1 second
# to be executed
self.assertTrue(stuff2.is_B())
blocked = time.time()
self.assertAlmostEqual(fast - begin, 0, delta=0.1)
self.assertAlmostEqual(blocked - begin, 1, delta=0.1)
def test_context_managers(self):
class CounterContext(object):
def __init__(self):
self.counter = 0
self.level = 0
self.max = 0
super(CounterContext, self).__init__()
def __enter__(self):
self.counter += 1
self.level += 1
self.max = max(self.level, self.max)
def __exit__(self, *exc):
self.level -= 1
M = LockedMachine
c = CounterContext()
m = M(states=['A', 'B', 'C', 'D'], transitions=[['reset', '*', 'A']], initial='A', machine_context=c)
m.get_triggers('A')
self.assertEqual(c.max, 1) # was 3 before
self.assertEqual(c.counter, 4) # was 72 (!) before
# This test has been used to quantify the changes made in locking in version 0.5.0.
# See https://github.com/tyarkoni/transitions/issues/167 for the results.
# def test_performance(self):
# import timeit
# states = ['A', 'B', 'C']
# transitions = [['go', 'A', 'B'], ['go', 'B', 'C'], ['go', 'C', 'A']]
#
# M1 = MachineFactory.get_predefined()
# M2 = MachineFactory.get_predefined(locked=True)
#
# def test_m1():
# m1 = M1(states=states, transitions=transitions, initial='A')
# m1.get_triggers('A')
#
# def test_m2():
# m2 = M2(states=states, transitions=transitions, initial='A')
# m2.get_triggers('A')
#
# t1 = timeit.timeit(test_m1, number=20000)
# t2 = timeit.timeit(test_m2, number=20000)
# self.assertAlmostEqual(t2/t1, 1, delta=0.5)
|
TestLockedTransitions
|
python
|
TheAlgorithms__Python
|
conversions/prefix_conversions.py
|
{
"start": 132,
"end": 442
}
|
class ____(Enum):
yotta = 24
zetta = 21
exa = 18
peta = 15
tera = 12
giga = 9
mega = 6
kilo = 3
hecto = 2
deca = 1
deci = -1
centi = -2
milli = -3
micro = -6
nano = -9
pico = -12
femto = -15
atto = -18
zepto = -21
yocto = -24
|
SIUnit
|
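Because the enum stores base-10 exponents, converting between prefixes is a single power-of-ten shift. The actual module defines a similar helper; this sketch assumes the masked enum above is in scope as `SIUnit`:

```python
def convert_si_prefix(value: float, from_prefix: "SIUnit", to_prefix: "SIUnit") -> float:
    # Shift by the difference of the two base-10 exponents.
    return value * 10 ** (from_prefix.value - to_prefix.value)

print(convert_si_prefix(1.0, SIUnit.kilo, SIUnit.mega))  # 0.001
print(convert_si_prefix(5.0, SIUnit.giga, SIUnit.mega))  # 5000.0
```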
python
|
getsentry__sentry
|
src/sentry/utils/snowflake.py
|
{
"start": 2096,
"end": 2300
}
|
class ____(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "Max allowed ID retry reached. Please try again in a second"
@dataclass(frozen=True, eq=True)
|
MaxSnowflakeRetryError
|
python
|
keras-team__keras
|
keras/src/ops/linalg.py
|
{
"start": 16636,
"end": 18455
}
|
class ____(Operation):
def __init__(self, full_matrices=True, compute_uv=True, *, name=None):
super().__init__(name=name)
self.full_matrices = full_matrices
self.compute_uv = compute_uv
def call(self, x):
return _svd(x, self.full_matrices, self.compute_uv)
def compute_output_spec(self, x):
_assert_2d(x)
rows, columns = x.shape[-2:]
batches = x.shape[:-2]
s_shape = batches + (min(rows, columns),)
if self.full_matrices:
u_shape = batches + (rows, rows)
v_shape = batches + (columns, columns)
else:
u_shape = batches + (rows, min(rows, columns))
v_shape = batches + (min(rows, columns), columns)
if self.compute_uv:
return (
KerasTensor(u_shape, x.dtype),
KerasTensor(s_shape, x.dtype),
KerasTensor(v_shape, x.dtype),
)
return KerasTensor(s_shape, x.dtype)
@keras_export(["keras.ops.svd", "keras.ops.linalg.svd"])
def svd(x, full_matrices=True, compute_uv=True):
"""Computes the singular value decomposition of a matrix.
Args:
x: Input tensor of shape `(..., M, N)`.
Returns:
A tuple of three tensors: a tensor of shape `(..., M, M)` containing the
left singular vectors, a tensor of shape `(..., M, N)` containing the
singular values and a tensor of shape `(..., N, N)` containing the
right singular vectors.
"""
if any_symbolic_tensors((x,)):
return SVD(full_matrices, compute_uv).symbolic_call(x)
return _svd(x, full_matrices, compute_uv)
def _svd(x, full_matrices=True, compute_uv=True):
x = backend.convert_to_tensor(x)
_assert_2d(x)
return backend.linalg.svd(x, full_matrices, compute_uv)
|
SVD
|
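`compute_output_spec` above pins down the shapes: for input `(..., M, N)` with `full_matrices=True`, the results are `u: (..., M, M)`, `s: (..., min(M, N))`, `v: (..., N, N)`. A quick shape check under Keras 3 (reconstruction conventions for the third value vary by backend, so only shapes are shown):

```python
import numpy as np
from keras import ops

x = np.random.rand(3, 5).astype("float32")
u, s, v = ops.svd(x)
print(u.shape, s.shape, v.shape)   # (3, 3) (3,) (5, 5)
```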
python
|
celery__celery
|
celery/exceptions.py
|
{
"start": 4161,
"end": 4231
}
|
class ____(CeleryWarning):
"""Fixup related warning."""
|
FixupWarning
|
python
|
Textualize__textual
|
src/textual/css/_style_properties.py
|
{
"start": 4057,
"end": 4250
}
|
class ____(GenericProperty[bool, bool]):
"""A property that requires a True or False value."""
def validate_value(self, value: object) -> bool:
return bool(value)
|
BooleanProperty
|
python
|
eventlet__eventlet
|
tests/patcher_test.py
|
{
"start": 12520,
"end": 17515
}
|
class ____(ProcessBase):
prologue = """import eventlet
eventlet.monkey_patch()
import threading
def test():
t = threading.currentThread()
"""
epilogue = """
t = eventlet.spawn(test)
t.wait()
"""
def test_join(self):
self.write_to_tempfile("newmod", self.prologue + """
def test2():
global t2
t2 = threading.currentThread()
eventlet.spawn(test2)
""" + self.epilogue + """
print(repr(t2))
t2.join()
""")
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
assert lines[0].startswith('<_GreenThread'), lines[0]
def test_name(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.name)
print(t.getName())
print(t.get_name())
t.name = 'foo'
print(t.name)
print(t.getName())
print(t.get_name())
t.setName('bar')
print(t.name)
print(t.getName())
print(t.get_name())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 10, "\n".join(lines))
for i in range(0, 3):
self.assertEqual(lines[i], "GreenThread-1", lines[i])
for i in range(3, 6):
self.assertEqual(lines[i], "foo", lines[i])
for i in range(6, 9):
self.assertEqual(lines[i], "bar", lines[i])
def test_ident(self):
self.write_to_tempfile("newmod", self.prologue + """
print(id(t._g))
print(t.ident)
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], lines[1])
def test_is_alive(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.is_alive())
print(t.isAlive())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], "True", lines[0])
self.assertEqual(lines[1], "True", lines[1])
def test_is_daemon(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.is_daemon())
print(t.isDaemon())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], "True", lines[0])
self.assertEqual(lines[1], "True", lines[1])
def test_patcher_existing_locks_early():
tests.run_isolated('patcher_existing_locks_early.py')
def test_patcher_existing_locks_late():
tests.run_isolated('patcher_existing_locks_late.py')
def test_patcher_existing_locks_locked():
tests.run_isolated('patcher_existing_locks_locked.py')
@tests.skip_if_CRLock_exist
def test_patcher_existing_locks_unlocked():
tests.run_isolated('patcher_existing_locks_unlocked.py')
def test_patcher_existing_logging_module_lock():
tests.run_isolated('patcher_existing_logging_module_lock.py')
def test_importlib_lock():
tests.run_isolated('patcher_importlib_lock.py')
def test_threading_condition():
tests.run_isolated('patcher_threading_condition.py')
def test_threading_join():
tests.run_isolated('patcher_threading_join.py')
@pytest.mark.xfail(
sys.platform == "darwin",
reason="Mysterious failure, see https://github.com/eventlet/eventlet/issues/1068"
)
def test_socketserver_selectors():
tests.run_isolated('patcher_socketserver_selectors.py')
def test_blocking_select_methods_are_deleted():
tests.run_isolated('patcher_blocking_select_methods_are_deleted.py')
def test_regular_file_readall():
tests.run_isolated('regular_file_readall.py')
def test_threading_current():
tests.run_isolated('patcher_threading_current.py')
def test_threadpoolexecutor():
tests.run_isolated('patcher_threadpoolexecutor.py')
FORK_REASON = "fork() doesn't work well on macOS, and definitely doesn't work on Windows"
@pytest.mark.skipif(
not sys.platform.startswith("linux"),
reason=FORK_REASON
)
def test_fork_after_monkey_patch():
tests.run_isolated('patcher_fork_after_monkey_patch.py')
@pytest.mark.skipif(
not sys.platform.startswith("linux"),
reason=FORK_REASON
)
def test_fork_after_monkey_patch_threading():
tests.run_isolated('fork_in_main_thread.py')
@pytest.mark.skipif(
not sys.platform.startswith("linux"),
reason=FORK_REASON
)
def test_fork_in_thread_after_monkey_patch_threading():
tests.run_isolated('fork_in_thread.py')
def test_builtin():
tests.run_isolated('patcher_builtin.py')
def test_open_kwargs():
tests.run_isolated("patcher_open_kwargs.py")
def test_patcher_existing_locks():
tests.run_isolated("patcher_existing_locks_preexisting.py")
def test_patcher_existing_locks_exception():
tests.run_isolated("patcher_existing_locks_exception.py")
def test_patcher_threading_subclass_done():
tests.run_isolated("patcher_threading_subclass_done.py")
|
GreenThreadWrapper
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/organization_group_suspect_flags.py
|
{
"start": 868,
"end": 3078
}
|
class ____(GroupEndpoint):
publish_status = {"GET": ApiPublishStatus.PRIVATE}
def get(self, request: Request, group: Group) -> Response:
"""Stats bucketed by time."""
if not features.has(
"organizations:feature-flag-suspect-flags",
group.organization,
actor=request.user,
):
return Response(status=404)
environments = [e.name for e in get_environments(request, group.organization)]
group_id = group.id
organization_id = group.organization.id
project_id = group.project.id
start, end = get_date_range_from_params(request.GET)
# Clamp the range to be within the issue's first and last seen timestamps.
start, end = max(start, group.first_seen), min(end, group.last_seen)
# To increase our cache hit-rate we round the dates down to the nearest 5 minute interval.
if end - start > timedelta(minutes=5):
start = start.replace(minute=(start.minute // 5) * 5, second=0, microsecond=0)
end = end.replace(minute=(end.minute // 5) * 5, second=0, microsecond=0)
response_data: ResponseData = {
"data": get_suspect_flag_scores(
organization_id,
project_id,
start,
end,
environments,
group_id,
)
}
# Record a distribution of suspect flag scores.
for item in response_data["data"]:
metrics.distribution("flags.suspect.score", item["score"])
if item["score"] >= 1:
logging.info(
"sentry.replays.slow_click",
extra={
"event_type": "flag_score_log",
"org_id": group.organization.id,
"project_id": group.project.id,
"flag": item["flag"],
"score": item["score"],
"issue_id": group.id,
"is_filtered": item["is_filtered"],
},
)
return Response(response_data, status=200)
|
OrganizationGroupSuspectFlagsEndpoint
|
python
|
gevent__gevent
|
src/gevent/events.py
|
{
"start": 6316,
"end": 6901
}
|
class ____(Interface):
"""
The event emitted when the event loop is blocked.
This event is emitted in the monitor thread.
.. versionchanged:: 24.11.1
Add the *hub* attribute.
"""
greenlet = Attribute("The greenlet that appeared to be blocking the loop.")
blocking_time = Attribute("The approximate time in seconds the loop has been blocked.")
info = Attribute("A list of string lines providing extra info. You may modify this list.")
hub = Attribute("""If not None, the hub being blocked.""")
@implementer(IEventLoopBlocked)
|
IEventLoopBlocked
|
python
|
PrefectHQ__prefect
|
src/prefect/task_runners.py
|
{
"start": 17233,
"end": 18853
}
|
class ____(concurrent.futures.Future[bytes]):
"""Wraps a future-of-future and unwraps the result."""
def __init__(
self,
resolution_future: concurrent.futures.Future[concurrent.futures.Future[bytes]],
):
super().__init__()
self._resolution_future = resolution_future
self._process_future: concurrent.futures.Future[bytes] | None = None
# When resolution completes, hook up to the process future
def on_resolution_done(
fut: concurrent.futures.Future[concurrent.futures.Future[bytes]],
) -> None:
try:
self._process_future = fut.result()
# Forward process future result to this future
def on_process_done(
process_fut: concurrent.futures.Future[bytes],
) -> None:
try:
result = process_fut.result()
self.set_result(result)
except Exception as e:
self.set_exception(e)
self._process_future.add_done_callback(on_process_done)
except Exception as e:
self.set_exception(e)
resolution_future.add_done_callback(on_resolution_done)
def cancel(self) -> bool:
if self._process_future:
return self._process_future.cancel()
return self._resolution_future.cancel()
def cancelled(self) -> bool:
if self._process_future:
return self._process_future.cancelled()
return self._resolution_future.cancelled()
|
_ChainedFuture
|
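The wrapper resolves a future whose *result* is another future, forwarding the inner result outward through chained callbacks. A minimal sketch of that flow, assuming the masked class is in scope as `_ChainedFuture`:

```python
import concurrent.futures

outer: concurrent.futures.Future = concurrent.futures.Future()
inner: concurrent.futures.Future = concurrent.futures.Future()

chained = _ChainedFuture(outer)
outer.set_result(inner)        # resolution step yields the "process" future
inner.set_result(b"payload")   # process step completes
print(chained.result())        # b'payload'
```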
python
|
PrefectHQ__prefect
|
src/prefect/server/database/orm_models.py
|
{
"start": 31910,
"end": 32557
}
|
class ____(Base):
"""
SQLAlchemy model of a logging statement.
"""
name: Mapped[str]
level: Mapped[int] = mapped_column(sa.SmallInteger, index=True)
flow_run_id: Mapped[Optional[uuid.UUID]] = mapped_column(index=True)
task_run_id: Mapped[Optional[uuid.UUID]] = mapped_column(index=True)
message: Mapped[str] = mapped_column(sa.Text)
# The client-side timestamp of this logged statement.
timestamp: Mapped[DateTime] = mapped_column(index=True)
__table_args__: Any = (
sa.Index(
"ix_log__flow_run_id_timestamp",
"flow_run_id",
"timestamp",
),
)
|
Log
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/test_task.py
|
{
"start": 1624,
"end": 4950
}
|
class ____(TestCase):
def test__no_detector_id(self) -> None:
"""
Test that the workflow_status_update_handler does not crash
when no detector_id is provided in the status change message.
"""
group = self.create_group(project=self.project)
activity = Activity(
project=self.project,
group=group,
type=ActivityType.SET_RESOLVED.value,
data={"fingerprint": ["test_fingerprint"]},
)
message = StatusChangeMessageData(
id="test_message_id",
project_id=self.project.id,
new_status=GroupStatus.RESOLVED,
new_substatus=None,
fingerprint=["test_fingerprint"],
detector_id=None, # No detector_id provided
activity_data=None,
)
with mock.patch("sentry.workflow_engine.tasks.workflows.metrics.incr") as mock_incr:
workflow_status_update_handler(group, message, activity)
mock_incr.assert_called_with("workflow_engine.tasks.error.no_detector_id")
def test_single_processing(self) -> None:
detector = self.create_detector(project=self.project)
group = self.create_group(project=self.project, type=MetricIssue.type_id)
activity = Activity(
project=self.project,
group=group,
type=ActivityType.SET_RESOLVED.value,
data={"fingerprint": ["test_fingerprint"]},
)
message = StatusChangeMessageData(
id="test_message_id",
project_id=self.project.id,
new_status=GroupStatus.RESOLVED,
new_substatus=None,
fingerprint=["test_fingerprint"],
detector_id=detector.id,
activity_data={"test": "test"},
)
with mock.patch(
"sentry.workflow_engine.tasks.workflows.process_workflow_activity.delay"
) as mock_delay:
workflow_status_update_handler(group, message, activity)
mock_delay.assert_called_once_with(
activity_id=activity.id,
group_id=group.id,
detector_id=detector.id,
)
def test_dual_processing(self) -> None:
detector = self.create_detector(project=self.project)
group = self.create_group(project=self.project, type=MetricIssue.type_id)
activity = Activity(
project=self.project,
group=group,
type=ActivityType.SET_RESOLVED.value,
data={"fingerprint": ["test_fingerprint"]},
)
message = StatusChangeMessageData(
id="test_message_id",
project_id=self.project.id,
new_status=GroupStatus.RESOLVED,
new_substatus=None,
fingerprint=["test_fingerprint"],
detector_id=detector.id,
activity_data={"test": "test"},
)
with mock.patch(
"sentry.workflow_engine.tasks.workflows.process_workflow_activity.delay"
) as mock_delay:
workflow_status_update_handler(group, message, activity)
mock_delay.assert_called_once_with(
activity_id=activity.id,
group_id=group.id,
detector_id=detector.id,
)
|
WorkflowStatusUpdateHandlerTests
|
python
|
nedbat__coveragepy
|
tests/test_arcs.py
|
{
"start": 38459,
"end": 42094
}
|
class ____(CoverageTest):
"""Arc tests for generators."""
def test_yield_in_loop(self) -> None:
self.check_coverage(
"""\
def gen(inp):
for n in inp:
yield n
list(gen([1,2,3]))
""",
branchz="23 2-1",
branchz_missing="",
)
def test_padded_yield_in_loop(self) -> None:
self.check_coverage(
"""\
def gen(inp):
i = 2
for n in inp:
i = 4
yield n
i = 6
i = 7
list(gen([1,2,3]))
""",
branchz="34 37",
branchz_missing="",
)
def test_bug_308(self) -> None:
self.check_coverage(
"""\
def run():
for i in range(10):
yield lambda: i
for f in run():
print(f())
""",
branchz="23 2. 56 5.",
branchz_missing="",
)
self.check_coverage(
"""\
def run():
yield lambda: 100
for i in range(10):
yield lambda: i
for f in run():
print(f())
""",
branchz="34 3. 67 6.",
branchz_missing="",
)
self.check_coverage(
"""\
def run():
yield lambda: 100 # no branch miss
for f in run():
print(f())
""",
branchz="45 4.",
branchz_missing="",
)
def test_bug_324(self) -> None:
# This code is tricky: the list() call pulls all the values from gen(),
# but each of them is a generator itself that is never iterated. As a
# result, the generator expression on line 3 is never entered or run.
self.check_coverage(
"""\
def gen(inp):
for n in inp:
yield (i * 2 for i in range(n))
list(gen([1,2,3]))
""",
branchz="23 2.",
branchz_missing="",
)
def test_coroutines(self) -> None:
self.check_coverage(
"""\
def double_inputs():
while len([1]): # avoid compiler differences
x = yield
x *= 2
yield x
gen = double_inputs()
next(gen)
print(gen.send(10))
next(gen)
print(gen.send(6))
""",
branchz="23 2-1",
branchz_missing="2-1",
)
assert self.stdout() == "20\n12\n"
def test_yield_from(self) -> None:
self.check_coverage(
"""\
def gen(inp):
i = 2
for n in inp:
i = 4
yield from range(3)
i = 6
i = 7
list(gen([1,2,3]))
""",
branchz="34 37",
branchz_missing="",
)
def test_abandoned_yield(self) -> None:
        # https://github.com/nedbat/coveragepy/issues/440
self.check_coverage(
"""\
def gen():
print(2)
yield 3
print(4)
print(next(gen()))
""",
lines=[1, 2, 3, 4, 6],
missing="4",
branchz="",
branchz_missing="",
)
assert self.stdout() == "2\n3\n"
|
YieldTest
|
python
|
ray-project__ray
|
rllib/examples/envs/classes/memory_leaking_env.py
|
{
"start": 182,
"end": 915
}
|
class ____(RandomEnv):
"""An env that leaks very little memory.
Useful for proving that our memory-leak tests can catch the
slightest leaks.
"""
def __init__(self, config=None):
super().__init__(config)
self._leak = {}
self._steps_after_reset = 0
@override(RandomEnv)
def reset(self, *, seed=None, options=None):
self._steps_after_reset = 0
return super().reset(seed=seed, options=options)
@override(RandomEnv)
def step(self, action):
self._steps_after_reset += 1
# Only leak once an episode.
if self._steps_after_reset == 2:
self._leak[uuid.uuid4().hex.upper()] = 1
return super().step(action)
|
MemoryLeakingEnv
|
python
|
gevent__gevent
|
src/greentest/3.14/test_thread.py
|
{
"start": 13300,
"end": 14691
}
|
class ____(unittest.TestCase):
def setUp(self):
self.read_fd, self.write_fd = os.pipe()
@support.requires_fork()
@threading_helper.reap_threads
def test_forkinthread(self):
pid = None
def fork_thread(read_fd, write_fd):
nonlocal pid
# Ignore the warning about fork with threads.
with warnings.catch_warnings(category=DeprecationWarning,
action="ignore"):
# fork in a thread (DANGER, undefined per POSIX)
if (pid := os.fork()):
# parent process
return
# child process
try:
os.close(read_fd)
os.write(write_fd, b"OK")
finally:
os._exit(0)
with threading_helper.wait_threads_exit():
thread.start_new_thread(fork_thread, (self.read_fd, self.write_fd))
self.assertEqual(os.read(self.read_fd, 2), b"OK")
os.close(self.write_fd)
self.assertIsNotNone(pid)
support.wait_process(pid, exitcode=0)
def tearDown(self):
try:
os.close(self.read_fd)
except OSError:
pass
try:
os.close(self.write_fd)
except OSError:
pass
if __name__ == "__main__":
unittest.main()
|
TestForkInThread
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/static_partitioned_job.py
|
{
"start": 400,
"end": 653
}
|
class ____(Config):
continent_name: str
@op
def continent_op(context: OpExecutionContext, config: ContinentOpConfig):
context.log.info(config.continent_name)
@job(config=continent_config)
def continent_job():
continent_op()
|
ContinentOpConfig
|
python
|
keras-team__keras
|
keras/src/trainers/data_adapters/py_dataset_adapter_test.py
|
{
"start": 2466,
"end": 2915
}
|
class ____(py_dataset_adapter.PyDataset):
@property
def num_batches(self):
return 4
def __getitem__(self, index):
if index < 2:
return (
np.random.random((8, 4)).astype("float32"),
np.random.random((8, 2)).astype("float32"),
)
raise ValueError("Expected exception")
@pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Flaky on GPU")
|
ExceptionPyDataset
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/datafusion.py
|
{
"start": 18800,
"end": 22500
}
|
class ____(GoogleCloudBaseOperator):
"""
Creates a Cloud Data Fusion pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionCreatePipelineOperator`
:param pipeline_name: Your pipeline name.
:param pipeline: The pipeline definition. For more information check:
https://docs.cdap.io/cdap/current/en/developer-manual/pipelines/developing-pipelines.html#pipeline-configuration-file-format
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (DataFusionPipelineLink(),)
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"impersonation_chain",
)
def __init__(
self,
*,
pipeline_name: str,
pipeline: dict[str, Any],
instance_name: str,
location: str,
namespace: str = "default",
project_id: str = PROVIDE_PROJECT_ID,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.pipeline = pipeline
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
hook.create_pipeline(
pipeline_name=self.pipeline_name,
pipeline=self.pipeline,
instance_url=api_url,
namespace=self.namespace,
)
DataFusionPipelineLink.persist(
context=context,
uri=instance["serviceEndpoint"],
pipeline_name=self.pipeline_name,
namespace=self.namespace,
)
self.log.info("Pipeline %s created", self.pipeline_name)
|
CloudDataFusionCreatePipelineOperator
|
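A hedged usage sketch for the operator above, as it might appear in a DAG file; every value here (task_id, names, project, and the truncated pipeline dict) is a placeholder rather than a documented example:
create_pipeline = CloudDataFusionCreatePipelineOperator(
    task_id="create_pipeline",              # standard BaseOperator kwarg
    pipeline_name="example_pipeline",
    pipeline={"name": "example_pipeline"},  # truncated CDAP pipeline JSON
    instance_name="example-instance",
    location="us-central1",
    project_id="example-project",
)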
python
|
jazzband__django-oauth-toolkit
|
tests/test_password.py
|
{
"start": 1308,
"end": 2708
}
|
class ____(BaseTest):
def test_get_token(self):
"""
Request an access token using Resource Owner Password Flow
"""
token_request_data = {
"grant_type": "password",
"username": "test_user",
"password": "123456",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(set(content["scope"].split()), {"read", "write"})
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_bad_credentials(self):
"""
        Request an access token using Resource Owner Password Flow with invalid credentials
"""
token_request_data = {
"grant_type": "password",
"username": "test_user",
"password": "NOT_MY_PASS",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
|
TestPasswordTokenView
|
python
|
kamyu104__LeetCode-Solutions
|
Python/reconstruct-original-digits-from-english.py
|
{
"start": 62,
"end": 901
}
|
class ____(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
# The count of each char in each number string.
cnts = [Counter(_) for _ in ["zero", "one", "two", "three", \
"four", "five", "six", "seven", \
"eight", "nine"]]
# The order for greedy method.
order = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]
# The unique char in the order.
unique_chars = ['z', 'o', 'w', 't', 'u', \
'f', 'x', 's', 'g', 'n']
cnt = Counter(list(s))
res = []
for i in order:
while cnt[unique_chars[i]] > 0:
cnt -= cnts[i]
res.append(i)
res.sort()
return "".join(map(str, res))
|
Solution
|
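The greedy loop above leans on Counter's in-place subtraction, which silently drops keys whose counts fall to zero or below. A small illustration of that semantics:
from collections import Counter

cnt = Counter("onetwo")  # o:2, n:1, e:1, t:1, w:1
cnt -= Counter("two")    # remove one spelled-out "two"
print(cnt)               # o:1, n:1, e:1 -- zero counts are gone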
python
|
facelessuser__soupsieve
|
tests/test_level4/test_required.py
|
{
"start": 53,
"end": 877
}
|
class ____(util.TestCase):
"""Test required selectors."""
MARKUP = """
<form>
<input id="1" type="name" required>
<input id="2" type="checkbox" required>
<input id="3" type="email">
<textarea id="4" name="name" cols="30" rows="10" required></textarea>
<select id="5" name="nm" required>
<!-- options -->
</select>
</form>
"""
def test_required(self):
"""Test required."""
self.assert_selector(
self.MARKUP,
":required",
['1', '2', '4', '5'],
flags=util.HTML
)
def test_specific_required(self):
"""Test specific required."""
self.assert_selector(
self.MARKUP,
"input:required",
['1', '2'],
flags=util.HTML
)
|
TestRequired
|
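A hedged sketch of running the same selector outside the test harness, assuming soupsieve and BeautifulSoup are installed:
import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup(TestRequired.MARKUP, "html.parser")
print([el["id"] for el in sv.select(":required", soup)])  # ['1', '2', '4', '5']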
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/linalg/linear_operator_test_util.py
|
{
"start": 38408,
"end": 40409
}
|
class ____(
LinearOperatorDerivedClassTest, metaclass=abc.ABCMeta):
"""Base test class appropriate for square operators.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shapes_info((0, 0)),
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# This operator is square, so rhs and x will have same shape.
# adjoint value makes no difference because the operator shape doesn't
# change since it is square, but be pedantic.
return self.make_x(operator, adjoint=not adjoint, with_batch=with_batch)
def make_x(self, operator, adjoint, with_batch=True):
# Value of adjoint makes no difference because the operator is square.
# Return the number of systems to solve, R, equal to 1 or 2.
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
|
SquareLinearOperatorDerivedClassTest
|
python
|
scipy__scipy
|
scipy/_lib/_testutils.py
|
{
"start": 3935,
"end": 12279
}
|
class ____:
'''
These are situations that can be tested in our pythran tests:
- A function with multiple array arguments and then
other positional and keyword arguments.
- A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`.
Note: list/tuple input is not yet tested!
    `self.arguments`: A dictionary whose keys are argument indices and whose
    values are tuples of (array value, all supported dtypes)
    `self.partialfunc`: A function used to freeze non-array arguments
    that are of no interest in the original function
'''
ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
ALL_FLOAT = [np.float32, np.float64]
ALL_COMPLEX = [np.complex64, np.complex128]
def setup_method(self):
self.arguments = {}
self.partialfunc = None
self.expected = None
def get_optional_args(self, func):
# get optional arguments with its default value,
# used for testing keywords
signature = inspect.signature(func)
optional_args = {}
for k, v in signature.parameters.items():
if v.default is not inspect.Parameter.empty:
optional_args[k] = v.default
return optional_args
def get_max_dtype_list_length(self):
# get the max supported dtypes list length in all arguments
max_len = 0
for arg_idx in self.arguments:
cur_len = len(self.arguments[arg_idx][1])
if cur_len > max_len:
max_len = cur_len
return max_len
def get_dtype(self, dtype_list, dtype_idx):
# get the dtype from dtype_list via index
# if the index is out of range, then return the last dtype
if dtype_idx > len(dtype_list)-1:
return dtype_list[-1]
else:
return dtype_list[dtype_idx]
def test_all_dtypes(self):
for type_idx in range(self.get_max_dtype_list_length()):
args_array = []
for arg_idx in self.arguments:
new_dtype = self.get_dtype(self.arguments[arg_idx][1],
type_idx)
args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
self.pythranfunc(*args_array)
def test_views(self):
args_array = []
for arg_idx in self.arguments:
args_array.append(self.arguments[arg_idx][0][::-1][::-1])
self.pythranfunc(*args_array)
def test_strided(self):
args_array = []
for arg_idx in self.arguments:
args_array.append(np.repeat(self.arguments[arg_idx][0],
2, axis=0)[::2])
self.pythranfunc(*args_array)
def _pytest_has_xdist():
"""
Check if the pytest-xdist plugin is installed, providing parallel tests
"""
    # Check xdist exists without importing, otherwise pytest emits warnings
from importlib.util import find_spec
return find_spec('xdist') is not None
def check_free_memory(free_mb):
"""
Check *free_mb* of memory is available, otherwise do pytest.skip
"""
import pytest
try:
mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
msg = '{} MB memory required, but environment SCIPY_AVAILABLE_MEM={}'.format(
free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
except KeyError:
mem_free = _get_mem_available()
if mem_free is None:
pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
"variable to free memory in MB to run the test.")
msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available'
if mem_free < free_mb * 1e6:
pytest.skip(msg)
def _parse_size(size_str):
suffixes = {'': 1e6,
'b': 1.0,
'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())),
size_str,
re.I)
if not m or m.group(2) not in suffixes:
raise ValueError("Invalid size string")
return float(m.group(1)) * suffixes[m.group(2)]
def _get_mem_available():
"""
Get information about memory available, not counting swap.
"""
try:
import psutil
return psutil.virtual_memory().available
except (ImportError, AttributeError):
pass
if sys.platform.startswith('linux'):
info = {}
with open('/proc/meminfo') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
if 'memavailable' in info:
# Linux >= 3.14
return info['memavailable']
else:
return info['memfree'] + info['cached']
return None
def _test_cython_extension(tmp_path, srcdir):
"""
Helper function to test building and importing Cython modules that
make use of the Cython APIs for BLAS, LAPACK, optimize, and special.
"""
import pytest
try:
subprocess.check_call(["meson", "--version"])
except FileNotFoundError:
pytest.skip("No usable 'meson' found")
# Make safe for being called by multiple threads within one test
tmp_path = tmp_path / str(threading.get_ident())
# build the examples in a temporary directory
mod_name = os.path.split(srcdir)[1]
shutil.copytree(srcdir, tmp_path / mod_name)
build_dir = tmp_path / mod_name / 'tests' / '_cython_examples'
target_dir = build_dir / 'build'
os.makedirs(target_dir, exist_ok=True)
# Ensure we use the correct Python interpreter even when `meson` is
# installed in a different Python environment (see numpy#24956)
native_file = str(build_dir / 'interpreter-native-file.ini')
with open(native_file, 'w') as f:
f.write("[binaries]\n")
f.write(f"python = '{sys.executable}'")
if sys.platform == "win32":
subprocess.check_call(["meson", "setup",
"--buildtype=release",
"--native-file", native_file,
"--vsenv", str(build_dir)],
cwd=target_dir,
)
else:
subprocess.check_call(["meson", "setup",
"--native-file", native_file, str(build_dir)],
cwd=target_dir
)
subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir)
# import without adding the directory to sys.path
suffix = sysconfig.get_config_var('EXT_SUFFIX')
def load(modname):
so = (target_dir / modname).with_suffix(suffix)
spec = spec_from_file_location(modname, so)
mod = module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
# test that the module can be imported
return load("extending"), load("extending_cpp")
def _run_concurrent_barrier(n_workers, fn, *args, **kwargs):
"""
Run a given function concurrently across a given number of threads.
This is equivalent to using a ThreadPoolExecutor, but using the threading
primitives instead. This function ensures that the closure passed by
parameter gets called concurrently by setting up a barrier before it gets
called before any of the threads.
Arguments
---------
n_workers: int
Number of concurrent threads to spawn.
fn: callable
Function closure to execute concurrently. Its first argument will
be the thread id.
*args: tuple
Variable number of positional arguments to pass to the function.
**kwargs: dict
Keyword arguments to pass to the function.
"""
barrier = threading.Barrier(n_workers)
def closure(i, *args, **kwargs):
barrier.wait()
fn(i, *args, **kwargs)
workers = []
for i in range(0, n_workers):
workers.append(threading.Thread(
target=closure,
args=(i,) + args, kwargs=kwargs))
for worker in workers:
worker.start()
for worker in workers:
worker.join()
|
_TestPythranFunc
|
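A short usage sketch for _run_concurrent_barrier above; the worker closure and the shared list are illustrative, and only the standard library is assumed:
import threading

hits = []
lock = threading.Lock()

def worker(i):
    # Every thread reaches the barrier before any of them runs this body.
    with lock:
        hits.append(i)

_run_concurrent_barrier(4, worker)
assert sorted(hits) == [0, 1, 2, 3]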
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 523627,
"end": 524527
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("completed_iterations", "duration", "iterations", "start_day")
completed_iterations = sgqlc.types.Field(
sgqlc.types.non_null(
sgqlc.types.list_of(
sgqlc.types.non_null("ProjectV2IterationFieldIteration")
)
),
graphql_name="completedIterations",
)
duration = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="duration")
iterations = sgqlc.types.Field(
sgqlc.types.non_null(
sgqlc.types.list_of(
sgqlc.types.non_null("ProjectV2IterationFieldIteration")
)
),
graphql_name="iterations",
)
start_day = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="startDay")
|
ProjectV2IterationFieldConfiguration
|
python
|
huggingface__transformers
|
src/transformers/models/gpt_neox/modeling_gpt_neox.py
|
{
"start": 14113,
"end": 15907
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: GPTNeoXConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = GPTNeoXAttention(config=config, layer_idx=layer_idx)
self.mlp = GPTNeoXMLP(config)
self.input_layernorm = GPTNeoXRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = GPTNeoXRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
GPTNeoXDecoderLayer
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/_encode.py
|
{
"start": 10066,
"end": 11773
}
|
class ____(Counter):
"""Counter with support for nan values."""
def __init__(self, items):
super().__init__(self._generate_items(items))
def _generate_items(self, items):
"""Generate items without nans. Stores the nan counts separately."""
for item in items:
if not is_scalar_nan(item):
yield item
continue
if not hasattr(self, "nan_count"):
self.nan_count = 0
self.nan_count += 1
def __missing__(self, key):
if hasattr(self, "nan_count") and is_scalar_nan(key):
return self.nan_count
raise KeyError(key)
def _get_counts(values, uniques):
"""Get the count of each of the `uniques` in `values`.
The counts will use the order passed in by `uniques`. For non-object dtypes,
`uniques` is assumed to be sorted and `np.nan` is at the end.
"""
if values.dtype.kind in "OU":
counter = _NaNCounter(values)
output = np.zeros(len(uniques), dtype=np.int64)
for i, item in enumerate(uniques):
with suppress(KeyError):
output[i] = counter[item]
return output
unique_values, counts = _unique_np(values, return_counts=True)
    # Reorder unique_values based on input: `uniques`
uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)
if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):
uniques_in_values[-1] = True
unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])
output = np.zeros_like(uniques, dtype=np.int64)
output[uniques_in_values] = counts[unique_valid_indices]
return output
|
_NaNCounter
|
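Why the nan bookkeeping above is needed: distinct float("nan") objects compare unequal, so a plain Counter gives each one its own key, whereas _NaNCounter pools them. A sketch reusing the class defined above:
from collections import Counter

vals = [float("nan"), float("nan"), 1.0]
print(Counter(vals))    # two separate nan keys, each with count 1
c = _NaNCounter(vals)
print(c[float("nan")])  # 2 -- every nan is funneled into nan_count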
python
|
walkccc__LeetCode
|
solutions/2915. Length of the Longest Subsequence That Sums to Target/2915.py
|
{
"start": 0,
"end": 651
}
|
class ____:
def lengthOfLongestSubsequence(self, nums: list[int], target: int) -> int:
n = len(nums)
# dp[i][j] := the maximum length of any subsequence of the first i numbers
# that sum to j
dp = [[-1] * (target + 1) for _ in range(n + 1)]
for i in range(n + 1):
dp[i][0] = 0
for i in range(1, n + 1):
num = nums[i - 1]
for j in range(1, target + 1):
# 1. Skip `num`.
if j < num or dp[i - 1][j - num] == -1:
dp[i][j] = dp[i - 1][j]
# 2. Skip `num` or pick `num`.
else:
dp[i][j] = max(dp[i - 1][j], 1 + dp[i - 1][j - num])
return dp[n][target]
|
Solution
|
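A quick worked example of the recurrence above: with nums = [1, 2, 3, 4] and target = 7, the subsequences summing to 7 are [3, 4] and [1, 2, 4], so the longest has length 3.
print(Solution().lengthOfLongestSubsequence([1, 2, 3, 4], 7))  # 3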
python
|
spyder-ide__spyder
|
spyder/plugins/completion/api.py
|
{
"start": 13197,
"end": 17401
}
|
class ____:
"""Text document synchronization modes supported by a lsp-server"""
NONE = 0 # Text synchronization is not supported
FULL = 1 # Text synchronization requires all document contents
INCREMENTAL = 2 # Partial text synchronization is supported
# Save options.
SAVE_OPTIONS = {
# The client is supposed to include the content on save.
'includeText': True
}
# Text synchronization capabilities
TEXT_DOCUMENT_SYNC_OPTIONS = {
# Open and close notifications are sent to the server.
'openClose': True,
# Change notifications are sent to the server.
# See TextDocumentSyncKind.NONE, TextDocumentSyncKind.FULL
# and TextDocumentSyncKind.INCREMENTAL.
'change': TextDocumentSyncKind.NONE,
# Will save notifications are sent to the server.
'willSave': False,
# Will save wait until requests are sent to the server.
'willSaveWaitUntil': False,
# Save notifications are sent to the server.
'save': SAVE_OPTIONS
}
# Code completion options
COMPLETION_OPTIONS = {
# The server provides support to resolve additional
# information for a completion item.
'resolveProvider': False,
# The characters that trigger completion automatically.
'triggerCharacters': []
}
# Signature help options
SIGNATURE_HELP_OPTIONS = {
# The characters that trigger signature help automatically.
'triggerCharacters': []
}
# Code lens options
CODE_LENS_OPTIONS = {
# Code lens has a resolve provider as well.
'resolveProvider': False
}
# Format document on type options
DOCUMENT_ON_TYPE_FORMATTING_OPTIONS = {
# A character on which formatting should be triggered, like `}`.
'firstTriggerCharacter': None,
# More trigger characters.
'moreTriggerCharacter': [],
}
# Document link options
DOCUMENT_LINK_OPTIONS = {
# Document links have a resolve provider as well.
'resolveProvider': False
}
# Execute command options.
EXECUTE_COMMAND_OPTIONS = {
# The commands to be executed on the server
'commands': []
}
# Workspace options.
WORKSPACE_OPTIONS = {
# The server has support for workspace folders
'workspaceFolders': {
'supported': False,
'changeNotifications': False
}
}
    # Available server capabilities options, as defined by the protocol.
SERVER_CAPABILITES = {
# Defines how text documents are synced.
# Is either a detailed structure defining each notification or
# for backwards compatibility the TextDocumentSyncKind number.
'textDocumentSync': TEXT_DOCUMENT_SYNC_OPTIONS,
# The server provides hover support.
'hoverProvider': False,
# The server provides completion support.
'completionProvider': COMPLETION_OPTIONS,
# The server provides signature help support.
'signatureHelpProvider': SIGNATURE_HELP_OPTIONS,
# The server provides goto definition support.
'definitionProvider': False,
# The server provides find references support.
'referencesProvider': False,
# The server provides document highlight support.
'documentHighlightProvider': False,
# The server provides document symbol support.
'documentSymbolProvider': False,
# The server provides workspace symbol support.
'workspaceSymbolProvider': False,
# The server provides code actions.
'codeActionProvider': False,
# The server provides code lens.
'codeLensProvider': CODE_LENS_OPTIONS,
# The server provides document formatting.
'documentFormattingProvider': False,
# The server provides document range formatting.
'documentRangeFormattingProvider': False,
# The server provides document formatting on typing.
'documentOnTypeFormattingProvider': DOCUMENT_ON_TYPE_FORMATTING_OPTIONS,
# The server provides rename support.
'renameProvider': False,
# The server provides document link support.
'documentLinkProvider': DOCUMENT_LINK_OPTIONS,
# The server provides execute command support.
'executeCommandProvider': EXECUTE_COMMAND_OPTIONS,
        # Workspace-specific server capabilities.
'workspace': WORKSPACE_OPTIONS,
# Experimental server capabilities.
'experimental': None
}
|
TextDocumentSyncKind
|
python
|
spyder-ide__spyder
|
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
|
{
"start": 4947,
"end": 7097
}
|
class ____:
Row = 'row_section'
ColumnAndRest = 'column_section'
# Supported real and complex number types
REAL_NUMBER_TYPES = (float, int, np.int64, np.int32)
COMPLEX_NUMBER_TYPES = (complex, np.complex64, np.complex128)
    # Used to convert strings that represent False, since bool('False') returns True
_bool_false = ['false', 'f', '0', '0.', '0.0', ' ']
# Default format for data frames with floats
DEFAULT_FORMAT = '.6g'
# Limit at which dataframe is considered so large that it is loaded on demand
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
ROWS_TO_LOAD = 500
COLS_TO_LOAD = 40
# Background colours
BACKGROUND_NUMBER_MINHUE = 0.66 # hue for largest number
BACKGROUND_NUMBER_HUERANGE = 0.33 # (hue for smallest) minus (hue for largest)
BACKGROUND_NUMBER_SATURATION = 0.7
BACKGROUND_NUMBER_VALUE = 1.0
BACKGROUND_NUMBER_ALPHA = 0.6
BACKGROUND_NONNUMBER_COLOR = SpyderPalette.COLOR_BACKGROUND_2
BACKGROUND_STRING_ALPHA = 0.05
BACKGROUND_MISC_ALPHA = 0.3
# =============================================================================
# ---- Utility functions
# =============================================================================
def is_any_real_numeric_dtype(dtype) -> bool:
"""
Test whether a Pandas dtype is a real numeric type.
"""
try:
import pandas.api.types
return pandas.api.types.is_any_real_numeric_dtype(dtype)
except Exception:
# Pandas version 1
return dtype in REAL_NUMBER_TYPES
def bool_false_check(value):
"""
    Used to convert strings that represent a false value to ''.
    Needed since bool() returns True for any non-empty string.
"""
if value.lower() in _bool_false:
value = ''
return value
def global_max(col_vals, index):
"""Returns the global maximum and minimum."""
col_vals_without_None = [x for x in col_vals if x is not None]
max_col, min_col = zip(*col_vals_without_None)
return max(max_col), min(min_col)
# =============================================================================
# ---- Main classes
# =============================================================================
|
DataframeEditorToolbarSections
|
python
|
apache__airflow
|
airflow-core/src/airflow/jobs/job.py
|
{
"start": 3002,
"end": 16970
}
|
class ____(Base, LoggingMixin):
"""
The ORM class representing Job stored in the database.
Jobs are processing items with state and duration that aren't task instances.
"""
__tablename__ = "job"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
dag_id: Mapped[str | None] = mapped_column(
String(ID_LEN),
)
state: Mapped[str | None] = mapped_column(String(20))
job_type: Mapped[str | None] = mapped_column(String(30))
start_date: Mapped[datetime | None] = mapped_column(UtcDateTime())
end_date: Mapped[datetime | None] = mapped_column(UtcDateTime())
latest_heartbeat: Mapped[datetime | None] = mapped_column(UtcDateTime())
executor_class: Mapped[str | None] = mapped_column(String(500))
hostname: Mapped[str | None] = mapped_column(String(500))
unixname: Mapped[str | None] = mapped_column(String(1000))
__table_args__ = (
Index("job_type_heart", job_type, latest_heartbeat),
Index("idx_job_state_heartbeat", state, latest_heartbeat),
Index("idx_job_dag_id", dag_id),
)
task_instances_enqueued = relationship(
"TaskInstance",
primaryjoin="Job.id == foreign(TaskInstance.queued_by_job_id)",
backref=backref("queued_by_job", uselist=False),
)
dag_runs = relationship(
"DagRun",
primaryjoin=lambda: Job.id == foreign(_resolve_dagrun_model().creating_job_id),
backref="creating_job",
)
dag_model = relationship(
"DagModel",
primaryjoin="Job.dag_id == DagModel.dag_id",
viewonly=True,
foreign_keys=[dag_id],
)
"""
TaskInstances which have been enqueued by this Job.
Only makes sense for SchedulerJob.
"""
def __init__(self, executor: BaseExecutor | None = None, heartrate=None, **kwargs):
# Save init parameters as DB fields
self.heartbeat_failed = False
self.hostname = get_hostname()
if executor:
self.executors = [executor]
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
self.previous_heartbeat = None
if heartrate is not None:
self.heartrate = heartrate
self.unixname = getuser()
self.max_tis_per_query: int = conf.getint("scheduler", "max_tis_per_query")
try:
get_listener_manager().hook.on_starting(component=self)
except Exception:
self.log.exception("error calling listener")
super().__init__(**kwargs)
@property
def executor(self):
return self.executors[0]
@cached_property
def executors(self):
return ExecutorLoader.init_executors()
@cached_property
def heartrate(self) -> float:
return Job._heartrate(str(self.job_type))
def is_alive(self) -> bool:
"""
Is this job currently alive.
We define alive as in a state of RUNNING, and having sent a heartbeat
within a multiple of the heartrate (default of 2.1)
"""
threshold_value = health_check_threshold(self.job_type, self.heartrate)
return Job._is_alive(
state=self.state,
health_check_threshold_value=threshold_value,
latest_heartbeat=self.latest_heartbeat,
)
@provide_session
def kill(self, session: Session = NEW_SESSION) -> NoReturn:
"""Handle on_kill callback and updates state in database."""
try:
self.on_kill()
except Exception as e:
self.log.error("on_kill() method failed: %s", e)
job = session.scalar(select(Job).where(Job.id == self.id).limit(1))
if job is not None:
job.end_date = timezone.utcnow()
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
"""Will be called when an external kill command is received."""
@provide_session
def heartbeat(
self, heartbeat_callback: Callable[[Session], None], session: Session = NEW_SESSION
) -> None:
"""
Update the job's entry in the database with the latest_heartbeat timestamp.
This allows for the job to be killed externally and allows the system
to monitor what is actually active. For instance, an old heartbeat
for SchedulerJob would mean something is wrong. This also allows for
any job to be killed externally, regardless of who is running it or on
which machine it is running.
Note that if your heart rate is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
:param heartbeat_callback: Callback that will be run when the heartbeat is recorded in the Job
        :param session: the session to use for saving the job
"""
previous_heartbeat = self.latest_heartbeat
with DebugTrace.start_span(span_name="heartbeat", component="Job") as span:
try:
span.set_attribute("heartbeat", str(self.latest_heartbeat))
# This will cause it to load from the db
session.merge(self)
previous_heartbeat = self.latest_heartbeat
if self.state == JobState.RESTARTING:
self.kill()
# Figure out how long to sleep for
sleep_for: float = 0
if self.latest_heartbeat:
seconds_remaining = (
self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
)
sleep_for = max(0, seconds_remaining)
if span.is_recording():
span.add_event(name="sleep", attributes={"sleep_for": sleep_for})
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
# Make the session aware of this object
session.merge(self)
self.latest_heartbeat = timezone.utcnow()
session.commit()
time_since_last_heartbeat: float = (
0
if previous_heartbeat is None
else (timezone.utcnow() - previous_heartbeat).total_seconds()
)
health_check_threshold_value = health_check_threshold(self.job_type, self.heartrate)
if time_since_last_heartbeat > health_check_threshold_value:
self.log.info("Heartbeat recovered after %.2f seconds", time_since_last_heartbeat)
# At this point, the DB has updated.
previous_heartbeat = self.latest_heartbeat
heartbeat_callback(session)
self.log.debug("[heartbeat]")
self.heartbeat_failed = False
except OperationalError:
Stats.incr(convert_camel_to_snake(self.__class__.__name__) + "_heartbeat_failure", 1, 1)
if not self.heartbeat_failed:
self.log.exception("%s heartbeat failed with error", self.__class__.__name__)
self.heartbeat_failed = True
msg = f"{self.__class__.__name__} heartbeat got an exception"
if span.is_recording():
span.add_event(name="error", attributes={"message": msg})
if self.is_alive():
self.log.error(
"%s heartbeat failed with error. Scheduler may go into unhealthy state",
self.__class__.__name__,
)
msg = f"{self.__class__.__name__} heartbeat failed with error. Scheduler may go into unhealthy state"
if span.is_recording():
span.add_event(name="error", attributes={"message": msg})
else:
msg = f"{self.__class__.__name__} heartbeat failed with error. Scheduler is in unhealthy state"
self.log.error(msg)
if span.is_recording():
span.add_event(name="error", attributes={"message": msg})
# We didn't manage to heartbeat, so make sure that the timestamp isn't updated
self.latest_heartbeat = previous_heartbeat
@provide_session
def prepare_for_execution(self, session: Session = NEW_SESSION):
"""Prepare the job for execution."""
Stats.incr(self.__class__.__name__.lower() + "_start", 1, 1)
self.state = JobState.RUNNING
self.start_date = timezone.utcnow()
session.add(self)
session.commit()
make_transient(self)
@provide_session
def complete_execution(self, session: Session = NEW_SESSION):
try:
get_listener_manager().hook.before_stopping(component=self)
except Exception:
self.log.exception("error calling listener")
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + "_end", 1, 1)
@provide_session
def most_recent_job(self, session: Session = NEW_SESSION) -> Job | None:
"""Return the most recent job of this type, if any, based on last heartbeat received."""
return most_recent_job(str(self.job_type), session=session)
@staticmethod
def _heartrate(job_type: str) -> float:
if job_type == "TriggererJob":
return conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC")
if job_type == "SchedulerJob":
return conf.getfloat("scheduler", "SCHEDULER_HEARTBEAT_SEC")
# Heartrate used to be hardcoded to scheduler, so in all other
# cases continue to use that value for back compat
return conf.getfloat("scheduler", "JOB_HEARTBEAT_SEC")
@staticmethod
def _is_alive(
state: JobState | str | None,
health_check_threshold_value: float | int,
latest_heartbeat: datetime | None,
) -> bool:
if latest_heartbeat is None:
return False
return (
state == JobState.RUNNING
and (timezone.utcnow() - latest_heartbeat).total_seconds() < health_check_threshold_value
)
@provide_session
def most_recent_job(job_type: str, session: Session = NEW_SESSION) -> Job | None:
"""
Return the most recent job of this type, if any, based on last heartbeat received.
    Jobs in "running" state take precedence over others to make sure an alive
    job is returned if one is available.
:param job_type: job type to query for to get the most recent job for
:param session: Database session
"""
return session.scalar(
select(Job)
.where(Job.job_type == job_type)
.order_by(
# Put "running" jobs at the front.
case({JobState.RUNNING: 0}, value=Job.state, else_=1),
Job.latest_heartbeat.desc(),
)
.limit(1)
)
@provide_session
def run_job(
job: Job, execute_callable: Callable[[], int | None], session: Session = NEW_SESSION
) -> int | None:
"""
Run the job.
    The Job is always an ORM object, and setting its state happens within the
    same DB session, which is kept open throughout the whole execution.
:meta private:
"""
job.prepare_for_execution(session=session)
try:
return execute_job(job, execute_callable=execute_callable)
finally:
job.complete_execution(session=session)
def execute_job(job: Job, execute_callable: Callable[[], int | None]) -> int | None:
"""
Execute the job.
    Job execution requires no session, as executing a job generally does not require an
    active database connection. A session might be temporarily acquired and used if the job
    runs a heartbeat during execution, but the connection is only held for the duration of the
    heartbeat, and in the case of the AIP-44 implementation it happens over the Internal API rather
    than directly via the database.
    After the job is completed, the state of the Job is updated in the database,
    which happens in the "complete_execution" step (which again can be executed locally in the case
    of database operations, or over the Internal API call).
    :param job: Job to execute - a DB job. The exact object does not really matter,
    because apart from running the heartbeat and setting the state, the runner
    should not modify the job.
:param execute_callable: callable to execute when running the job.
:meta private:
"""
ret = None
try:
ret = execute_callable()
# In case of max runs or max duration
job.state = JobState.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
job.state = JobState.SUCCESS
except Exception:
job.state = JobState.FAILED
raise
return ret
@add_debug_span
def perform_heartbeat(
job: Job, heartbeat_callback: Callable[[Session], None], only_if_necessary: bool
) -> None:
"""
    Perform a heartbeat for the Job passed to it, optionally checking whether it is necessary.
:param job: job to perform heartbeat for
:param heartbeat_callback: callback to run by the heartbeat
:param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for
triggerer for example)
"""
seconds_remaining: float = 0.0
if job.latest_heartbeat and job.heartrate:
seconds_remaining = job.heartrate - (timezone.utcnow() - job.latest_heartbeat).total_seconds()
if seconds_remaining > 0 and only_if_necessary:
return
job.heartbeat(heartbeat_callback=heartbeat_callback)
|
Job
|
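The sleep rule spelled out in Job.heartbeat's docstring, reduced to the arithmetic the method actually performs (the numbers are illustrative):
heartrate = 60.0
seconds_since_last_heartbeat = 10.0
sleep_for = max(0.0, heartrate - seconds_since_last_heartbeat)
assert sleep_for == 50.0  # called 10 s in, sleep the remaining 50 s
# Past 60 s, the difference goes negative and the method does not sleep at all.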
python
|
huggingface__transformers
|
tests/models/swin2sr/test_image_processing_swin2sr.py
|
{
"start": 1184,
"end": 3205
}
|
class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_rescale=True,
rescale_factor=1 / 255,
do_pad=True,
size_divisor=8,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.size_divisor = size_divisor
def prepare_image_processor_dict(self):
return {
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
"size_divisor": self.size_divisor,
}
def expected_output_image_shape(self, images):
img = images[0]
if isinstance(img, Image.Image):
input_width, input_height = img.size
elif isinstance(img, np.ndarray):
input_height, input_width = img.shape[-3:-1]
else:
input_height, input_width = img.shape[-2:]
pad_height = (input_height // self.size_divisor + 1) * self.size_divisor - input_height
pad_width = (input_width // self.size_divisor + 1) * self.size_divisor - input_width
return self.num_channels, input_height + pad_height, input_width + pad_width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
|
Swin2SRImageProcessingTester
|
python
|
pallets__click
|
src/click/types.py
|
{
"start": 6436,
"end": 6684
}
|
class ____(ParamType):
name = "text"
def convert(
self, value: t.Any, param: Parameter | None, ctx: Context | None
) -> t.Any:
return value
def __repr__(self) -> str:
return "UNPROCESSED"
|
UnprocessedParamType
|
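A hedged sketch of where this type shows up in practice: click exposes a singleton of it as click.UNPROCESSED, which passes argument values through without any conversion, e.g. when forwarding unknown options:
import click

@click.command(context_settings={"ignore_unknown_options": True})
@click.argument("args", nargs=-1, type=click.UNPROCESSED)
def wrapper(args):
    click.echo(" ".join(args))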
python
|
doocs__leetcode
|
solution/0500-0599/0558.Logical OR of Two Binary Grids Represented as Quad-Trees/Solution.py
|
{
"start": 329,
"end": 1454
}
|
class ____:
def intersect(self, quadTree1: "Node", quadTree2: "Node") -> "Node":
def dfs(t1, t2):
if t1.isLeaf and t2.isLeaf:
return Node(t1.val or t2.val, True)
if t1.isLeaf:
return t1 if t1.val else t2
if t2.isLeaf:
return t2 if t2.val else t1
res = Node()
res.topLeft = dfs(t1.topLeft, t2.topLeft)
res.topRight = dfs(t1.topRight, t2.topRight)
res.bottomLeft = dfs(t1.bottomLeft, t2.bottomLeft)
res.bottomRight = dfs(t1.bottomRight, t2.bottomRight)
isLeaf = (
res.topLeft.isLeaf
and res.topRight.isLeaf
and res.bottomLeft.isLeaf
and res.bottomRight.isLeaf
)
sameVal = (
res.topLeft.val
== res.topRight.val
== res.bottomLeft.val
== res.bottomRight.val
)
if isLeaf and sameVal:
res = res.topLeft
return res
return dfs(quadTree1, quadTree2)
|
Solution
|
python
|
python-openxml__python-docx
|
src/docx/oxml/simpletypes.py
|
{
"start": 5191,
"end": 5496
}
|
class ____(XsdString):
@classmethod
def validate(cls, value: Any) -> None:
cls.validate_string(value)
valid_values = ("page", "column", "textWrapping")
if value not in valid_values:
raise ValueError("must be one of %s, got '%s'" % (valid_values, value))
|
ST_BrType
|
python
|
Netflix__metaflow
|
test/test_config/hellodecos.py
|
{
"start": 481,
"end": 2080
}
|
class ____(MyBaseFlowSpec):
cfg = Config(
"cfg",
default_value={
"args_decorator": "with_args",
"user_retry_decorator": "my_decorators.retry",
"bar": 43,
},
)
@conda(python="3.10.*")
@environment(vars={"FOO": 42})
@step
def start(self):
print("Starting flow")
print("Added decorators: ", self.user_added_step_decorators)
assert self.user_added_step_decorators[0] == "time_step"
self.next(self.m0)
@time_step
@with_args(foo=cfg.bar, bar="baz")
@step
def m0(self):
print("Added decorators: ", self.user_added_step_decorators)
assert self.user_added_step_decorators[0] == "time_step"
assert (
self.user_added_step_decorators[1] == "with_args({'foo': 43, 'bar': 'baz'})"
)
print("m0")
self.next(self.m1)
# Shows how a step can be totally skipped
@SkipStep(skip_steps=["m1"])
@step
def m1(self):
assert False, "This step should not be executed"
self.next(self.m2)
@AddArgsDecorator(bar=cfg.bar, baz="baz")
@AddTimeStep
@step
def m2(self):
print("Added decorators: ", self.user_added_step_decorators)
assert (
self.user_added_step_decorators[0] == "with_args({'bar': 43, 'baz': 'baz'})"
)
assert self.user_added_step_decorators[1] == "time_step"
print("m2")
self.next(self.end)
@step
def end(self):
print("Flow completed successfully")
if __name__ == "__main__":
DecoFlow()
|
DecoFlow
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/automatic_id_none_refresh/tutorial001.py
|
{
"start": 92,
"end": 2084
}
|
class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
print("Before interacting with the database")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
print("After adding to the session")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
session.commit()
print("After committing the session")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
print("After committing the session, show IDs")
print("Hero 1 ID:", hero_1.id)
print("Hero 2 ID:", hero_2.id)
print("Hero 3 ID:", hero_3.id)
print("After committing the session, show names")
print("Hero 1 name:", hero_1.name)
print("Hero 2 name:", hero_2.name)
print("Hero 3 name:", hero_3.name)
session.refresh(hero_1)
session.refresh(hero_2)
session.refresh(hero_3)
print("After refreshing the heroes")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
print("After the session closes")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
keras-team__keras
|
keras/src/initializers/constant_initializers.py
|
{
"start": 1484,
"end": 2476
}
|
class ____(Initializer):
"""Initializer that generates tensors initialized to 0.
Examples:
>>> # Standalone usage:
>>> initializer = Zeros()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = Zeros()
>>> layer = Dense(units=3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
is used, which default to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
"""
dtype = standardize_dtype(dtype)
return ops.zeros(shape, dtype=dtype)
@keras_export(["keras.initializers.Ones", "keras.initializers.ones"])
|
Zeros
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py
|
{
"start": 4310,
"end": 5935
}
|
class ____(Benchmark):
r"""
Trefethen objective function.
This class defines the Trefethen [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Trefethen}}(x) = 0.25 x_{1}^{2} + 0.25 x_{2}^{2}
+ e^{\sin\left(50 x_{1}\right)}
- \sin\left(10 x_{1} + 10 x_{2}\right)
+ \sin\left(60 e^{x_{2}}\right)
+ \sin\left[70 \sin\left(x_{1}\right)\right]
+ \sin\left[\sin\left(80 x_{2}\right)\right]
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -3.3068686474` for
:math:`x = [-0.02440307923, 0.2106124261]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[-0.02440307923, 0.2106124261]]
self.fglob = -3.3068686474
def fun(self, x, *args):
self.nfev += 1
val = 0.25 * x[0] ** 2 + 0.25 * x[1] ** 2
val += exp(sin(50. * x[0])) - sin(10 * x[0] + 10 * x[1])
val += sin(60 * exp(x[1]))
val += sin(70 * sin(x[0]))
val += sin(sin(80 * x[1]))
return val
|
Trefethen
|
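A quick numeric sanity check of the stated optimum, assuming numpy is importable (the benchmark module already pulls exp and sin from it):
import numpy as np

b = Trefethen()
x = np.asarray(b.global_optimum[0])
print(b.fun(x))  # approximately -3.3068686474, i.e. b.fglob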
python
|
pallets__werkzeug
|
src/werkzeug/exceptions.py
|
{
"start": 19389,
"end": 20422
}
|
class ____(HTTPException):
"""Adds an optional ``retry_after`` parameter which will set the
``Retry-After`` header. May be an :class:`int` number of seconds or
a :class:`~datetime.datetime`.
"""
def __init__(
self,
description: str | None = None,
response: SansIOResponse | None = None,
retry_after: datetime | int | None = None,
) -> None:
super().__init__(description, response)
self.retry_after = retry_after
def get_headers(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> list[tuple[str, str]]:
headers = super().get_headers(environ, scope)
if self.retry_after:
if isinstance(self.retry_after, datetime):
from .http import http_date
value = http_date(self.retry_after)
else:
value = str(self.retry_after)
headers.append(("Retry-After", value))
return headers
|
_RetryAfter
|
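The two accepted retry_after forms, sketched through TooManyRequests, one of the concrete werkzeug exceptions built on the mixin above (the exact date string follows from the chosen datetime):
from datetime import datetime, timezone
from werkzeug.exceptions import TooManyRequests

exc = TooManyRequests(retry_after=120)         # int -> delta-seconds
print(dict(exc.get_headers())["Retry-After"])  # "120"

when = datetime(2030, 1, 1, tzinfo=timezone.utc)
exc = TooManyRequests(retry_after=when)        # datetime -> HTTP date
print(dict(exc.get_headers())["Retry-After"])  # "Tue, 01 Jan 2030 00:00:00 GMT"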
python
|
patrick-kidger__equinox
|
equinox/debug/_max_traces.py
|
{
"start": 909,
"end": 964
}
|
class ____:
__slots__ = ("__weakref__",)
|
_Weakrefable
|
python
|
PyCQA__pylint
|
tests/functional/m/match_class_pattern.py
|
{
"start": 238,
"end": 283
}
|
class ____(A):
__match_args__ = ("x", "y")
|
B
|
python
|
numpy__numpy
|
numpy/lib/tests/test_index_tricks.py
|
{
"start": 16575,
"end": 19552
}
|
class ____:
def test_basic(self):
a = np.zeros((3, 3), int)
fill_diagonal(a, 5)
assert_array_equal(
a, np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
)
def test_tall_matrix(self):
a = np.zeros((10, 3), int)
fill_diagonal(a, 5)
assert_array_equal(
a, np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
)
def test_tall_matrix_wrap(self):
a = np.zeros((10, 3), int)
fill_diagonal(a, 5, True)
assert_array_equal(
a, np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[5, 0, 0],
[0, 5, 0]])
)
def test_wide_matrix(self):
a = np.zeros((3, 10), int)
fill_diagonal(a, 5)
assert_array_equal(
a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
)
def test_operate_4d_array(self):
a = np.zeros((3, 3, 3, 3), int)
fill_diagonal(a, 4)
i = np.array([0, 1, 2])
assert_equal(np.where(a != 0), (i, i, i, i))
def test_low_dim_handling(self):
# raise error with low dimensionality
a = np.zeros(3, int)
with assert_raises_regex(ValueError, "at least 2-d"):
fill_diagonal(a, 5)
def test_hetero_shape_handling(self):
# raise error with high dimensionality and
# shape mismatch
a = np.zeros((3, 3, 7, 3), int)
with assert_raises_regex(ValueError, "equal length"):
fill_diagonal(a, 2)
def test_diag_indices():
di = diag_indices(4)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
a[di] = 100
assert_array_equal(
a, np.array([[100, 2, 3, 4],
[5, 100, 7, 8],
[9, 10, 100, 12],
[13, 14, 15, 100]])
)
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
# And use it to set the diagonal of a zeros array to 1:
a = np.zeros((2, 2, 2), int)
a[d3] = 1
assert_array_equal(
a, np.array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]])
)
|
TestFillDiagonal
|
python
|
pytest-dev__pytest
|
src/_pytest/timing.py
|
{
"start": 572,
"end": 1611
}
|
class ____:
"""
Represents an instant in time, used to both get the timestamp value and to measure
the duration of a time span.
Inspired by Rust's `std::time::Instant`.
"""
# Creation time of this instant, using time.time(), to measure actual time.
# Note: using a `lambda` to correctly get the mocked time via `MockTiming`.
time: float = dataclasses.field(default_factory=lambda: time(), init=False)
# Performance counter tick of the instant, used to measure precise elapsed time.
# Note: using a `lambda` to correctly get the mocked time via `MockTiming`.
perf_count: float = dataclasses.field(
default_factory=lambda: perf_counter(), init=False
)
def elapsed(self) -> Duration:
"""Measure the duration since `Instant` was created."""
return Duration(start=self, stop=Instant())
def as_utc(self) -> datetime:
"""Instant as UTC datetime."""
return datetime.fromtimestamp(self.time, timezone.utc)
@dataclasses.dataclass(frozen=True)
|
Instant
|
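A usage sketch of the pattern above, assuming Instant is itself a frozen dataclass (its field declarations suggest so) and that the Duration dataclass that follows stores the two Instants as start and stop:
instant = Instant()
do_some_work()  # placeholder for the span being measured
duration = instant.elapsed()
wall_seconds = duration.stop.time - duration.start.time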
python
|
apache__airflow
|
providers/apache/kafka/tests/unit/apache/kafka/hooks/test_consume.py
|
{
"start": 1068,
"end": 2108
}
|
class ____:
"""
Test consumer hook.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="kafka_d",
conn_type="kafka",
extra=json.dumps(
{"socket.timeout.ms": 10, "bootstrap.servers": "localhost:9092", "group.id": "test_group"}
),
)
)
create_connection_without_db(
Connection(
conn_id="kafka_bad",
conn_type="kafka",
extra=json.dumps({}),
)
)
self.hook = KafkaConsumerHook(["test_1"], kafka_config_id="kafka_d")
@patch("airflow.providers.apache.kafka.hooks.base.AdminClient")
def test_get_consumer(self, mock_client):
mock_client_spec = MagicMock(spec=AdminClient)
mock_client.return_value = mock_client_spec
assert self.hook.get_consumer() == self.hook.get_conn
|
TestConsumerHook
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 536228,
"end": 536705
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateDiscussion"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "discussion")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
discussion = sgqlc.types.Field("Discussion", graphql_name="discussion")
"""The discussion that was just created."""
|
CreateDiscussionPayload
|
python
|
python__mypy
|
mypy/test/teststubgen.py
|
{
"start": 59251,
"end": 60928
}
|
class ____(unittest.TestCase):
def test_python_module(self) -> None:
with ModuleInspect() as m:
p = m.get_package_properties("inspect")
assert p is not None
assert p.name == "inspect"
assert p.file
assert p.path is None
assert p.is_c_module is False
assert p.subpackages == []
def test_python_package(self) -> None:
with ModuleInspect() as m:
p = m.get_package_properties("unittest")
assert p is not None
assert p.name == "unittest"
assert p.file
assert p.path
assert p.is_c_module is False
assert p.subpackages
assert all(sub.startswith("unittest.") for sub in p.subpackages)
def test_c_module(self) -> None:
with ModuleInspect() as m:
p = m.get_package_properties("_socket")
assert p is not None
assert p.name == "_socket"
assert p.path is None
assert p.is_c_module is True
assert p.subpackages == []
def test_non_existent(self) -> None:
with ModuleInspect() as m:
with self.assertRaises(InspectError) as e:
m.get_package_properties("foobar-non-existent")
assert str(e.exception) == "No module named 'foobar-non-existent'"
def module_to_path(out_dir: str, module: str) -> str:
fnam = os.path.join(out_dir, f"{module.replace('.', '/')}.pyi")
if not os.path.exists(fnam):
alt_fnam = fnam.replace(".pyi", "/__init__.pyi")
if os.path.exists(alt_fnam):
return alt_fnam
return fnam
|
ModuleInspectSuite
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
|
{
"start": 423,
"end": 2026
}
|
class ____(
object
): # Y040 Do not inherit from "object" explicitly, as it is redundant in Python 3
def __new__(cls, *args: Any, **kwargs: Any) -> Bad:
... # Y034 "__new__" methods usually return "self" at runtime. Consider using "typing_extensions.Self" in "Bad.__new__", e.g. "def __new__(cls, *args: Any, **kwargs: Any) -> Self: ..."
def __repr__(self) -> str:
... # Y029 Defining __repr__ or __str__ in a stub is almost always redundant
def __str__(self) -> builtins.str:
... # Y029 Defining __repr__ or __str__ in a stub is almost always redundant
def __eq__(self, other: Any) -> bool:
... # Y032 Prefer "object" to "Any" for the second parameter in "__eq__" methods
def __ne__(self, other: typing.Any) -> typing.Any:
... # Y032 Prefer "object" to "Any" for the second parameter in "__ne__" methods
def __enter__(self) -> Bad:
... # Y034 "__enter__" methods in classes like "Bad" usually return "self" at runtime. Consider using "typing_extensions.Self" in "Bad.__enter__", e.g. "def __enter__(self) -> Self: ..."
async def __aenter__(self) -> Bad:
... # Y034 "__aenter__" methods in classes like "Bad" usually return "self" at runtime. Consider using "typing_extensions.Self" in "Bad.__aenter__", e.g. "async def __aenter__(self) -> Self: ..."
def __iadd__(self, other: Bad) -> Bad:
... # Y034 "__iadd__" methods in classes like "Bad" usually return "self" at runtime. Consider using "typing_extensions.Self" in "Bad.__iadd__", e.g. "def __iadd__(self, other: Bad) -> Self: ..."
|
Bad
|
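For contrast, a sketch of the same stub after applying the Y032/Y034 suggestions quoted in the comments above (typing_extensions assumed available):
from typing import Any
from typing_extensions import Self

class Good:
    def __new__(cls, *args: Any, **kwargs: Any) -> Self: ...
    def __eq__(self, other: object) -> bool: ...
    def __enter__(self) -> Self: ...
    async def __aenter__(self) -> Self: ...
    def __iadd__(self, other: "Good") -> Self: ...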
python
|
django-compressor__django-compressor
|
compressor/tests/test_filters.py
|
{
"start": 1130,
"end": 7916
}
|
class ____(TestCase):
CHARSET = "utf-8"
def setUp(self):
self.test_precompiler = os.path.join(test_dir, "precompiler.py")
self.setup_infile()
self.cached_precompiler_args = dict(
content=self.content,
charset=self.CHARSET,
filename=self.filename,
mimetype="text/css",
)
def setup_infile(self, filename="static/css/one.css"):
self.filename = os.path.join(test_dir, filename)
with io.open(self.filename, encoding=self.CHARSET) as file:
self.content = file.read()
def test_precompiler_dict_options(self):
command = "%s %s {option}" % (sys.executable, self.test_precompiler)
option = (
"option",
"option",
)
CompilerFilter.options = dict([option])
compiler = CompilerFilter(
content=self.content,
filename=self.filename,
charset=self.CHARSET,
command=command,
)
self.assertIn(option, compiler.options)
def test_precompiler_infile_outfile(self):
command = "%s %s -f {infile} -o {outfile}" % (
sys.executable,
self.test_precompiler,
)
compiler = CompilerFilter(
content=self.content,
filename=self.filename,
charset=self.CHARSET,
command=command,
)
self.assertEqual("body { color:#990; }", compiler.input())
def test_precompiler_infile_with_spaces(self):
self.setup_infile("static/css/filename with spaces.css")
command = "%s %s -f {infile} -o {outfile}" % (
sys.executable,
self.test_precompiler,
)
compiler = CompilerFilter(
content=self.content,
filename=self.filename,
charset=self.CHARSET,
command=command,
)
self.assertEqual("body { color:#424242; }", compiler.input())
def test_precompiler_infile_stdout(self):
command = "%s %s -f {infile}" % (sys.executable, self.test_precompiler)
compiler = CompilerFilter(
content=self.content, filename=None, charset=None, command=command
)
self.assertEqual("body { color:#990; }%s" % os.linesep, compiler.input())
def test_precompiler_stdin_outfile(self):
command = "%s %s -o {outfile}" % (sys.executable, self.test_precompiler)
compiler = CompilerFilter(
content=self.content, filename=None, charset=None, command=command
)
self.assertEqual("body { color:#990; }", compiler.input())
def test_precompiler_stdin_stdout(self):
command = "%s %s" % (sys.executable, self.test_precompiler)
compiler = CompilerFilter(
content=self.content, filename=None, charset=None, command=command
)
self.assertEqual("body { color:#990; }%s" % os.linesep, compiler.input())
def test_precompiler_stdin_stdout_filename(self):
command = "%s %s" % (sys.executable, self.test_precompiler)
compiler = CompilerFilter(
content=self.content,
filename=self.filename,
charset=self.CHARSET,
command=command,
)
self.assertEqual("body { color:#990; }%s" % os.linesep, compiler.input())
def test_precompiler_output_unicode(self):
command = "%s %s" % (sys.executable, self.test_precompiler)
compiler = CompilerFilter(
content=self.content, filename=self.filename, command=command
)
self.assertEqual(type(compiler.input()), str)
def test_precompiler_cache(self):
# The cache may already have data in it depending on the order the tests are
# run, so start by clearing it:
cache.clear()
command = "%s %s -f {infile} -o {outfile}" % (
sys.executable,
self.test_precompiler,
)
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
# We tell whether the precompiler actually ran by inspecting compiler.infile. If not None, the compiler had to
# write the input out to the file for the external command. If None, it was in the cache and thus skipped.
self.assertIsNotNone(compiler.infile) # Not cached
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
self.assertIsNone(compiler.infile) # Cached
self.cached_precompiler_args[
"content"
] += " " # Invalidate cache by slightly changing content
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
self.assertIsNotNone(compiler.infile) # Not cached
@mock.patch("django.core.cache.backends.locmem.LocMemCache.get")
def test_precompiler_cache_issue750(self, mock_cache):
# emulate memcached and return string
mock_cache.side_effect = lambda key: str("body { color:#990; }")
command = "%s %s -f {infile} -o {outfile}" % (
sys.executable,
self.test_precompiler,
)
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
self.assertEqual(
type(compiler.input()), type(smart_str("body { color:#990; }"))
)
def test_precompiler_not_cacheable(self):
command = "%s %s -f {infile} -o {outfile}" % (
sys.executable,
self.test_precompiler,
)
self.cached_precompiler_args["mimetype"] = "text/different"
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
self.assertIsNotNone(compiler.infile) # Not cached
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
self.assertIsNotNone(compiler.infile) # Not cached
def test_precompiler_caches_empty_files(self):
command = "%s %s -f {infile} -o {outfile}" % (
sys.executable,
self.test_precompiler,
)
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("body { color:#990; }", compiler.input())
cache.set(compiler.get_cache_key(), "")
compiler = CachedCompilerFilter(command=command, **self.cached_precompiler_args)
self.assertEqual("", compiler.input())
|
PrecompilerTestCase
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/graphql_client.py
|
{
"start": 6634,
"end": 10298
}
|
class ____:
def __init__(
self,
url: str,
session: requests.Session,
headers: Optional[dict[str, Any]] = None,
verify: bool = True,
timeout: int = DEFAULT_TIMEOUT,
cookies: Optional[dict[str, Any]] = None,
proxies: Optional[dict[str, Any]] = None,
max_retries: int = 0,
backoff_factor: float = DEFAULT_BACKOFF_FACTOR,
):
self.url = url
self.headers = headers
self.verify = verify
self.timeout = timeout
self.cookies = cookies
self._session = session
self._proxies = proxies
self._max_retries = max_retries
self._backoff_factor = backoff_factor
@property
def session(self) -> requests.Session:
return self._session
def execute(
self,
query: str,
variable_values: Optional[Mapping[str, Any]] = None,
headers: Optional[Mapping[str, str]] = None,
idempotent_mutation: bool = False,
):
if "mutation " in query and not idempotent_mutation:
all_headers = {
**(self.headers if self.headers is not None else {}),
**(headers if headers is not None else {}),
}
            # mutations can be made idempotent if they use the Idempotency-Key header
retry_on_read_timeout = bool(all_headers.get("Idempotency-Key"))
else:
retry_on_read_timeout = True
return _retry_loop(
lambda: self._execute_retry(query, variable_values, headers),
max_retries=self._max_retries,
backoff_factor=self._backoff_factor,
retry_on_read_timeout=retry_on_read_timeout,
)
def _execute_retry(
self,
query: str,
variable_values: Optional[Mapping[str, Any]],
headers: Optional[Mapping[str, Any]],
):
response = self._session.post(
self.url,
headers={
**(self.headers if self.headers is not None else {}),
**(headers if headers is not None else {}),
"Content-type": "application/json",
},
cookies=self.cookies,
timeout=self.timeout,
verify=self.verify,
json={
"query": query,
"variables": variable_values if variable_values else {},
},
proxies=self._proxies,
)
try:
result = response.json()
if not isinstance(result, dict):
result = {}
except ValueError:
result = {}
if "errors" not in result and "data" not in result and "maintenance" not in result:
response.raise_for_status()
raise requests.HTTPError("Unexpected GraphQL response", response=response)
if "maintenance" in result:
maintenance_info = result["maintenance"]
raise DagsterCloudMaintenanceException(
message=maintenance_info.get("message"),
timeout=maintenance_info.get("timeout"),
retry_interval=maintenance_info.get("retry_interval"),
)
if "errors" in result:
raise DagsterCloudAgentServerError(f"Error in GraphQL response: {result['errors']}")
return result
def get_agent_headers(config_value: dict[str, Any], scope: DagsterCloudInstanceScope):
return get_dagster_cloud_api_headers(
config_value["agent_token"],
scope=scope,
deployment_name=config_value.get("deployment"),
additional_headers=config_value.get("headers"),
)
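# A minimal sketch of the retry-with-backoff pattern the client above relies
# on. Assumption: illustrative only -- not the actual `_retry_loop` referenced
# in `execute()`, which is defined elsewhere in this module.
import time

def retry_loop_sketch(fn, max_retries=3, backoff_factor=0.5, retry_on_read_timeout=False):
    attempt = 0
    while True:
        try:
            return fn()
        except requests.exceptions.ReadTimeout:
            # Only safe to retry when the request is idempotent (see execute()).
            if not retry_on_read_timeout or attempt >= max_retries:
                raise
        except requests.exceptions.ConnectionError:
            if attempt >= max_retries:
                raise
        time.sleep(backoff_factor * (2 ** attempt))  # 0.5s, 1s, 2s, ...
        attempt += 1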
|
DagsterCloudGraphQLClient
|
python
|
google__jax
|
tests/api_test.py
|
{
"start": 254419,
"end": 257174
}
|
class ____(jtu.JaxTestCase):
def test_non_jaxtype_arg(self):
    # For the test to fail without the invalid-JaxType filter, we need to pass
    # in a valid JaxType that forces the invalid JaxType to be raised to an
    # abstract value.
def f(not_a_jaxtype, a_jaxtype):
# then Jax needs to try and evaluate the abstractified non-JaxType
if not_a_jaxtype:
return a_jaxtype
return 0
f = api.named_call(f, name="test")
out = jax.jit(f, static_argnums=(0,))("not a Jaxtype", 1)
self.assertEqual(out, 1)
@parameterized.parameters(jax.jit, jax.grad, jax.vmap, jax.remat)
def test_jax_transforms(self, transform):
f = jnp.sum
x = jnp.array([1.])
unnamed_out = transform(f)(x)
named_out = transform(api.named_call(f, name="test"))(x)
self.assertEqual(unnamed_out, named_out)
def test_static_argnums(self):
f = api.named_call(lambda x, y: y if x else None, name="test")
f = jax.jit(f, static_argnums=(0,))
out = f(True, 5)
self.assertEqual(out, 5)
def test_partial_eval(self):
f = api.named_call(lambda x, y: y if x else None, name="test")
f = jax.jit(functools.partial(f, True))
out = f(5)
self.assertEqual(out, 5)
@parameterized.parameters(
[dict(func=func, jit=jit)
for func in ['identity_trivial', 'identity', 'closure_trivial', 'closure',
'asarray', 'device_put']
for jit in jtu.JIT_IMPLEMENTATION
if not (jit._name == "noop" and func in ('identity', 'identity_trivial'))
],
)
def test_integer_overflow(self, jit, func):
funcdict = {
'identity_trivial': lambda x: x, # may hit trivial dispatch path
'identity': lambda x: x + 0,
'closure_trivial': lambda x: jax.jit(lambda: x)(),
'closure': lambda x: jax.jit(lambda: x + 0)(),
'asarray': lambda x: jnp.asarray(x), # add lambdas so no cross-test cache
'device_put': lambda x: api.device_put(x),
}
f = jit(funcdict[func])
int_dtype = dtypes.default_int_dtype()
int_max = np.iinfo(int_dtype).max
int_min = np.iinfo(int_dtype).min
# check before any jit cache entries
self.assertRaises(OverflowError, f, int_max + 1)
self.assertRaises(OverflowError, f, int_min - 1)
self.assertEqual(f(int_max).dtype, int_dtype)
self.assertEqual(f(int_min).dtype, int_dtype)
self.assertAllClose(f(int_max), int_max)
self.assertAllClose(f(int_min), int_min)
# check after any cache entries
self.assertRaises(OverflowError, f, int_max + 1)
self.assertRaises(OverflowError, f, int_min - 1)
    if func in ('identity_trivial', 'identity'):
self.assertRaisesRegex(
OverflowError, 'An overflow.*whose argument path is x.', f,
int_max + 1)
|
NamedCallTest
|
python
|
jazzband__django-pipeline
|
pipeline/compressors/closure.py
|
{
"start": 91,
"end": 290
}
|
class ____(SubProcessCompressor):
def compress_js(self, js):
command = (settings.CLOSURE_BINARY, settings.CLOSURE_ARGUMENTS)
return self.execute_command(command, js)
|
ClosureCompressor
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py
|
{
"start": 5394,
"end": 7048
}
|
class ____:
"""Helper class for validating transfer job body."""
def __init__(self, body: dict) -> None:
if not body:
raise AirflowException("The required parameter 'body' is empty or None")
self.body = body
def _verify_data_source(self) -> None:
is_gcs = GCS_DATA_SOURCE in self.body[TRANSFER_SPEC]
is_aws_s3 = AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]
is_http = HTTP_DATA_SOURCE in self.body[TRANSFER_SPEC]
sources_count = sum([is_gcs, is_aws_s3, is_http])
if sources_count > 1:
raise AirflowException(
"More than one data source detected. Please choose exactly one data source from: "
"gcsDataSource, awsS3DataSource and httpDataSource."
)
def _restrict_aws_credentials(self) -> None:
aws_transfer = AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]
if aws_transfer and AWS_ACCESS_KEY in self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE]:
raise AirflowException(
"AWS credentials detected inside the body parameter (awsAccessKey). This is not allowed, "
"please use Airflow connections to store credentials."
)
def validate_body(self) -> None:
"""
Validate the body.
Checks if body specifies `transferSpec` if yes, then check if AWS credentials
are passed correctly and no more than 1 data source was selected.
:raises: AirflowException
"""
if TRANSFER_SPEC in self.body:
self._restrict_aws_credentials()
self._verify_data_source()
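    # Hedged usage sketch (the constants are the module-level keys used above):
    #
    #     body = {TRANSFER_SPEC: {GCS_DATA_SOURCE: {...}, AWS_S3_DATA_SOURCE: {...}}}
    #     TransferJobValidator(body).validate_body()
    #     # -> AirflowException: more than one data source detected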
|
TransferJobValidator
|
python
|
getsentry__sentry
|
src/sentry/notifications/notification_action/action_validation.py
|
{
"start": 5369,
"end": 6557
}
|
class ____(BaseActionValidatorHandler):
provider = Action.Type.PAGERDUTY
notify_action_form = PagerDutyNotifyServiceForm
def _get_services(self) -> list[tuple[int, str]]:
organization_integrations = integration_service.get_organization_integrations(
providers=[Action.Type.PAGERDUTY], organization_id=self.organization.id
)
return [
(v["id"], v["service_name"])
for oi in organization_integrations
for v in oi.config.get("pagerduty_services", [])
]
def generate_action_form_payload(self) -> dict[str, Any]:
payload = super().generate_action_form_payload()
return {
**payload,
"services": self._get_services(),
}
def generate_action_form_data(self) -> dict[str, Any]:
return {
"account": self.validated_data["integration_id"],
"service": self.validated_data["config"]["target_identifier"],
}
def update_action_data(self, cleaned_data: dict[str, Any]) -> dict[str, Any]:
return self.validated_data
@action_validator_registry.register(Action.Type.OPSGENIE)
|
PagerdutyActionValidatorHandler
|
python
|
pallets__quart
|
src/quart/ctx.py
|
{
"start": 847,
"end": 4055
}
|
class ____:
"""A base context relating to either request or websockets, bound to the
current task.
Attributes:
app: The app itself.
request_websocket: The request or websocket itself.
url_adapter: An adapter bound to this request.
session: The session information relating to this request.
"""
def __init__(
self,
app: Quart,
request_websocket: BaseRequestWebsocket,
session: SessionMixin | None = None,
) -> None:
self.app = app
self.request_websocket = request_websocket
self.url_adapter = app.create_url_adapter(self.request_websocket)
self.request_websocket.routing_exception = None
self.request_websocket.json_module = app.json
self.session = session
self.preserved = False
self._cv_tokens: list[tuple[Token, AppContext | None]] = []
def copy(self) -> _BaseRequestWebsocketContext:
return self.__class__(self.app, self.request_websocket, self.session)
def match_request(self) -> None:
"""Match the request against the adapter.
Override this method to configure request matching, it should
set the request url_rule and view_args and optionally a
routing_exception.
"""
try:
(
self.request_websocket.url_rule,
self.request_websocket.view_args,
) = self.url_adapter.match( # type: ignore
return_rule=True
) # noqa
except HTTPException as exception:
self.request_websocket.routing_exception = exception
async def push(self) -> None:
raise NotImplementedError()
async def pop(self, exc: BaseException | None) -> None:
raise NotImplementedError()
async def auto_pop(self, exc: BaseException | None) -> None:
if self.request_websocket.scope.get("_quart._preserve_context", False) or (
exc is not None and self.app.config["PRESERVE_CONTEXT_ON_EXCEPTION"]
):
self.preserved = True
else:
await self.pop(exc)
async def __aenter__(self) -> _BaseRequestWebsocketContext:
await self.push()
return self
async def __aexit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> None:
await self.auto_pop(exc_value)
async def _push_appctx(self, token: Token) -> None:
app_ctx = _cv_app.get(None)
if app_ctx is None or app_ctx.app is not self.app:
app_ctx = self.app.app_context()
await app_ctx.push()
else:
app_ctx = None
self._cv_tokens.append((token, app_ctx))
async def _push(self) -> None:
if self.session is None:
session_interface = self.app.session_interface
self.session = await self.app.ensure_async(session_interface.open_session)(
self.app, self.request_websocket
)
if self.session is None:
self.session = await session_interface.make_null_session(self.app)
if self.url_adapter is not None:
self.match_request()
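    # Hedged usage sketch: a concrete subclass implements push()/pop(), after
    # which the context is driven with ``async with`` (names illustrative):
    #
    #     ctx = SomeRequestContext(app, request)  # subclass of this base
    #     async with ctx:  # __aenter__ -> push(); __aexit__ -> auto_pop(exc)
    #         ...          # handle the request or websocket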
|
_BaseRequestWebsocketContext
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-fertile-pyramids-in-a-land.py
|
{
"start": 33,
"end": 852
}
|
class ____(object):
def countPyramids(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def count(grid, reverse):
def get_grid(i, j):
return grid[~i][j] if reverse else grid[i][j]
result = 0
dp = [0]*len(grid[0])
for i in xrange(1, len(grid)):
new_dp = [0]*len(grid[0])
for j in xrange(1, len(grid[0])-1):
if get_grid(i, j) == get_grid(i-1, j-1) == get_grid(i-1, j) == get_grid(i-1, j+1) == 1:
new_dp[j] = min(dp[j-1], dp[j+1])+1
dp = new_dp
result += sum(dp)
return result
return count(grid, False) + count(grid, True)
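    # Hand-checked example: grid = [[0, 1, 1, 0], [1, 1, 1, 1]] contains two
    # pyramidal plots (apexes at (0, 1) and (0, 2)) and no inverse ones, so
    # Solution().countPyramids(grid) == 2.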
# Time: O(m * n)
# Space: O(m * n)
|
Solution
|
python
|
Textualize__textual
|
src/textual/css/_style_properties.py
|
{
"start": 26455,
"end": 30008
}
|
class ____(Generic[EnumType]):
"""Descriptor for getting and setting string properties and ensuring that the set
value belongs in the set of valid values.
Args:
valid_values: The set of valid values that the descriptor can take.
default: The default value (or a factory thereof) of the property.
layout: Whether to refresh the node layout on value change.
refresh_children: Whether to refresh the node children on value change.
display: Does this property change display?
"""
def __init__(
self,
valid_values: set[str],
default: EnumType,
layout: bool = False,
refresh_children: bool = False,
refresh_parent: bool = False,
display: bool = False,
) -> None:
self._valid_values = valid_values
self._default = default
self._layout = layout
self._refresh_children = refresh_children
self._refresh_parent = refresh_parent
self._display = display
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> EnumType:
"""Get the string property, or the default value if it's not set.
Args:
obj: The `Styles` object.
objtype: The `Styles` class.
Returns:
The string property value.
"""
return obj.get_rule(self.name, self._default) # type: ignore
def _before_refresh(self, obj: StylesBase, value: str | None) -> None:
"""Do any housekeeping before asking for a layout refresh after a value change."""
def __set__(self, obj: StylesBase, value: EnumType | None = None):
"""Set the string property and ensure it is in the set of allowed values.
Args:
obj: The `Styles` object.
value: The string value to set the property to.
Raises:
StyleValueError: If the value is not in the set of valid values.
"""
_rich_traceback_omit = True
if value is None:
if obj.clear_rule(self.name):
self._before_refresh(obj, value)
obj.refresh(
layout=self._layout,
children=self._refresh_children,
parent=self._refresh_parent,
)
if self._display:
node = obj.node
if node is not None and node.parent:
node._nodes.updated()
else:
if value not in self._valid_values:
raise StyleValueError(
f"{self.name} must be one of {friendly_list(self._valid_values)} (received {value!r})",
help_text=string_enum_help_text(
self.name,
valid_values=list(self._valid_values),
context="inline",
),
)
if obj.set_rule(self.name, value):
if self._display and obj.node is not None:
node = obj.node
if node.parent:
node._nodes.updated()
node.parent._refresh_styles()
self._before_refresh(obj, value)
obj.refresh(
layout=self._layout,
children=self._refresh_children,
parent=self._refresh_parent,
)
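# A standalone toy sketch of the same validate-on-set descriptor pattern
# (assumption: deliberately simplified, with none of Textual's StylesBase,
# refresh, or theming machinery):
class _ValidatedString:
    def __init__(self, valid_values, default):
        self._valid_values = valid_values
        self._default = default

    def __set_name__(self, owner, name):
        self._name = "_" + name

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return getattr(obj, self._name, self._default)

    def __set__(self, obj, value):
        if value not in self._valid_values:
            raise ValueError(f"{self._name[1:]} must be one of {sorted(self._valid_values)}")
        setattr(obj, self._name, value)

class _Box:
    display = _ValidatedString({"block", "none"}, "block")

box = _Box()
assert box.display == "block"  # default until set
box.display = "none"           # accepted
# box.display = "grid"         # would raise ValueError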
|
StringEnumProperty
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_kdtree.py
|
{
"start": 16427,
"end": 17986
}
|
class ____:
def setup_method(self):
self.rect = Rectangle([0, 0], [1, 1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5, 0.5]), 0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5, 1.5]), 0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2, 2]), np.sqrt(2))
def test_max_inside(self):
assert_almost_equal(self.rect.max_distance_point([0.5, 0.5]), 1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5, 1.5]),
np.hypot(0.5, 1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2, 2]), 2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0, 0.1)
assert_array_equal(less.maxes, [0.1, 1])
assert_array_equal(less.mins, [0, 0])
assert_array_equal(greater.maxes, [1, 1])
assert_array_equal(greater.mins, [0.1, 0])
def test_distance_l2():
assert_almost_equal(minkowski_distance([0, 0], [1, 1], 2), np.sqrt(2))
def test_distance_l1():
assert_almost_equal(minkowski_distance([0, 0], [1, 1], 1), 2)
def test_distance_linf():
assert_almost_equal(minkowski_distance([0, 0], [1, 1], np.inf), 1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10, 1, 3)
y = np.random.randn(1, 7, 3)
assert_equal(minkowski_distance(x, y).shape, (10, 7))
|
Test_rectangle
|
python
|
numba__llvmlite
|
llvmlite/ir/values.py
|
{
"start": 25381,
"end": 28386
}
|
class ____(GlobalValue):
"""Represent a LLVM Function but does uses a Module as parent.
Global Values are stored as a set of dependencies (attribute `depends`).
"""
def __init__(self, module, ftype, name):
assert isinstance(ftype, types.Type)
super(Function, self).__init__(module, ftype.as_pointer(), name=name)
self.ftype = ftype
self.scope = _utils.NameScope()
self.blocks = []
self.attributes = FunctionAttributes()
self.args = tuple([Argument(self, t)
for t in ftype.args])
self.return_value = ReturnValue(self, ftype.return_type)
self.parent.add_global(self)
self.calling_convention = ''
@property
def module(self):
return self.parent
@property
def entry_basic_block(self):
return self.blocks[0]
@property
def basic_blocks(self):
return self.blocks
def append_basic_block(self, name=''):
blk = Block(parent=self, name=name)
self.blocks.append(blk)
return blk
def insert_basic_block(self, before, name=''):
"""Insert block before
"""
blk = Block(parent=self, name=name)
self.blocks.insert(before, blk)
return blk
def descr_prototype(self, buf):
"""
Describe the prototype ("head") of the function.
"""
state = "define" if self.blocks else "declare"
ret = self.return_value
args = ", ".join(str(a) for a in self.args)
name = self.get_reference()
attrs = ' ' + ' '.join(self.attributes._to_list(
self.ftype.return_type)) if self.attributes else ''
if any(self.args):
vararg = ', ...' if self.ftype.var_arg else ''
else:
vararg = '...' if self.ftype.var_arg else ''
linkage = self.linkage
cconv = self.calling_convention
prefix = " ".join(str(x) for x in [state, linkage, cconv, ret] if x)
metadata = self._stringify_metadata()
metadata = ' {}'.format(metadata) if metadata else ''
section = ' section "{}"'.format(self.section) if self.section else ''
pt_str = "{prefix} {name}({args}{vararg}){attrs}{section}{metadata}\n"
prototype = pt_str.format(prefix=prefix, name=name, args=args,
vararg=vararg, attrs=attrs, section=section,
metadata=metadata)
buf.append(prototype)
def descr_body(self, buf):
"""
        Describe the body of the function.
"""
for blk in self.blocks:
blk.descr(buf)
def descr(self, buf):
self.descr_prototype(buf)
if self.blocks:
buf.append("{\n")
self.descr_body(buf)
buf.append("}\n")
def __str__(self):
buf = []
self.descr(buf)
return "".join(buf)
@property
def is_declaration(self):
return len(self.blocks) == 0
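# Hedged usage sketch with the standard llvmlite.ir API: building a small
# function and printing it exercises descr_prototype/descr_body via __str__.
import llvmlite.ir as ir

i32 = ir.IntType(32)
mod = ir.Module(name="m")
fn = ir.Function(mod, ir.FunctionType(i32, [i32, i32]), name="add")
builder = ir.IRBuilder(fn.append_basic_block(name="entry"))
builder.ret(builder.add(*fn.args))
print(fn)  # emits the textual "define i32 @..." IR assembled above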
|
Function
|
python
|
getsentry__sentry
|
tests/sentry/backup/test_imports.py
|
{
"start": 3862,
"end": 29532
}
|
class ____(ImportTestCase):
"""
Ensure that potentially damaging data is properly scrubbed at import time.
"""
def test_users_sanitized_in_user_scope(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
self.generate_tmp_users_json_file(tmp_path)
with open(tmp_path, "rb") as tmp_file:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.count() == 4
assert (
User.objects.filter(is_managed=False, is_staff=False, is_superuser=False).count()
== 4
)
# Every user except `max_user` shares an email.
assert Email.objects.count() == 2
# All `UserEmail`s must have their verification status reset in this scope.
assert UserEmail.objects.count() == 4
assert UserEmail.objects.filter(is_verified=True).count() == 0
assert (
UserEmail.objects.filter(
date_hash_added__lt=datetime(2023, 7, 1, 0, 0, tzinfo=UTC)
).count()
== 0
)
assert (
UserEmail.objects.filter(validation_hash="mCnWesSVvYQcq7qXQ36AZHwosAd6cghE").count()
== 0
)
assert User.objects.filter(is_unclaimed=True).count() == 4
assert LostPasswordHash.objects.count() == 4
assert User.objects.filter(is_managed=True).count() == 0
assert User.objects.filter(is_staff=True).count() == 0
assert User.objects.filter(is_superuser=True).count() == 0
assert Authenticator.objects.count() == 0
assert UserPermission.objects.count() == 0
assert UserRole.objects.count() == 0
assert UserRoleUser.objects.count() == 0
def test_users_sanitized_in_organization_scope(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
self.generate_tmp_users_json_file(tmp_path)
with open(tmp_path, "rb") as tmp_file:
import_in_organization_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.count() == 4
assert (
User.objects.filter(is_managed=False, is_staff=False, is_superuser=False).count()
== 4
)
# Every user except `max_user` shares an email.
assert Email.objects.count() == 2
# All `UserEmail`s must have their verification status reset in this scope.
assert UserEmail.objects.count() == 4
assert UserEmail.objects.filter(is_verified=True).count() == 0
assert (
UserEmail.objects.filter(
date_hash_added__lt=datetime(2023, 7, 1, 0, 0, tzinfo=UTC)
).count()
== 0
)
assert (
UserEmail.objects.filter(validation_hash="mCnWesSVvYQcq7qXQ36AZHwosAd6cghE").count()
== 0
)
assert User.objects.filter(is_unclaimed=True).count() == 4
assert LostPasswordHash.objects.count() == 4
assert User.objects.filter(is_managed=True).count() == 0
assert User.objects.filter(is_staff=True).count() == 0
assert User.objects.filter(is_superuser=True).count() == 0
assert Authenticator.objects.count() == 0
assert UserPermission.objects.count() == 0
assert UserRole.objects.count() == 0
assert UserRoleUser.objects.count() == 0
def test_users_unsanitized_in_config_scope(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
self.generate_tmp_users_json_file(tmp_path)
with open(tmp_path, "rb") as tmp_file:
import_in_config_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.count() == 4
assert User.objects.filter(is_unclaimed=True).count() == 4
assert LostPasswordHash.objects.count() == 4
assert User.objects.filter(is_managed=True).count() == 1
assert User.objects.filter(is_staff=True).count() == 2
assert User.objects.filter(is_superuser=True).count() == 2
assert (
User.objects.filter(is_managed=False, is_staff=False, is_superuser=False).count()
== 2
)
assert UserEmail.objects.count() == 4
# Unlike the "global" scope, we do not keep authentication information for the "config"
# scope.
assert Authenticator.objects.count() == 0
# Every user except `max_user` shares an email.
assert Email.objects.count() == 2
# All `UserEmail`s must have their verification status reset in this scope.
assert UserEmail.objects.count() == 4
assert UserEmail.objects.filter(is_verified=True).count() == 0
assert (
UserEmail.objects.filter(
date_hash_added__lt=datetime(2023, 7, 1, 0, 0, tzinfo=UTC)
).count()
== 0
)
assert (
UserEmail.objects.filter(validation_hash="mCnWesSVvYQcq7qXQ36AZHwosAd6cghE").count()
== 0
)
# 1 from `max_user`, 1 from `permission_user`.
assert UserPermission.objects.count() == 2
# 1 from `max_user`.
assert UserRole.objects.count() == 1
assert UserRoleUser.objects.count() == 2
def test_users_unsanitized_in_global_scope(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
self.generate_tmp_users_json_file(tmp_path)
with open(tmp_path, "rb") as tmp_file:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.count() == 4
# We don't mark `Global`ly imported `User`s unclaimed.
assert User.objects.filter(is_unclaimed=True).count() == 0
assert LostPasswordHash.objects.count() == 0
assert User.objects.filter(is_managed=True).count() == 1
assert User.objects.filter(is_staff=True).count() == 2
assert User.objects.filter(is_superuser=True).count() == 2
assert (
User.objects.filter(is_managed=False, is_staff=False, is_superuser=False).count()
== 2
)
assert UserEmail.objects.count() == 4
# Unlike the "config" scope, we keep authentication information for the "global" scope.
assert Authenticator.objects.count() == 4
# Every user except `max_user` shares an email.
assert Email.objects.count() == 2
            # Unlike the other scopes, `UserEmail`s keep their imported verification status in the global scope.
assert UserEmail.objects.count() == 4
assert UserEmail.objects.filter(is_verified=True).count() == 4
assert (
UserEmail.objects.filter(
date_hash_added__lt=datetime(2023, 7, 1, 0, 0, tzinfo=UTC)
).count()
== 4
)
assert (
UserEmail.objects.filter(validation_hash="mCnWesSVvYQcq7qXQ36AZHwosAd6cghE").count()
== 4
)
# 1 from `max_user`, 1 from `permission_user`.
assert UserPermission.objects.count() == 2
# 1 from `max_user`.
assert UserRole.objects.count() == 1
assert UserRoleUser.objects.count() == 2
def test_generate_suffix_for_already_taken_organization(self) -> None:
owner = self.create_user(email="testing@example.com")
self.create_organization(name="some-org", owner=owner)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
# Note that we have created an organization with the same name as one we are about to
# import.
existing_org = self.create_organization(owner=self.user, name="some-org")
with open(tmp_path, "rb") as tmp_file:
import_in_organization_scope(tmp_file, printer=NOOP_PRINTER)
assert Organization.objects.count() == 2
assert Organization.objects.filter(slug__icontains="some-org").count() == 2
assert Organization.objects.filter(slug__iexact="some-org").count() == 1
imported_organization = Organization.objects.get(slug__icontains="some-org-")
assert imported_organization.id != existing_org.id
org_chunk = RegionImportChunk.objects.get(
model="sentry.organization", min_ordinal=1, max_ordinal=1
)
assert len(org_chunk.inserted_map) == 1
assert len(org_chunk.inserted_identifiers) == 1
for slug in org_chunk.inserted_identifiers.values():
assert slug.startswith("some-org-")
with assume_test_silo_mode(SiloMode.CONTROL):
assert (
OrganizationSlugReservation.objects.filter(
slug__icontains="some-org",
reservation_type=OrganizationSlugReservationType.PRIMARY,
).count()
== 2
)
assert OrganizationSlugReservation.objects.filter(slug__iexact="some-org").count() == 1
# Assert that the slug update RPC has completed and generated a valid matching primary
# slug reservation.
slug_reservation = OrganizationSlugReservation.objects.filter(
slug__icontains="some-org-",
reservation_type=OrganizationSlugReservationType.PRIMARY,
).get()
assert OrganizationMapping.objects.count() == 2
assert OrganizationMapping.objects.filter(slug__icontains="some-org").count() == 2
assert OrganizationMapping.objects.filter(slug__iexact="some-org").count() == 1
org_mapping = OrganizationMapping.objects.get(slug__icontains="some-org-")
assert org_mapping.slug == slug_reservation.slug == imported_organization.slug
assert (
org_mapping.organization_id
== slug_reservation.organization_id
== imported_organization.id
)
def test_generate_suffix_for_already_taken_organization_with_control_option(self) -> None:
with override_options({"hybrid_cloud.control-organization-provisioning": True}):
self.test_generate_suffix_for_already_taken_organization()
def test_generate_suffix_for_already_taken_username(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
self.create_user("min_user")
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
tmp_file.write(orjson.dumps(self.sort_in_memory_json(models)))
# Import twice, to check that new suffixes are assigned both times.
with open(tmp_path, "rb") as tmp_file:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
with open(tmp_path, "rb") as tmp_file:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.count() == 3
assert (
User.objects.filter(username__icontains="min_user")
.values("username")
.distinct()
.count()
== 3
)
assert User.objects.filter(username__iexact="min_user").count() == 1
assert User.objects.filter(username__icontains="min_user-").count() == 2
def test_bad_invalid_user(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
                # Modify all usernames to be longer than 128 characters.
for model in models:
if model["model"] == "sentry.user":
model["fields"]["username"] = "x" * 129
tmp_file.write(orjson.dumps(models))
with open(tmp_path, "rb") as tmp_file:
with pytest.raises(ImportingError) as err:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
assert err.value.context.get_kind() == RpcImportErrorKind.ValidationError
assert err.value.context.on.model == "sentry.user"
@patch("sentry.users.models.userip.geo_by_addr")
def test_good_regional_user_ip_in_global_scope(self, mock_geo_by_addr: MagicMock) -> None:
mock_geo_by_addr.return_value = {
"country_code": "US",
"region": "CA",
"subdivision": "San Francisco",
}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
# Modify the UserIP to be in California, USA.
for model in models:
if model["model"] == "sentry.userip":
model["fields"]["ip_address"] = "8.8.8.8"
tmp_file.write(orjson.dumps(models))
with open(tmp_path, "rb") as tmp_file:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert UserIP.objects.count() == 1
assert UserIP.objects.filter(ip_address="8.8.8.8").exists()
assert UserIP.objects.filter(country_code="US").exists()
assert UserIP.objects.filter(region_code="CA").exists()
# Unlike org/user scope, this must NOT be reset.
assert not UserIP.objects.filter(
last_seen__gt=datetime(2023, 7, 1, 0, 0, tzinfo=UTC)
).exists()
assert not UserIP.objects.filter(
first_seen__gt=datetime(2023, 7, 1, 0, 0, tzinfo=UTC)
).exists()
# Regression test for getsentry/self-hosted#2468.
@patch("sentry.users.models.userip.geo_by_addr")
def test_good_multiple_user_ips_per_user_in_global_scope(
self, mock_geo_by_addr: MagicMock
) -> None:
mock_geo_by_addr.return_value = {
"country_code": "US",
"region": "CA",
"subdivision": "San Francisco",
}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
# Modify the UserIP to be in California, USA.
for model in models:
if model["model"] == "sentry.userip":
model["fields"]["ip_address"] = "8.8.8.8"
                # Add two copies of the same IP - so the user now has 2 `UserIP` models for the IP
# `8.8.8.9`, 1 for `8.8.8.8`, and 1 for `8.8.8.7`. After import, we would expect to
# only see one model for each IP.
models.append(
{
"model": "sentry.userip",
"pk": 3,
"fields": {
"user": 2,
"ip_address": "8.8.8.9",
"country_code": "US",
"region_code": "CA",
"first_seen": "2013-04-05T03:29:45.000Z",
"last_seen": "2013-04-05T03:29:45.000Z",
},
}
)
models.append(
{
"model": "sentry.userip",
"pk": 4,
"fields": {
"user": 2,
"ip_address": "8.8.8.9",
"country_code": "CA", # Incorrect value - importing should fix this.
"region_code": "BC", # Incorrect value - importing should fix this.
"first_seen": "2014-04-05T03:29:45.000Z",
"last_seen": "2014-04-05T03:29:45.000Z",
},
}
)
models.append(
{
"model": "sentry.userip",
"pk": 4,
"fields": {
"user": 2,
"ip_address": "8.8.8.7",
"country_code": None, # Unknown value - importing should fix this.
"region_code": None, # Unknown value - importing should fix this.
"first_seen": "2014-04-05T03:29:45.000Z",
"last_seen": "2014-04-05T03:29:45.000Z",
},
}
)
tmp_file.write(orjson.dumps(self.sort_in_memory_json(models)))
with open(tmp_path, "rb") as tmp_file:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert UserIP.objects.count() == 3
assert UserIP.objects.filter(ip_address="8.8.8.9").count() == 1
assert UserIP.objects.filter(ip_address="8.8.8.8").count() == 1
assert UserIP.objects.filter(ip_address="8.8.8.7").count() == 1
assert UserIP.objects.filter(country_code="US").count() == 3
assert UserIP.objects.filter(region_code="CA").count() == 3
def test_bad_invalid_user_ip(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
                # Modify the IP address to be invalid.
for m in models:
if m["model"] == "sentry.userip":
m["fields"]["ip_address"] = "0.1.2.3.4.5.6.7.8.9.abc.def"
tmp_file.write(orjson.dumps(list(models)))
with open(tmp_path, "rb") as tmp_file:
with pytest.raises(ImportingError) as err:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
assert err.value.context.get_kind() == RpcImportErrorKind.ValidationError
assert err.value.context.on.model == "sentry.userip"
# Regression test for getsentry/self-hosted#2571.
def test_good_multiple_useremails_per_user_in_user_scope(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
                # Add two more `UserEmail`s (1 verified, 1 not) - so the user now has 3
                # `UserEmail` models, the latter two of which have no corresponding `Email` entry.
models.append(
{
"model": "sentry.useremail",
"pk": 100,
"fields": {
"user": 2,
"email": "second@example.com",
"validation_hash": "7jvwev0oc8sFyEyEwfvDAwxidtGzpAov",
"date_hash_added": "2023-06-22T22:59:56.521Z",
"is_verified": True,
},
}
)
models.append(
{
"model": "sentry.useremail",
"pk": 101,
"fields": {
"user": 2,
"email": "third@example.com",
"validation_hash": "",
"date_hash_added": "2023-06-22T22:59:57.521Z",
"is_verified": False,
},
}
)
tmp_file.write(orjson.dumps(self.sort_in_memory_json(models)))
with open(tmp_path, "rb") as tmp_file:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert UserEmail.objects.count() == 3
assert UserEmail.objects.values("user").distinct().count() == 1
assert UserEmail.objects.filter(email="testing@example.com").exists()
assert UserEmail.objects.filter(email="second@example.com").exists()
assert UserEmail.objects.filter(email="third@example.com").exists()
# Validations are scrubbed and regenerated in non-global scopes.
assert UserEmail.objects.filter(validation_hash="").count() == 0
assert UserEmail.objects.filter(is_verified=True).count() == 0
# Regression test for getsentry/self-hosted#2571.
def test_good_multiple_useremails_per_user_in_global_scope(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
                # Add two more `UserEmail`s (1 verified, 1 not) - so the user now has 3
                # `UserEmail` models, the latter two of which have no corresponding `Email` entry.
models.append(
{
"model": "sentry.useremail",
"pk": 100,
"fields": {
"user": 2,
"email": "second@example.com",
"validation_hash": "7jvwev0oc8sFyEyEwfvDAwxidtGzpAov",
"date_hash_added": "2023-06-22T22:59:56.521Z",
"is_verified": True,
},
}
)
models.append(
{
"model": "sentry.useremail",
"pk": 101,
"fields": {
"user": 2,
"email": "third@example.com",
"validation_hash": "",
"date_hash_added": "2023-06-22T22:59:57.521Z",
"is_verified": False,
},
}
)
tmp_file.write(orjson.dumps(self.sort_in_memory_json(models)))
with open(tmp_path, "rb") as tmp_file:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert UserEmail.objects.count() == 3
assert UserEmail.objects.values("user").distinct().count() == 1
assert UserEmail.objects.filter(email="testing@example.com").exists()
assert UserEmail.objects.filter(email="second@example.com").exists()
assert UserEmail.objects.filter(email="third@example.com").exists()
# Validation hashes are not touched in the global scope.
assert UserEmail.objects.filter(validation_hash="").count() == 1
assert UserEmail.objects.filter(is_verified=True).count() == 2
def test_bad_invalid_user_option(self) -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.json")
with open(tmp_path, "wb+") as tmp_file:
models = self.json_of_exhaustive_user_with_minimum_privileges()
                # Modify the `timezone` option to be invalid.
for m in models:
if m["model"] == "sentry.useroption" and m["fields"]["key"] == "timezone":
m["fields"]["value"] = '"MiddleEarth/Gondor"'
tmp_file.write(orjson.dumps(list(models)))
with open(tmp_path, "rb") as tmp_file:
with pytest.raises(ImportingError) as err:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
assert err.value.context.get_kind() == RpcImportErrorKind.ValidationError
assert err.value.context.on.model == "sentry.useroption"
|
SanitizationTests
|
python
|
sympy__sympy
|
sympy/simplify/hyperexpand.py
|
{
"start": 19497,
"end": 23642
}
|
class ____(Expr):
""" A generalized hypergeometric function. """
def __new__(cls, ap, bq):
obj = super().__new__(cls)
obj.ap = Tuple(*list(map(expand, ap)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.ap, self.bq)
@property
def sizes(self):
return (len(self.ap), len(self.bq))
@property
def gamma(self):
"""
Number of upper parameters that are negative integers
This is a transformation invariant.
"""
return sum(bool(x.is_integer and x.is_negative) for x in self.ap)
def _hashable_content(self):
return super()._hashable_content() + (self.ap,
self.bq)
def __call__(self, arg):
return hyper(self.ap, self.bq, arg)
def build_invariants(self):
"""
Compute the invariant vector.
Explanation
===========
The invariant vector is:
(gamma, ((s1, n1), ..., (sk, nk)), ((t1, m1), ..., (tr, mr)))
where gamma is the number of integer a < 0,
s1 < ... < sk
nl is the number of parameters a_i congruent to sl mod 1
t1 < ... < tr
ml is the number of parameters b_i congruent to tl mod 1
If the index pair contains parameters, then this is not truly an
        invariant, since the parameters cannot be sorted uniquely mod 1.
Examples
========
>>> from sympy.simplify.hyperexpand import Hyper_Function
>>> from sympy import S
>>> ap = (S.Half, S.One/3, S(-1)/2, -2)
>>> bq = (1, 2)
        Here gamma = 1,
             k = 3, s1 = 0, s2 = 1/3, s3 = 1/2
                    n1 = 1, n2 = 1, n3 = 2
             r = 1, t1 = 0
                    m1 = 2:
>>> Hyper_Function(ap, bq).build_invariants()
(1, ((0, 1), (1/3, 1), (1/2, 2)), ((0, 2),))
"""
abuckets, bbuckets = sift(self.ap, _mod1), sift(self.bq, _mod1)
def tr(bucket):
bucket = list(bucket.items())
if not any(isinstance(x[0], Mod) for x in bucket):
bucket.sort(key=lambda x: default_sort_key(x[0]))
bucket = tuple([(mod, len(values)) for mod, values in bucket if
values])
return bucket
return (self.gamma, tr(abuckets), tr(bbuckets))
def difficulty(self, func):
""" Estimate how many steps it takes to reach ``func`` from self.
Return -1 if impossible. """
if self.gamma != func.gamma:
return -1
oabuckets, obbuckets, abuckets, bbuckets = [sift(params, _mod1) for
params in (self.ap, self.bq, func.ap, func.bq)]
diff = 0
for bucket, obucket in [(abuckets, oabuckets), (bbuckets, obbuckets)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (mod not in bucket) or (mod not in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
return -1
l1 = list(bucket[mod])
l2 = list(obucket[mod])
l1.sort()
l2.sort()
for i, j in zip(l1, l2):
diff += abs(i - j)
return diff
def _is_suitable_origin(self):
"""
Decide if ``self`` is a suitable origin.
Explanation
===========
A function is a suitable origin iff:
* none of the ai equals bj + n, with n a non-negative integer
* none of the ai is zero
* none of the bj is a non-positive integer
Note that this gives meaningful results only when none of the indices
are symbolic.
"""
for a in self.ap:
for b in self.bq:
if (a - b).is_integer and (a - b).is_negative is False:
return False
for a in self.ap:
if a == 0:
return False
for b in self.bq:
if b.is_integer and b.is_nonpositive:
return False
return True
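    # Worked example (hand-evaluated sketch) for difficulty(): comparing
    # Hyper_Function([S.Half], [1]) with Hyper_Function([S(3)/2], [2]) puts
    # both upper parameters in the mod-1 bucket 1/2 and both lower parameters
    # in bucket 0, so the estimate is |3/2 - 1/2| + |2 - 1| = 2 steps.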
|
Hyper_Function
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/test_has_props.py
|
{
"start": 10968,
"end": 11945
}
|
class ____(hp.HasProps):
foo = Either(List(Int), Int, default=10)
def test_HasProps_apply_theme_either_simple() -> None:
# check applying multiple themes
c = EitherSimpleDefault()
assert c.foo == 10
theme = dict(foo=20)
c.apply_theme(theme)
assert c.foo == 20
theme = dict(foo=30)
c.apply_theme(theme)
assert c.foo == 30
# check user set before theme
c = EitherSimpleDefault()
theme = dict(foo=30)
c.foo = 50
c.apply_theme(theme)
assert c.foo == 50
# check user set after theme
c = EitherSimpleDefault()
theme = dict(foo=30)
c.apply_theme(theme)
c.foo = 50
assert c.foo == 50
# check user set alt type
c = EitherSimpleDefault()
theme = dict(foo=30)
c.foo = [50]
c.apply_theme(theme)
assert c.foo == [50]
# check themed alt type
c = EitherSimpleDefault()
theme = dict(foo=[100])
c.apply_theme(theme)
assert c.foo == [100]
|
EitherSimpleDefault
|
python
|
ray-project__ray
|
rllib/offline/estimators/tests/test_ope.py
|
{
"start": 2962,
"end": 6409
}
|
class ____(unittest.TestCase):
"""Compilation tests for using OPE both standalone and in an RLlib Algorithm"""
@classmethod
def setUpClass(cls):
ray.init()
seed = 42
np.random.seed(seed)
rllib_dir = Path(__file__).parent.parent.parent.parent
train_data = os.path.join(rllib_dir, "offline/tests/data/cartpole/small.json")
env_name = "CartPole-v1"
cls.gamma = 0.99
n_episodes = 3
cls.q_model_config = {"n_iters": 160}
cls.config_dqn_on_cartpole = (
DQNConfig()
.environment(env=env_name)
.framework("torch")
.env_runners(batch_mode="complete_episodes")
.offline_data(
input_="dataset",
input_config={"format": "json", "paths": train_data},
)
.evaluation(
evaluation_interval=1,
evaluation_duration=n_episodes,
evaluation_num_env_runners=1,
evaluation_duration_unit="episodes",
off_policy_estimation_methods={
"is": {"type": ImportanceSampling, "epsilon_greedy": 0.1},
"wis": {"type": WeightedImportanceSampling, "epsilon_greedy": 0.1},
"dm_fqe": {"type": DirectMethod, "epsilon_greedy": 0.1},
"dr_fqe": {"type": DoublyRobust, "epsilon_greedy": 0.1},
},
)
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", 0)))
)
num_env_runners = 4
dsize = num_env_runners * 1024
feature_dim = 64
action_dim = 8
data = {
SampleBatch.OBS: np.random.randn(dsize, 1, feature_dim),
SampleBatch.ACTIONS: np.random.randint(0, action_dim, dsize).reshape(-1, 1),
SampleBatch.REWARDS: np.random.rand(dsize).reshape(-1, 1),
SampleBatch.ACTION_PROB: 1 / action_dim * np.ones((dsize, 1)),
}
cls.train_df = pd.DataFrame({k: list(v) for k, v in data.items()})
cls.train_df["type"] = "SampleBatch"
train_ds = ray.data.from_pandas(cls.train_df).repartition(num_env_runners)
cls.dqn_on_fake_ds = (
DQNConfig()
.environment(
observation_space=gym.spaces.Box(-1, 1, (feature_dim,)),
action_space=gym.spaces.Discrete(action_dim),
)
.env_runners(num_env_runners=num_env_runners)
.framework("torch")
.offline_data(
input_="dataset",
input_config={"loader_fn": lambda: train_ds},
)
.evaluation(
evaluation_num_env_runners=num_env_runners,
ope_split_batch_by_episode=False,
)
# make the policy deterministic
.training(categorical_distribution_temperature=1e-20)
.debugging(seed=seed)
)
# Read n episodes of data, assuming that one line is one episode.
reader = DatasetReader(read_json(train_data))
batches = [reader.next() for _ in range(n_episodes)]
cls.batch = concat_samples(batches)
cls.n_episodes = len(cls.batch.split_by_episode())
print("Episodes:", cls.n_episodes, "Steps:", cls.batch.count)
@classmethod
def tearDownClass(cls):
ray.shutdown()
|
TestOPE
|
python
|
pytorch__pytorch
|
torch/_dynamo/eval_frame.py
|
{
"start": 41176,
"end": 46513
}
|
class ____(_TorchDynamoContext):
def __init__(self, msg: Optional[str] = None, wrapping: bool = True) -> None:
super().__init__(callback=None)
self.msg = msg
self.wrapping = wrapping
def __call__(self, fn: Callable[..., Any]) -> Callable[..., Any]:
# Earlier this code was in the base class _TorchDynamoContext. But we
# moved it here to have better code organization. For disable, we just
# want the callback to be None. We don't have to check trace_rules or
# create any wrapper.
fn = innermost_fn(fn)
if isinstance(fn, torch.nn.Module):
mod = fn
new_mod = OptimizedModule(mod, self)
new_mod._torchdynamo_orig_callable = mod.forward
return new_mod
if isinstance(fn, type):
# User has wrapped the class with compile/disable decorator. Apply
# disable to init/call method.
cls_obj = fn
# Disable on init is useful for reconstruction of bytecodes where we
# want to prevent Dynamo from tracing into the init function. Check
# test_reconstruction in test_model_output.py.
cls_obj.__init__ = self(cls_obj.__init__) # type: ignore[misc]
cls_obj.__call__ = self(cls_obj.__call__)
if issubclass(cls_obj, torch.nn.Module):
# NN module variable tracker directly inlines the _call_impl. Disable it.
# pyrefly: ignore [missing-attribute]
cls_obj._call_impl = self(cls_obj._call_impl)
return cls_obj
assert callable(fn), (
f"A callable function is expected, but {type(fn)} is provided."
)
def _fn(*args: Any, **kwargs: Any) -> Any:
prior = set_eval_frame(None)
try:
_maybe_set_eval_frame(_callback_from_stance(self.callback))
try:
if torch.compiler.is_exporting():
with fx_traceback.annotate(
{
"_torchdynamo_disable": True,
"_torchdynamo_disable_recursive": True,
"_torchdynamo_disable_method": getattr(
fn, "__name__", type(fn).__name__
),
}
):
return fn(*args, **kwargs)
return fn(*args, **kwargs)
finally:
set_eval_frame(None)
finally:
_maybe_set_eval_frame(prior)
        # Under some circumstances (e.g. precompile) we can end up calling the
        # @disable decorator in generated bytecode and triggering a recompile. This
        # happens because the old callback from torch.compile() is still active, in
        # which case we would trigger a failure with set_stance("fail_on_recompile").
        # Therefore we want to skip calling into any frame in this case.
if self.wrapping:
_fn = functools.wraps(fn)(_fn)
_fn._torchdynamo_disable = True # type: ignore[attr-defined]
_fn._torchdynamo_disable_msg = self.msg # type: ignore[attr-defined]
# Save the function pointer to find the original callable while nesting
# of decorators.
_fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
_fn._torchdynamo_disable_recursive = True # type: ignore[attr-defined]
return _fn
def __reduce__(self) -> tuple[type[DisableContext], tuple[Any, ...]]:
return (self.__class__, ())
def _optimize_catch_errors(
compile_fn: convert_frame.ConvertFrameProtocol,
hooks: Hooks,
backend_ctx_ctor: Callable[
[], contextlib.AbstractContextManager[Any]
] = null_context,
fullgraph: bool = False,
error_on_graph_break: Optional[bool] = None,
export: bool = False,
dynamic: Optional[bool] = None,
compiler_config: Optional[Any] = None,
rebuild_ctx: Optional[Callable[[], Union[OptimizeContext, _NullDecorator]]] = None,
package: Optional[CompilePackage] = None,
) -> OptimizeContext:
return OptimizeContext(
convert_frame.catch_errors_wrapper(compile_fn, hooks),
backend_ctx_ctor=backend_ctx_ctor,
first_ctx=True,
fullgraph=fullgraph,
error_on_graph_break=error_on_graph_break,
export=export,
dynamic=dynamic,
compiler_config=compiler_config,
rebuild_ctx=rebuild_ctx,
package=package,
hooks=hooks,
)
def get_compiler_fn(
compiler_fn: Union[str, Callable[..., Any], None],
) -> WrapBackendDebug:
from .repro.after_dynamo import wrap_backend_debug
if compiler_fn is None:
# Special case None to avoid crashing in hasattr
compiler_str = None
elif hasattr(compiler_fn, "compiler_name"):
compiler_str = compiler_fn.compiler_name # type: ignore[union-attr]
assert isinstance(compiler_str, str)
elif isinstance(compiler_fn, str):
compiler_str = compiler_fn
else:
compiler_str = None
compiler_fn = lookup_backend(compiler_fn) # type: ignore[arg-type]
return wrap_backend_debug(compiler_fn, compiler_str)
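# Hedged usage sketch: DisableContext backs the public disable decorator, so
# user code typically reaches it as below (assuming the usual entry point
# torch._dynamo.disable):
#
#     @torch._dynamo.disable
#     def helper(x):
#         return x + 1  # Dynamo will not trace into this frame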
|
DisableContext
|
python
|
pytorch__pytorch
|
torch/_higher_order_ops/effects.py
|
{
"start": 2090,
"end": 10100
}
|
class ____(HigherOrderOperator):
"""
with_effects(token, op, args, kwargs) -> (new_token, op_results)
This HOP helps ensure ordering between side effectful ops like prints or ops
using torchbind objects. This is needed to ensure a traced graph from
AOTAutograd is functional so that future optimization passes do not reorder
these operators. This is done through threading "effect tokens" through the
graph to enforce data dependence between side effectful ops.
The tokens are basically dummy values (torch.tensor([])). We create a token
per "effect type", which are enumerated in the _EffectType enum.
"""
def __init__(self) -> None:
super().__init__("with_effects")
def __call__(
self,
token,
op: OpType,
*args: tuple[Any, ...],
**kwargs: dict[str, Any],
) -> tuple[Any, ...]:
assert isinstance(op, (torch._ops.HigherOrderOperator, torch._ops.OpOverload))
assert not has_aliasing(op), "Ops with aliasing is not supported"
assert isinstance(kwargs, dict)
return super().__call__(token, op, *args, **kwargs)
with_effects = WithEffects()
def has_aliasing(op: OpType):
# NOT FOR PUBLIC USE
if isinstance(op, torch._ops.HigherOrderOperator):
return False
for arg in op._schema.arguments:
if arg.alias_info is not None:
return True
for arg in op._schema.returns:
if arg.alias_info is not None:
return True
return False
def has_effects(op) -> bool:
# Skip over the profiler's RecordFunction as they should not show up in the graph
_skip_ops = {torch.ops.profiler._record_function_exit._RecordFunction}
if op in _skip_ops:
return False
return (
isinstance(op, (torch._ops.HigherOrderOperator, torch._ops.OpOverload))
and not has_aliasing(op)
and _get_effect(op) is not None
)
def new_token_tensor() -> torch.Tensor:
return torch.tensor([])
@with_effects.py_impl(DispatchKey.CompositeExplicitAutograd)
def with_effects_dense(
token: torch.Tensor,
op: torch._ops.OpOverload,
*args: tuple[Any, ...],
**kwargs: dict[str, Any],
) -> tuple[torch.Tensor, ...]:
out = op(*args, **kwargs)
new_token = new_token_tensor()
# [NOTE: with_effects return type]
# Note that we should only do *out for tuple type, but not list type.
# This is to match the schema of the op.
# For tuple output, the length of schema output is the same as the length of out.
# For list output, the length of schema output is 1 (e.g. Tensor[]) regardless of the
# length of the list.
if isinstance(out, tuple):
return (new_token, *out)
return (new_token, out)
@with_effects.py_impl(FakeTensorMode)
def with_effects_fake(
mode,
token: torch.Tensor,
op: torch._ops.OpOverload,
*args: tuple[Any, ...],
**kwargs: dict[str, Any],
) -> tuple[torch.Tensor, ...]:
with mode:
result = with_effects_dense(token, op, *args, **kwargs)
return result
@with_effects.py_impl(ProxyTorchDispatchMode)
def with_effects_proxy(
mode,
token: torch.Tensor,
op: torch._ops.OpOverload,
*args: tuple[Any, ...],
**kwargs: dict[str, Any],
) -> tuple[torch.Tensor, ...]:
with disable_proxy_modes_tracing():
out = with_effects(token, op, *args, **kwargs)
proxy_token = mode.tracer.unwrap_proxy(token)
proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args)
proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)
from torch.fx.node import has_side_effect
    # Mark the op as side-effectful so that graph.eliminate_dead_code does not
    # remove it when it has no outputs or its outputs are unused.
has_side_effect(op)
out_proxy = mode.tracer.create_proxy(
"call_function",
with_effects,
(proxy_token, op, *proxy_args),
proxy_kwargs,
)
result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
return result
with_effects.fallthrough(DispatchKey.AutogradCPU)
with_effects.fallthrough(DispatchKey.AutogradCUDA)
def _get_schema(op, args, kwargs: Optional[dict] = None) -> torch.FunctionSchema:
if isinstance(op, torch._ops.OpOverload):
return op._schema
elif op == call_torchbind:
return getattr(args[0], args[1]).schema
elif op == hop_print:
# hop_print currently expects (format_str, *kwargs) as its arguments
extra_kwargs = kwargs or {}
return op.gen_schema(*args, **extra_kwargs)
else:
raise RuntimeError(f"Unable to get schema for op {op}")
def handle_effects(
allow_token_discovery: bool,
tokens: dict[_EffectType, torch.Tensor],
op: OpType,
args: tuple[Any, ...],
kwargs: dict[str, Any],
) -> Any:
"""
Args:
allow_token_discovery: Whether or not we are discovering tokens. If this
is true, we will create a token for every side effect type seen that
does not have a token assigned yet. If this is false, the tokens
should've all been created ahead of time, so we will error if there is
no token mapping to every effect type.
tokens: Map of effect type to tokens. This is to chain operators of the
same effects together so that they do not get reordered in later
optimization passes.
"""
# Get a token. We can't do `tokens.get(op, torch.tensor([]))` because
# this will create an empty tensor during proxy mode tracing if the token
# doesn't exist. But the tokens should always exist during proxy mode tracing.
key = _get_effect(op)
assert key is not None
if key not in tokens:
assert allow_token_discovery, (
f"Could not find a token for effect {key} which came from the function {op}"
)
proxy_tensor_mode = torch._C._get_dispatch_mode(
torch._C._TorchDispatchModeKey.PROXY
)
if proxy_tensor_mode is not None:
            # If we discover a new token during tracing, we are in the backward
            # pass; patch the graph by adding an extra tangents_token input to
            # the joint graph.
tracer = proxy_tensor_mode.tracer
from torch.fx.experimental.proxy_tensor import (
disable_proxy_modes_tracing,
track_tensor_tree,
)
with disable_proxy_modes_tracing():
token_tensor = new_token_tensor()
token_proxy = proxy_tensor_mode.tracer.create_proxy(
"placeholder", "tangents_token", (), {}, name="tangents_token"
)
track_tensor_tree(token_tensor, token_proxy, constant=None, tracer=tracer)
tokens[key] = token_tensor
else:
tokens[key] = new_token_tensor()
token = tokens[key]
from torch._subclasses.functional_tensor import PythonFunctionalizeAPI
ctx = PythonFunctionalizeAPI()
unwrapped_token = ctx.unwrap_tensors([token])[0]
unwrapped_args = ctx.unwrap_tensors(args)
unwrapped_kwargs = ctx.unwrap_tensors(kwargs) # type: ignore[arg-type]
with ctx.redispatch_to_next():
(new_token, *unwrapped_outs) = with_effects(
unwrapped_token, op, *unwrapped_args, **unwrapped_kwargs
)
schema = _get_schema(op, unwrapped_args, unwrapped_kwargs)
if len(schema.returns) == 0:
assert unwrapped_outs[0] is None
unwrapped_outs = None # type: ignore[assignment]
elif len(schema.returns) == 1:
assert len(unwrapped_outs) == 1
unwrapped_outs = unwrapped_outs[0]
else:
assert len(unwrapped_outs) == len(schema.returns)
# Add the newly created token into the tokens map for a following call to
# use this token.
wrapped_token = ctx.wrap_tensors(new_token)
assert isinstance(wrapped_token, torch.Tensor)
tokens[key] = wrapped_token
# pyrefly: ignore [bad-argument-type]
return ctx.wrap_tensors(unwrapped_outs)
|
WithEffects
|
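A minimal, framework-free sketch of the token-threading idea behind handle_effects: every effectful call reads the current token for its effect type and writes a fresh one, so calls with the same effect stay ordered by a plain data dependency. All names below are illustrative, not part of the torch API.

from typing import Any, Callable

tokens: dict[str, int] = {}  # effect type -> current token (an int stands in for the tensor)

def with_token(effect: str, fn: Callable[..., Any], *args: Any) -> Any:
    token = tokens.get(effect, 0)   # "token discovery" on first use
    out = fn(*args)                 # run the effectful operation
    tokens[effect] = token + 1      # the fresh token chains the next call
    return out

with_token("print", print, "first")
with_token("print", print, "second")  # ordered after "first" via the token chain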
python
|
doocs__leetcode
|
solution/3300-3399/3311.Construct 2D Grid Matching Graph Layout/Solution.py
|
{
"start": 0,
"end": 1290
}
|
class ____:
def constructGridLayout(self, n: int, edges: List[List[int]]) -> List[List[int]]:
g = [[] for _ in range(n)]
for u, v in edges:
g[u].append(v)
g[v].append(u)
deg = [-1] * 5
for x, ys in enumerate(g):
deg[len(ys)] = x
if deg[1] != -1:
row = [deg[1]]
elif deg[4] == -1:
x = deg[2]
for y in g[x]:
if len(g[y]) == 2:
row = [x, y]
break
else:
x = deg[2]
row = [x]
pre = x
x = g[x][0]
while len(g[x]) > 2:
row.append(x)
for y in g[x]:
if y != pre and len(g[y]) < 4:
pre = x
x = y
break
row.append(x)
ans = [row]
vis = [False] * n
for _ in range(n // len(row) - 1):
for x in row:
vis[x] = True
nxt = []
for x in row:
for y in g[x]:
if not vis[y]:
nxt.append(y)
break
ans.append(nxt)
row = nxt
return ans
|
Solution
|
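A quick smoke test for the layout routine above, assuming the masked class carries the Solution name given in the record's target field. The 4-cycle on vertices 0-3 only admits 2x2 layouts, so any valid answer uses all four labels with adjacent cells joined by edges.

sol = Solution()
grid = sol.constructGridLayout(4, [[0, 1], [0, 2], [1, 3], [2, 3]])
assert sorted(v for row in grid for v in row) == [0, 1, 2, 3]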
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-ascii-delete-sum-for-two-strings.py
|
{
"start": 33,
"end": 767
}
|
class ____(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
dp = [[0] * (len(s2)+1) for _ in xrange(2)]
for j in xrange(len(s2)):
dp[0][j+1] = dp[0][j] + ord(s2[j])
for i in xrange(len(s1)):
dp[(i+1)%2][0] = dp[i%2][0] + ord(s1[i])
for j in xrange(len(s2)):
if s1[i] == s2[j]:
dp[(i+1)%2][j+1] = dp[i%2][j]
else:
dp[(i+1)%2][j+1] = min(dp[i%2][j+1] + ord(s1[i]), \
dp[(i+1)%2][j] + ord(s2[j]))
return dp[len(s1)%2][-1]
# Time: O(m * n)
# Space: O(m * n)
|
Solution
|
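The same rolling-array recurrence, rewritten as a self-contained Python 3 sketch (range in place of xrange); dp keeps only two rows, so space is O(len(s2)) even though the table is conceptually m x n. "sea"/"eat" is the classic check: deleting 's' (115) and 't' (116) costs 231.

def minimum_delete_sum(s1: str, s2: str) -> int:
    dp = [[0] * (len(s2) + 1) for _ in range(2)]
    for j in range(len(s2)):
        dp[0][j + 1] = dp[0][j] + ord(s2[j])
    for i in range(len(s1)):
        dp[(i + 1) % 2][0] = dp[i % 2][0] + ord(s1[i])
        for j in range(len(s2)):
            if s1[i] == s2[j]:
                dp[(i + 1) % 2][j + 1] = dp[i % 2][j]
            else:
                dp[(i + 1) % 2][j + 1] = min(dp[i % 2][j + 1] + ord(s1[i]),
                                             dp[(i + 1) % 2][j] + ord(s2[j]))
    return dp[len(s1) % 2][-1]

assert minimum_delete_sum("sea", "eat") == 231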
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-changes-to-make-k-semi-palindromes.py
|
{
"start": 2879,
"end": 3796
}
|
class ____(object):
def minimumChanges(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
def min_dist(left, right): # Time: O(nlogn)
return min(sum(s[left+i] != s[right-((i//d+1)*d-1)+(i%d)] for i in xrange((right-left+1)//2))
for d in divisors[right-left+1])
divisors = [[] for _ in xrange(len(s)+1)]
for i in xrange(1, len(divisors)): # Time: O(nlogn), Space: O(nlogn)
for j in xrange(i+i, len(divisors), i):
divisors[j].append(i)
dp = [[len(s)]*(k+1) for _ in xrange(len(s)+1)]
dp[0][0] = 0
for i in xrange(len(s)): # Time: O(n^2 * nlogn + n^2 * k), Space: O(n * k)
for j in xrange(i):
c = min_dist(j, i)
for l in xrange(k):
dp[i+1][l+1] = min(dp[i+1][l+1], dp[j][l]+c)
return dp[len(s)][k]
|
Solution3
|
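The divisor table that min_dist depends on, isolated as a runnable sketch: divisors[m] ends up holding every proper divisor of m, filled sieve-style in O(n log n) total work.

n = 12
divisors = [[] for _ in range(n + 1)]
for i in range(1, n + 1):
    for j in range(i + i, n + 1, i):
        divisors[j].append(i)
assert divisors[12] == [1, 2, 3, 4, 6]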
python
|
huggingface__transformers
|
src/transformers/integrations/finegrained_fp8.py
|
{
"start": 24943,
"end": 26863
}
|
class ____(ConversionOps):
"""Inverse operation of :class:`Fp8Quantize`. Takes a pair (weight, scale) and reconstructs the fp32 tensor."""
def __init__(self, block_size: tuple[int, int] | None = None):
self.block_size = block_size
def convert(
self,
value: Sequence[torch.Tensor] | dict[str, torch.Tensor],
*,
context: dict[str, Any],
) -> torch.Tensor:
if isinstance(value, dict):
tensors = list(value.values())
else:
tensors = list(value) if isinstance(value, Sequence) else [value]
if len(tensors) != 2:
raise ValueError("Fp8Dequantize expects exactly two tensors: quantized weights and scales.")
quantized, scales = tensors
if not isinstance(quantized, torch.Tensor) or not isinstance(scales, torch.Tensor):
raise TypeError("Fp8Dequantize expects tensors as inputs.")
quantized_fp32 = quantized.to(torch.float32)
rows, cols = quantized_fp32.shape[-2:]
block_size = self.block_size
if block_size is None:
quant_config = context.get("quantization_config")
block_size = getattr(quant_config, "weight_block_size", None)
if block_size is None:
block_size = (rows, cols)
block_m, block_n = block_size
if rows % block_m != 0 or cols % block_n != 0:
raise ValueError(
f"Matrix dimensions ({rows}, {cols}) must be divisible by block sizes ({block_m}, {block_n})."
)
reshaped = quantized_fp32.reshape(-1, rows // block_m, block_m, cols // block_n, block_n)
expanded_scales = scales.to(torch.float32).reshape(-1, rows // block_m, cols // block_n)
expanded_scales = expanded_scales.unsqueeze(-1).unsqueeze(2)
dequantized = reshaped * expanded_scales
return dequantized.reshape(quantized_fp32.shape)
|
Fp8Dequantize
|
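The block-wise rescaling above in miniature: each (block_m, block_n) tile of the quantized matrix is multiplied by its single per-block scale. Shapes and names here are illustrative, not the transformers API.

import torch

def dequantize_blocks(q: torch.Tensor, scales: torch.Tensor, block: tuple[int, int]) -> torch.Tensor:
    bm, bn = block
    rows, cols = q.shape
    r = q.reshape(rows // bm, bm, cols // bn, bn)
    s = scales.reshape(rows // bm, 1, cols // bn, 1)  # broadcast one scale per tile
    return (r * s).reshape(rows, cols)

q = torch.ones(4, 4)
s = torch.tensor([[2.0, 3.0], [4.0, 5.0]])  # one scale per 2x2 block
out = dequantize_blocks(q, s, (2, 2))
assert out[0, 0] == 2.0 and out[3, 3] == 5.0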
python
|
huggingface__transformers
|
tests/models/xlm/test_modeling_xlm.py
|
{
"start": 1396,
"end": 12252
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_lengths=True,
use_token_type_ids=True,
use_labels=True,
gelu_activation=True,
sinusoidal_embeddings=False,
causal=False,
asm=False,
n_langs=2,
vocab_size=99,
n_special=0,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=2,
num_choices=4,
summary_type="last",
use_proj=True,
scope=None,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_lengths = use_input_lengths
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.causal = causal
self.asm = asm
self.n_langs = n_langs
self.vocab_size = vocab_size
self.n_special = n_special
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.summary_type = summary_type
self.use_proj = use_proj
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = random_attention_mask([self.batch_size, self.seq_length])
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size,
n_special=self.n_special,
emb_dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
gelu_activation=self.gelu_activation,
sinusoidal_embeddings=self.sinusoidal_embeddings,
asm=self.asm,
causal=self.causal,
n_langs=self.n_langs,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
summary_type=self.summary_type,
use_proj=self.use_proj,
num_labels=self.num_labels,
bos_token_id=self.bos_token_id,
)
def create_and_check_xlm_model(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = XLMModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
result = model(input_ids, langs=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_xlm_lm_head(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = XLMWithLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_xlm_simple_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = XLMForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
result = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_xlm_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = XLMForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result_with_labels = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
result_with_labels = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
(total_loss,) = result_with_labels.to_tuple()
result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def create_and_check_xlm_sequence_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = XLMForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def create_and_check_xlm_token_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_labels = self.num_labels
model = XLMForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_xlm_for_multiple_choice(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_choices = self.num_choices
model = XLMForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
|
XLMModelTester
|
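A hedged sketch of how such a tester is typically driven: a unittest.TestCase passes itself in as parent, so the tester's self.parent.assertEqual calls land on the running test. The harness class name below is illustrative; the real suite wires this through shared mixins.

import unittest

class XLMModelHarness(unittest.TestCase):  # hypothetical harness
    def test_model(self):
        tester = XLMModelTester(self)
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlm_model(*config_and_inputs)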
python
|
sphinx-doc__sphinx
|
sphinx/util/build_phase.py
|
{
"start": 105,
"end": 277
}
|
class ____(IntEnum):
"""Build phase of Sphinx application."""
INITIALIZATION = 1
READING = 2
CONSISTENCY_CHECK = 3
RESOLVING = 3
WRITING = 4
|
BuildPhase
|
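Note that CONSISTENCY_CHECK and RESOLVING share the value 3, which with IntEnum makes RESOLVING an alias of CONSISTENCY_CHECK rather than a distinct member. A quick sketch of that semantics:

from enum import IntEnum

class Phase(IntEnum):
    CONSISTENCY_CHECK = 3
    RESOLVING = 3  # alias: same value, same member

assert Phase.RESOLVING is Phase.CONSISTENCY_CHECK
assert [m.name for m in Phase] == ["CONSISTENCY_CHECK"]  # aliases are not iterated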
python
|
allegroai__clearml
|
clearml/backend_api/session/jsonmodels/fields.py
|
{
"start": 4355,
"end": 4441
}
|
class ____(BaseField):
"""String field."""
types = six.string_types
|
StringField
|
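The declarative pattern in miniature, as a sketch rather than jsonmodels' actual implementation: a field class only has to declare a types tuple, and validation is a single isinstance check against it.

import six

class FieldSketch:  # hypothetical stand-in for BaseField
    types = ()
    def validate(self, value):
        if not isinstance(value, self.types):
            raise TypeError("expected one of %r, got %r" % (self.types, type(value)))

class StringFieldSketch(FieldSketch):
    types = six.string_types  # (str,) on Python 3

StringFieldSketch().validate("ok")  # passes; validate(3) would raise TypeError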
python
|
numba__numba
|
numba/tests/test_range.py
|
{
"start": 1130,
"end": 5324
}
|
class ____(unittest.TestCase):
def test_loop1_int16(self):
pyfunc = loop1
cfunc = njit((types.int16,))(pyfunc)
        self.assertEqual(cfunc(5), pyfunc(5))
def test_loop2_int16(self):
pyfunc = loop2
cfunc = njit((types.int16, types.int16))(pyfunc)
        self.assertEqual(cfunc(1, 6), pyfunc(1, 6))
def test_loop3_int32(self):
pyfunc = loop3
cfunc = njit((types.int32, types.int32, types.int32))(pyfunc)
arglist = [
(1, 2, 1),
(2, 8, 3),
(-10, -11, -10),
(-10, -10, -2),
]
for args in arglist:
self.assertEqual(cfunc(*args), pyfunc(*args))
def test_range_len1(self):
pyfunc = range_len1
typelist = [types.int16, types.int32, types.int64]
arglist = [5, 0, -5]
for typ in typelist:
cfunc = njit((typ,))(pyfunc)
for arg in arglist:
self.assertEqual(cfunc(typ(arg)), pyfunc(typ(arg)))
def test_range_len2(self):
pyfunc = range_len2
typelist = [types.int16, types.int32, types.int64]
arglist = [(1,6), (6,1), (-5, -1)]
for typ in typelist:
cfunc = njit((typ, typ))(pyfunc)
for args in arglist:
args_ = tuple(typ(x) for x in args)
self.assertEqual(cfunc(*args_), pyfunc(*args_))
def test_range_len3(self):
pyfunc = range_len3
typelist = [types.int16, types.int32, types.int64]
arglist = [
(1, 2, 1),
(2, 8, 3),
(-10, -11, -10),
(-10, -10, -2),
]
for typ in typelist:
cfunc = njit((typ, typ, typ))(pyfunc)
for args in arglist:
args_ = tuple(typ(x) for x in args)
self.assertEqual(cfunc(*args_), pyfunc(*args_))
def test_range_iter_len1(self):
range_func = range_len1
range_iter_func = range_iter_len1
typelist = [types.int16, types.int32, types.int64]
arglist = [5, 0, -5]
for typ in typelist:
cfunc = njit((typ,))(range_iter_func)
for arg in arglist:
self.assertEqual(cfunc(typ(arg)), range_func(typ(arg)))
def test_range_iter_list(self):
range_iter_func = range_iter_len2
cfunc = njit((types.List(types.intp, reflected=True),))(range_iter_func)
arglist = [1, 2, 3, 4, 5]
self.assertEqual(cfunc(arglist), len(arglist))
def test_range_attrs(self):
pyfunc = range_attrs
arglist = [(0, 0, 1),
(0, -1, 1),
(-1, 1, 1),
(-1, 4, 1),
(-1, 4, 10),
(5, -5, -2),]
cfunc = njit((types.int64, types.int64, types.int64),)(pyfunc)
for arg in arglist:
self.assertEqual(cfunc(*arg), pyfunc(*arg))
def test_range_contains(self):
pyfunc = range_contains
arglist = [(0, 0, 1),
(-1, 0, 1),
(1, 0, -1),
(0, -1, 1),
(0, 1, -1),
(-1, 1, 1),
(-1, 4, 1),
(-1, 4, 10),
(5, -5, -2),]
bool_vals = [True, False]
int_vals = [-10, -6, -5, -4, -2, -1, 0,
1, 2, 4, 5, 6, 10]
float_vals = [-1.1, -1.0, 0.0, 1.0, 1.1]
complex_vals = [1 + 0j, 1 + 1j, 1.1 + 0j, 1.0 + 1.1j]
vallist = (bool_vals + int_vals + float_vals
+ complex_vals)
cfunc = njit(pyfunc)
for arg in arglist:
for val in vallist:
self.assertEqual(cfunc(val, *arg), pyfunc(val, *arg))
non_numeric_vals = [{'a': 1}, [1, ], 'abc', (1,)]
cfunc_obj = jit(pyfunc, forceobj=True)
for arg in arglist:
for val in non_numeric_vals:
self.assertEqual(cfunc_obj(val, *arg), pyfunc(val, *arg))
@njit
def my_arange(start, stop, step):
x = np.zeros(len(range(start, stop, step)), dtype=np.uint64)
i = 0
for v in range(start, stop, step):
x[i] = v
i += 1
return x
|
TestRange
|
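The O(1) length identity those range tests exercise, checked in plain Python against the same argument lists:

def range_length(start: int, stop: int, step: int) -> int:
    if step > 0:
        return max(0, (stop - start + step - 1) // step)
    return max(0, (start - stop - step - 1) // (-step))

for args in [(1, 2, 1), (2, 8, 3), (-10, -11, -10), (-10, -10, -2), (5, -5, -2)]:
    assert range_length(*args) == len(range(*args))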
python
|
pytorch__pytorch
|
test/distributed/checkpoint/e2e/test_fine_tuning.py
|
{
"start": 2343,
"end": 7139
}
|
class ____(DTensorTestBase):
@property
def world_size(self) -> int:
return min(4, torch.accelerator.device_count())
@property
def backend(self):
curr_backend = dist.get_default_backend_for_device(self.device_type)
return f"cpu:gloo,{self.device_type}:{curr_backend}"
def pretrain(self, pretrain_dir: str) -> None:
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
model = PreTrainedModel().to(self.device_type)
model = FSDP(model, device_mesh=device_mesh)
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
# Training
for _ in range(3):
batch = torch.rand(32, DIM, device=self.device_type)
loss = model(batch).sum()
loss.backward()
optim.step()
optim.zero_grad()
# Save state_dict
model_state_dict, optim_state_dict = get_state_dict(model, optimizers=optim)
saved_state_dict = {"model": model_state_dict, "optim": optim_state_dict}
dist_cp.save(
state_dict=saved_state_dict,
storage_writer=dist_cp.FileSystemWriter(pretrain_dir),
)
def finetune(self, pretrain_dir: str, finetune_dir: str) -> None:
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
model = FineTuningModel().to(self.device_type)
# TODO: make the parallelism more complicated, e.g., using 2D + DDP.
model = FSDP(model, use_orig_params=True, device_mesh=device_mesh)
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
# Simulate that the fine tuning restart after 3 iterations
for i in range(2):
# Load pretrain submodules checkpoint
pretrain_state_dict = get_model_state_dict(
model,
submodules={model.pretrain},
options=StateDictOptions(keep_submodule_prefixes=False),
)
dist_cp.load(
{"model": pretrain_state_dict},
storage_reader=dist_cp.FileSystemReader(pretrain_dir),
)
set_model_state_dict(
model,
model_state_dict={model.pretrain: pretrain_state_dict},
options=StateDictOptions(strict=False),
)
try:
# Load training submodules checkpoint
model_state_dict, optim_state_dict = get_state_dict(
model,
optimizers=optim,
options=StateDictOptions(ignore_frozen_params=True),
)
dist_cp.load_state_dict(
{"model": model_state_dict, "optim": optim_state_dict},
storage_reader=dist_cp.FileSystemReader(pretrain_dir),
)
set_state_dict(
model,
optimizers=optim,
model_state_dict=model_state_dict,
optim_state_dict=optim_state_dict,
options=StateDictOptions(strict=False),
)
except KeyError:
# If this is the first round of the fine tuning, then nothing is saved.
                # If this is the restart of the fine tuning, then the checkpoint should exist.
self.assertEqual(i, 0)
# Training
for _ in range(3):
batch = torch.rand(32, DIM, device=self.device_type)
loss = model(batch).sum()
loss.backward()
optim.step()
optim.zero_grad()
# Save state_dict
model_state_dict, optim_state_dict = get_state_dict(
model,
optimizers=optim,
options=StateDictOptions(ignore_frozen_params=True),
)
saved_state_dict = {"model": model_state_dict, "optim": optim_state_dict}
dist_cp.save(
state_dict=saved_state_dict,
storage_writer=dist_cp.FileSystemWriter(finetune_dir),
)
@skip_if_lt_x_gpu(4)
@with_comms
@with_temp_dir
def test_fine_tuning(self) -> None:
self.assertTrue(os.path.exists(self.temp_dir))
pretrain_dir = os.path.join(self.temp_dir, "pretrain")
finetune_dir = os.path.join(self.temp_dir, "finetune")
print(pretrain_dir, finetune_dir)
if self.rank == 0:
os.mkdir(pretrain_dir)
os.mkdir(finetune_dir)
dist.barrier()
os.sync()
self.assertTrue(os.path.exists(pretrain_dir))
self.assertTrue(os.path.exists(finetune_dir))
self.pretrain(pretrain_dir)
self.finetune(pretrain_dir, finetune_dir)
if __name__ == "__main__":
run_tests()
|
TestFineTuning
|
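The try-load/first-run pattern inside finetune(), reduced to a single-process sketch with illustrative names; the real test uses the distributed checkpoint APIs and a KeyError probe rather than a path check.

import os
import torch

def load_or_start(model: torch.nn.Module, path: str) -> bool:
    if not os.path.exists(path):
        return False                        # first round: nothing saved yet
    model.load_state_dict(torch.load(path))
    return True                             # restart: the checkpoint existed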
python
|
openai__openai-python
|
src/openai/types/responses/file_search_tool_param.py
|
{
"start": 546,
"end": 817
}
|
class ____(TypedDict, total=False):
embedding_weight: Required[float]
"""The weight of the embedding in the reciprocal ranking fusion."""
text_weight: Required[float]
"""The weight of the text in the reciprocal ranking fusion."""
|
RankingOptionsHybridSearch
|
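At runtime a TypedDict is just a dict; the Required[...] markers only matter to type checkers, even under total=False. Assuming the masked class carries the record's target name:

options: RankingOptionsHybridSearch = {
    "embedding_weight": 0.5,
    "text_weight": 0.5,
}
assert isinstance(options, dict)  # no special runtime type is involved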
python
|
allegroai__clearml
|
examples/frameworks/jsonargparse/jsonargparse_nested_namespaces.py
|
{
"start": 112,
"end": 594
}
|
class ____:
opt1: str = "from default 1"
opt2: str = "from default 2"
if __name__ == "__main__":
Task.init(project_name="examples", task_name="jsonargparse nested namespaces")
parser = ArgumentParser()
parser.add_argument("--arg1.opt1", default="from default 1")
parser.add_argument("--arg1.opt2", default="from default 2")
parser.add_argument("--arg2", type=Arg2, default=Arg2())
parser.add_argument("--not-nested")
print(parser.parse_args())
|
Arg2
|
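A hedged sketch of the nesting behavior the example relies on: jsonargparse groups dotted option names into nested namespaces, so the parsed result is reachable attribute-by-attribute.

from jsonargparse import ArgumentParser

p = ArgumentParser()
p.add_argument("--arg1.opt1", default="from default 1")
cfg = p.parse_args(["--arg1.opt1", "custom"])
assert cfg.arg1.opt1 == "custom"  # dotted flags become nested namespaces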
python
|
falconry__falcon
|
falcon/bench/queues/messages.py
|
{
"start": 586,
"end": 754
}
|
class ____:
def on_post(self, req, resp, tenant_id, queue_name):
pass
def on_get(self, req, resp, tenant_id, queue_name):
pass
|
CollectionResource
|
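A sketch of how Falcon (3-style API) would bind such a responder to a route; the URI template below is illustrative, with fields matching the responder's tenant_id/queue_name arguments.

import falcon

app = falcon.App()
app.add_route(
    "/v1/{tenant_id}/queues/{queue_name}/messages",
    CollectionResource(),
)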
python
|
huggingface__transformers
|
src/transformers/models/auto/video_processing_auto.py
|
{
"start": 10032,
"end": 20188
}
|
class ____:
r"""
This is a generic video processor class that will be instantiated as one of the video processor classes of the
library when created with the [`AutoVideoProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise OSError(
"AutoVideoProcessor is designed to be instantiated "
"using the `AutoVideoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(VIDEO_PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the video processor classes of the library from a pretrained model vocabulary.
The video processor class to instantiate is selected based on the `model_type` property of the config object
(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained video_processor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a video processor file saved using the
[`~video_processing_utils.BaseVideoProcessor.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved video processor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model video processor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the video processor files and override the cached versions if
they exist.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final video processor object. If `True`, then this
functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
`kwargs` which has not been used to update `video_processor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are video processor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoVideoProcessor
>>> # Download video processor from huggingface.co and cache.
>>> video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
>>> # If video processor files are in a directory (e.g. video processor was saved using *save_pretrained('./test/saved_model/')*)
>>> # video_processor = AutoVideoProcessor.from_pretrained("./test/saved_model/")
```"""
config = kwargs.pop("config", None)
trust_remote_code = kwargs.pop("trust_remote_code", None)
kwargs["_from_auto"] = True
config_dict, _ = BaseVideoProcessor.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)
video_processor_class = config_dict.get("video_processor_type", None)
video_processor_auto_map = None
if "AutoVideoProcessor" in config_dict.get("auto_map", {}):
video_processor_auto_map = config_dict["auto_map"]["AutoVideoProcessor"]
# If we still don't have the video processor class, check if we're loading from a previous image processor config
# and if so, infer the video processor class from there.
if video_processor_class is None and video_processor_auto_map is None:
image_processor_class = config_dict.pop("image_processor_type", None)
if image_processor_class is not None:
video_processor_class_inferred = image_processor_class.replace("ImageProcessor", "VideoProcessor")
# Some models have different image processors, e.g. InternVL uses GotOCRImageProcessor
# We cannot use GotOCRVideoProcessor when falling back for BC and should try to infer from config later on
if video_processor_class_from_name(video_processor_class_inferred) is not None:
video_processor_class = video_processor_class_inferred
if "AutoImageProcessor" in config_dict.get("auto_map", {}):
image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
video_processor_auto_map = image_processor_auto_map.replace("ImageProcessor", "VideoProcessor")
# If we don't find the video processor class in the video processor config, let's try the model config.
if video_processor_class is None and video_processor_auto_map is None:
if not isinstance(config, PreTrainedConfig):
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
            # It could be in `config.video_processor_type`
video_processor_class = getattr(config, "video_processor_type", None)
if hasattr(config, "auto_map") and "AutoVideoProcessor" in config.auto_map:
video_processor_auto_map = config.auto_map["AutoVideoProcessor"]
if video_processor_class is not None:
video_processor_class = video_processor_class_from_name(video_processor_class)
has_remote_code = video_processor_auto_map is not None
has_local_code = video_processor_class is not None or type(config) in VIDEO_PROCESSOR_MAPPING
if has_remote_code:
if "--" in video_processor_auto_map:
upstream_repo = video_processor_auto_map.split("--")[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(
trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo
)
if has_remote_code and trust_remote_code:
class_ref = video_processor_auto_map
video_processor_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
_ = kwargs.pop("code_revision", None)
video_processor_class.register_for_auto_class()
return video_processor_class.from_dict(config_dict, **kwargs)
elif video_processor_class is not None:
return video_processor_class.from_dict(config_dict, **kwargs)
# Last try: we use the VIDEO_PROCESSOR_MAPPING.
elif type(config) in VIDEO_PROCESSOR_MAPPING:
video_processor_class = VIDEO_PROCESSOR_MAPPING[type(config)]
if video_processor_class is not None:
return video_processor_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError(
"This video processor cannot be instantiated. Please make sure you have `torchvision` installed."
)
raise ValueError(
f"Unrecognized video processor in {pretrained_model_name_or_path}. Should have a "
f"`video_processor_type` key in its {VIDEO_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in VIDEO_PROCESSOR_MAPPING_NAMES)}"
)
@staticmethod
def register(
config_class,
video_processor_class,
exist_ok=False,
):
"""
Register a new video processor for this class.
Args:
config_class ([`PreTrainedConfig`]):
The configuration corresponding to the model to register.
video_processor_class ([`BaseVideoProcessor`]):
The video processor to register.
"""
VIDEO_PROCESSOR_MAPPING.register(config_class, video_processor_class, exist_ok=exist_ok)
__all__ = ["VIDEO_PROCESSOR_MAPPING", "AutoVideoProcessor"]
|
AutoVideoProcessor
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/core/expect_column_values_to_be_in_set.py
|
{
"start": 2383,
"end": 17631
}
|
class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnValuesToBeInSet is a \
Column Map Expectation.
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
value_set (set-like): \
{VALUE_SET_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1.
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValuesToNotBeInSet](https://greatexpectations.io/expectations/expect_column_values_to_not_be_in_set)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 1
1 2 1
2 4 1
Code Examples:
Passing Case:
Input:
ExpectColumnValuesToBeInSet(
column="test",
value_set=[1, 2],
mostly=.5
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 1,
"unexpected_percent": 33.33333333333333,
"partial_unexpected_list": [
4
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 33.33333333333333,
"unexpected_percent_nonmissing": 33.33333333333333
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValuesToBeInSet(
column="test2",
value_set=[2, 4],
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 3,
"unexpected_percent": 100.0,
"partial_unexpected_list": [
1,
1,
1
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 100.0,
"unexpected_percent_nonmissing": 100.0
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
value_set: ValueSetField
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.in_set"
args_keys = (
"column",
"value_set",
)
domain_keys: ClassVar[Tuple[str, ...]] = (
"column",
"row_condition",
"condition_parser",
)
success_keys = (
"value_set",
"mostly",
)
class Config:
title = "Expect column values to be in set"
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ExpectColumnValuesToBeInSet]) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("value_set", RendererValueType.ARRAY),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
template_str = ""
if params.value_set:
array_param_name = "value_set"
param_prefix = "v__"
renderer_configuration = cls._add_array_params(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
value_set_str: str = cls._get_array_string(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
template_str += f"values must belong to this set: {value_set_str}"
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
) -> List[RenderedStringTemplateContent]:
renderer_configuration: RendererConfiguration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
params = substitute_none_for_missing(
renderer_configuration.kwargs,
[
"column",
"value_set",
"mostly",
"row_condition",
"condition_parser",
],
)
if params["value_set"] is None or len(params["value_set"]) == 0:
values_string = "[ ]"
else:
for i, v in enumerate(params["value_set"]):
params[f"v__{i!s}"] = v
values_string = " ".join([f"$v__{i!s}" for i, v in enumerate(params["value_set"])])
template_str = f"values must belong to this set: {values_string}"
if params["mostly"] is not None:
if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
styling = runtime_configuration.get("styling", {}) if runtime_configuration else {}
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
@classmethod
@renderer(renderer_type=LegacyDescriptiveRendererType.EXAMPLE_VALUES_BLOCK)
def _descriptive_example_values_block_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
) -> Optional[Union[RenderedBulletListContent, ValueListContent]]:
assert result, "Must pass in result."
if "partial_unexpected_counts" in result.result:
partial_unexpected_counts = result.result["partial_unexpected_counts"]
values = [str(v["value"]) for v in partial_unexpected_counts]
elif "partial_unexpected_list" in result.result:
values = [str(item) for item in result.result["partial_unexpected_list"]]
else:
return None
classes = ["col-3", "mt-1", "pl-1", "pr-1"]
if any(len(value) > 80 for value in values): # noqa: PLR2004 # FIXME CoP
content_block_type = "bullet_list"
content_block_class = RenderedBulletListContent
else:
content_block_type = "value_list"
content_block_class = ValueListContent
new_block = content_block_class(
**{
"content_block_type": content_block_type,
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Example Values",
"tooltip": {"content": "expect_column_values_to_be_in_set"},
"tag": "h6",
},
}
),
content_block_type: [
{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": value},
"styling": {
"default": {
"classes": ["badge", "badge-info"]
if content_block_type == "value_list"
else [],
"styles": {"word-break": "break-all"},
},
},
},
}
for value in values
],
"styling": {
"classes": classes,
},
}
)
return new_block
|
ExpectColumnValuesToBeInSet
|
python
|
spack__spack
|
lib/spack/spack/util/s3.py
|
{
"start": 4217,
"end": 5843
}
|
class ____(BufferedReader):
def __init__(self, raw):
# In botocore >=1.23.47, StreamingBody inherits from IOBase, so we
# only add missing attributes in older versions.
# https://github.com/boto/botocore/commit/a624815eabac50442ed7404f3c4f2664cd0aa784
if not isinstance(raw, IOBase):
raw.readable = lambda: True
raw.writable = lambda: False
raw.seekable = lambda: False
raw.closed = False
raw.flush = lambda: None
super().__init__(raw)
def detach(self):
self.raw = None
def read(self, *args, **kwargs):
return self.raw.read(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.raw, key)
def _s3_open(url, method="GET"):
parsed = urllib.parse.urlparse(url)
s3 = get_s3_session(url, method="fetch")
bucket = parsed.netloc
key = parsed.path
if key.startswith("/"):
key = key[1:]
if method not in ("GET", "HEAD"):
raise urllib.error.URLError(
"Only GET and HEAD verbs are currently supported for the s3:// scheme"
)
try:
if method == "GET":
obj = s3.get_object(Bucket=bucket, Key=key)
# NOTE(opadron): Apply workaround here (see above)
stream = WrapStream(obj["Body"])
elif method == "HEAD":
obj = s3.head_object(Bucket=bucket, Key=key)
stream = BytesIO()
except s3.ClientError as e:
raise urllib.error.URLError(e) from e
headers = obj["ResponseMetadata"]["HTTPHeaders"]
return url, headers, stream
|
WrapStream
|
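The same "make a raw object usable by BufferedReader" trick in miniature: io.BufferedReader only needs a readable raw object with a readinto-style hook, which is why patching those attributes onto botocore's StreamingBody was enough. The stand-in class below is illustrative.

from io import BufferedReader, RawIOBase

class RawBytes(RawIOBase):
    def __init__(self, data: bytes):
        self._data, self._pos = data, 0
    def readable(self) -> bool:
        return True
    def readinto(self, b) -> int:
        chunk = self._data[self._pos:self._pos + len(b)]
        b[:len(chunk)] = chunk
        self._pos += len(chunk)
        return len(chunk)

reader = BufferedReader(RawBytes(b"hello world"))
assert reader.read(5) == b"hello"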