| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
python
|
apache__airflow
|
providers/fab/src/airflow/providers/fab/auth_manager/views/user.py
|
{
"start": 4055,
"end": 4869
}
|
class ____(MultiResourceUserMixin, UserRemoteUserModelView):
"""Customize permission names for FAB's builtin UserRemoteUserModelView."""
_class_permission_name = permissions.RESOURCE_USER
class_permission_name_mapping = {
"userinfoedit": permissions.RESOURCE_MY_PROFILE,
"userinfo": permissions.RESOURCE_MY_PROFILE,
}
method_permission_name = {
"add": "create",
"userinfo": "read",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"userinfoedit": "edit",
"delete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
|
CustomUserRemoteUserModelView
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/suite/transfers/gcs_to_gdrive.py
|
{
"start": 1339,
"end": 7947
}
|
class ____(BaseOperator):
"""
Copies objects from a Google Cloud Storage service to a Google Drive service, with renaming if requested.
Using this operator requires the following OAuth 2.0 scope:
.. code-block:: none
https://www.googleapis.com/auth/drive
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGoogleDriveOperator`
:param source_bucket: The source Google Cloud Storage bucket where the object is. (templated)
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your bucket. The wildcard can appear
inside the object name or at the end of the object name. Appending a wildcard to the bucket name
is unsupported.
:param destination_object: The destination name of the object in the destination Google Drive
service. (templated)
If a wildcard is supplied in the source_object argument, this is the prefix that will be prepended
to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the file ``foo/baz`` will be
copied to ``blah/baz``; to retain the prefix write the destination_object as e.g. ``blah/foo``, in
which case the copied file will be named ``blah/foo/baz``.
:param destination_folder_id: The folder ID where the destination objects will be placed. It is
an additive prefix for anything specified in destination_object.
For example if folder ID ``xXyYzZ`` is called ``foo`` and the destination is ``bar/baz``, the file
will end up in `foo/bar/baz`.
This can be used to target an existing folder that is already visible to other users. The credentials
provided must have access to this folder.
    :param move_object: When move_object is True, the object is moved instead of copied to the new location.
This is the equivalent of a mv command as opposed to a cp command.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"destination_object",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
source_bucket: str,
source_object: str,
destination_object: str | None = None,
destination_folder_id: str = "root",
move_object: bool = False,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_object = destination_object
self.destination_folder_id = destination_folder_id
self.move_object = move_object
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.gcs_hook: GCSHook | None = None
self.gdrive_hook: GoogleDriveHook | None = None
def execute(self, context: Context):
self.gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.gdrive_hook = GoogleDriveHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if WILDCARD in self.source_object:
total_wildcards = self.source_object.count(WILDCARD)
if total_wildcards > 1:
error_msg = (
"Only one wildcard '*' is allowed in source_object parameter. "
f"Found {total_wildcards} in {self.source_object}."
)
raise AirflowException(error_msg)
prefix, delimiter = self.source_object.split(WILDCARD, 1)
objects = self.gcs_hook.list(self.source_bucket, prefix=prefix, delimiter=delimiter)
# TODO: After deprecating delimiter and wildcards in source objects,
# remove the previous line and uncomment the following:
# match_glob = f"**/*{delimiter}" if delimiter else None
# objects = self.gcs_hook.list(self.source_bucket, prefix=prefix, match_glob=match_glob)
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix, self.destination_object, 1)
self._copy_single_object(source_object=source_object, destination_object=destination_object)
else:
self._copy_single_object(
source_object=self.source_object, destination_object=self.destination_object
)
def _copy_single_object(self, source_object, destination_object):
self.log.info(
"Executing copy of gs://%s/%s to gdrive://%s",
self.source_bucket,
source_object,
destination_object,
)
with tempfile.NamedTemporaryFile() as file:
filename = file.name
self.gcs_hook.download(
bucket_name=self.source_bucket, object_name=source_object, filename=filename
)
self.gdrive_hook.upload_file(
local_location=filename,
remote_location=destination_object,
folder_id=self.destination_folder_id,
)
if self.move_object:
self.gcs_hook.delete(self.source_bucket, source_object)
|
GCSToGoogleDriveOperator
|
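A minimal usage sketch for the operator above (assumes Airflow 2.x with the Google provider installed; the DAG id, bucket, object names, and folder id are placeholders). With the wildcard ``foo/*``, a source file ``foo/baz`` would land at ``reports/baz`` under the given Drive folder:

```python
import pendulum
from airflow import DAG
from airflow.providers.google.suite.transfers.gcs_to_gdrive import GCSToGoogleDriveOperator

with DAG(
    dag_id="example_gcs_to_gdrive",           # placeholder DAG id
    start_date=pendulum.datetime(2024, 1, 1, tz="UTC"),
    schedule=None,
) as dag:
    copy_reports = GCSToGoogleDriveOperator(
        task_id="copy_reports",
        source_bucket="my-source-bucket",     # placeholder bucket
        source_object="foo/*",                # only one wildcard is allowed
        destination_object="reports/",        # prefix that replaces "foo/"
        destination_folder_id="root",         # or an existing Drive folder id
        move_object=False,                    # True would delete the GCS copy
    )
```
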
python
|
getsentry__sentry
|
src/sentry/lang/native/symbolicator.py
|
{
"start": 2498,
"end": 15574
}
|
class ____:
def __init__(
self,
task_kind: SymbolicatorTaskKind,
on_request: Callable[[], None],
project: Project,
event_id: str,
):
URLS = settings.SYMBOLICATOR_POOL_URLS
pool = pool_for_platform(task_kind.platform)
base_url = (
URLS.get(pool.value)
or URLS.get(SymbolicatorPools.default.value)
or options.get("symbolicator.options")["url"]
)
base_url = base_url.rstrip("/")
assert base_url
self.base_url = base_url
self.on_request = on_request
self.project = project
self.event_id = event_id
def _process(self, task_name: str, path: str, **kwargs):
"""
This function will submit a symbolication task to a Symbolicator and handle
polling it using the `SymbolicatorSession`.
It will also correctly handle `TaskIdNotFound` and `ServiceUnavailable` errors.
"""
session = SymbolicatorSession(
url=self.base_url,
project_id=str(self.project.id),
event_id=str(self.event_id),
timeout=settings.SYMBOLICATOR_POLL_TIMEOUT,
)
task_id: str | None = None
json_response = None
backoff = Backoff(BACKOFF_INITIAL, BACKOFF_MAX)
with session:
while True:
try:
if not task_id:
# We are submitting a new task to Symbolicator
json_response = session.create_task(path, **kwargs)
else:
# The task has already been submitted to Symbolicator and we are polling
json_response = session.query_task(task_id)
except TaskIdNotFound:
# We have started a task on Symbolicator and are polling, but the task went away.
# This can happen when Symbolicator was restarted or the load balancer routing changed in some way.
# We can just re-submit the task using the same `session` and try again. We use the same `session`
# to avoid the likelihood of this happening again. When Symbolicators are restarted due to a deploy
                    # in a staggered fashion, we do not want to create a new `session` and be assigned a different
                    # Symbolicator instance, only for it to be restarted next.
task_id = None
backoff.reset()
continue
except ServiceUnavailable:
# This error means that the Symbolicator instance bound to our `session` is not healthy.
# By resetting the `worker_id`, the load balancer will route us to a different
# Symbolicator instance.
session.reset_worker_id()
task_id = None
# Backoff on repeated failures to create or query a task.
backoff.sleep_failure()
continue
finally:
self.on_request()
backoff.reset()
metrics.incr(
"events.symbolicator.response",
tags={
"response": json_response.get("status") or "null",
"task_name": task_name,
},
)
if json_response["status"] == "pending":
# Symbolicator was not able to process the whole task within one timeout period.
# Start polling using the `request_id`/`task_id`.
task_id = json_response["request_id"]
continue
# Otherwise, we are done processing, yay
return json_response
def process_minidump(
self, platform: str, minidump: CachedAttachment, rewrite_first_module: list[Any]
):
(sources, process_response) = sources_for_symbolication(self.project)
scraping_config = get_scraping_config(self.project)
force_stored_attachment = not minidump.stored_id and in_random_rollout(
"objectstore.force-stored-symbolication"
)
if force_stored_attachment:
session = get_attachments_session(self.project.organization_id, self.project.id)
minidump.stored_id = session.put(minidump.load_data(self.project))
if minidump.stored_id:
session = get_attachments_session(self.project.organization_id, self.project.id)
storage_url = get_symbolicator_url(session, minidump.stored_id)
json: dict[str, Any] = {
"platform": platform,
"sources": sources,
"scraping": scraping_config,
"options": {"dif_candidates": True},
"symbolicate": {
"type": "minidump",
"storage_url": storage_url,
"rewrite_first_module": rewrite_first_module,
},
}
try:
res = self._process("process_minidump", "symbolicate-any", json=json)
return process_response(res)
finally:
if force_stored_attachment:
session.delete(minidump.stored_id)
minidump.stored_id = None
data = {
"platform": orjson.dumps(platform).decode(),
"sources": orjson.dumps(sources).decode(),
"scraping": orjson.dumps(scraping_config).decode(),
"options": '{"dif_candidates": true}',
"rewrite_first_module": orjson.dumps(rewrite_first_module).decode(),
}
files = {"upload_file_minidump": minidump.load_data(self.project)}
res = self._process("process_minidump", "minidump", data=data, files=files)
return process_response(res)
def process_applecrashreport(self, platform: str, report: CachedAttachment):
(sources, process_response) = sources_for_symbolication(self.project)
scraping_config = get_scraping_config(self.project)
force_stored_attachment = not report.stored_id and in_random_rollout(
"objectstore.force-stored-symbolication"
)
if force_stored_attachment:
session = get_attachments_session(self.project.organization_id, self.project.id)
report.stored_id = session.put(report.load_data(self.project))
if report.stored_id:
session = get_attachments_session(self.project.organization_id, self.project.id)
storage_url = get_symbolicator_url(session, report.stored_id)
json: dict[str, Any] = {
"platform": platform,
"sources": sources,
"scraping": scraping_config,
"options": {"dif_candidates": True},
"symbolicate": {
"type": "applecrashreport",
"storage_url": storage_url,
},
}
try:
res = self._process("process_applecrashreport", "symbolicate-any", json=json)
return process_response(res)
finally:
if force_stored_attachment:
session.delete(report.stored_id)
report.stored_id = None
data = {
"platform": orjson.dumps(platform).decode(),
"sources": orjson.dumps(sources).decode(),
"scraping": orjson.dumps(scraping_config).decode(),
"options": '{"dif_candidates": true}',
}
files = {"apple_crash_report": report.load_data(self.project)}
res = self._process("process_applecrashreport", "applecrashreport", data=data, files=files)
return process_response(res)
def process_payload(
self, platform, stacktraces, modules, frame_order, signal=None, apply_source_context=True
):
"""
Process a native event by symbolicating its frames.
:param platform: The event's platform. This should be either unset or one of "objc", "cocoa", "swift", "native", "c", "csharp".
:param stacktraces: The event's stacktraces. Frames must contain an `instruction_address`.
Frames are expected to be ordered according to the frame_order (see below).
:param modules: ProGuard modules and source bundles. They must contain a `uuid` and have a
`type` of either "proguard" or "source".
:param frame_order: The order of frames within stacktraces. See the documentation of `FrameOrder`.
:param signal: A numeric crash signal value. This is optional.
:param apply_source_context: Whether to add source context to frames.
"""
(sources, process_response) = sources_for_symbolication(self.project)
scraping_config = get_scraping_config(self.project)
json = {
"platform": platform,
"sources": sources,
"options": {
"dif_candidates": True,
"apply_source_context": apply_source_context,
"frame_order": frame_order.value,
},
"stacktraces": stacktraces,
"modules": modules,
"scraping": scraping_config,
}
if signal:
json["signal"] = signal
res = self._process("symbolicate_stacktraces", "symbolicate", json=json)
return process_response(res)
def process_js(
self, platform, stacktraces, modules, release, dist, frame_order, apply_source_context=True
):
"""
Process a JS event by remapping its frames with sourcemaps.
:param platform: The event's platform. This should be unset, "javascript", or "node".
:param stacktraces: The event's stacktraces. Frames must contain a `function` and a `module`.
Frames are expected to be ordered according to the frame_order (see below).
        :param modules: Minified source files/sourcemaps. They must contain a `type` field with value "sourcemap",
a `code_file`, and a `debug_id`.
:param release: The event's release.
:param dist: The event's dist.
:param frame_order: The order of frames within stacktraces. See the documentation of `FrameOrder`.
:param apply_source_context: Whether to add source context to frames.
"""
source = get_internal_artifact_lookup_source(self.project)
scraping_config = get_scraping_config(self.project)
json = {
"platform": platform,
"source": source,
"stacktraces": stacktraces,
"modules": modules,
"options": {
"apply_source_context": apply_source_context,
"frame_order": frame_order.value,
},
"scraping": scraping_config,
}
if release is not None:
json["release"] = release
if dist is not None:
json["dist"] = dist
return self._process("symbolicate_js_stacktraces", "symbolicate-js", json=json)
def process_jvm(
self,
platform,
exceptions,
stacktraces,
modules,
release_package,
classes,
frame_order,
apply_source_context=True,
):
"""
Process a JVM event by remapping its frames and exceptions with
ProGuard.
:param platform: The event's platform. This should be either unset or "java".
:param exceptions: The event's exceptions. These must contain a `type` and a `module`.
:param stacktraces: The event's stacktraces. Frames must contain a `function` and a `module`.
Frames are expected to be ordered according to the frame_order (see below).
:param modules: ProGuard modules and source bundles. They must contain a `uuid` and have a
`type` of either "proguard" or "source".
:param release_package: The name of the release's package. This is optional.
Used for determining whether frames are in-app.
:param frame_order: The order of frames within stacktraces. See the documentation of `FrameOrder`.
:param apply_source_context: Whether to add source context to frames.
"""
source = get_internal_source(self.project)
json = {
"platform": platform,
"sources": [source],
"exceptions": exceptions,
"stacktraces": stacktraces,
"modules": modules,
"classes": classes,
"options": {
"apply_source_context": apply_source_context,
"frame_order": frame_order.value,
},
}
if release_package is not None:
json["release_package"] = release_package
return self._process("symbolicate_jvm_stacktraces", "symbolicate-jvm", json=json)
|
Symbolicator
|
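A standalone sketch of the submit-then-poll loop that `_process` implements above. `FakeTaskGone`, `FakeUnavailable`, and the `session` object are hypothetical stand-ins, not Sentry APIs; the real code additionally resets the session's worker id and records metrics:

```python
import time

class FakeTaskGone(Exception):
    """Stand-in for TaskIdNotFound."""

class FakeUnavailable(Exception):
    """Stand-in for ServiceUnavailable."""

def run_task(session, path, initial=0.5, maximum=8.0, **kwargs):
    task_id = None
    delay = initial
    while True:
        try:
            if task_id is None:
                response = session.create_task(path, **kwargs)  # submit a new task
            else:
                response = session.query_task(task_id)          # poll the existing task
        except FakeTaskGone:
            task_id = None        # task vanished: resubmit on the same session
            delay = initial
            continue
        except FakeUnavailable:
            task_id = None        # unhealthy backend: back off before retrying
            time.sleep(delay)
            delay = min(delay * 2, maximum)
            continue
        delay = initial
        if response["status"] == "pending":
            task_id = response["request_id"]   # keep polling the same task
            continue
        return response
```
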
python
|
huggingface__transformers
|
src/transformers/models/plbart/modeling_plbart.py
|
{
"start": 10710,
"end": 13723
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: PLBartConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PLBartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
layer_idx=layer_idx,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
PLBartEncoderLayer
|
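The feed-forward half of the layer above uses the classic post-LayerNorm residual pattern (dropout, add the residual, then normalize). A minimal standalone sketch of that pattern in plain PyTorch, with arbitrary dimensions and no transformers dependency:

```python
import torch
from torch import nn

class TinyFFNBlock(nn.Module):
    def __init__(self, d_model: int = 16, d_ff: int = 64, dropout: float = 0.1):
        super().__init__()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        x = self.dropout(torch.relu(self.fc1(x)))   # activation + activation dropout
        x = self.dropout(self.fc2(x))               # projection + dropout
        return self.norm(residual + x)              # add residual, then LayerNorm

block = TinyFFNBlock()
out = block(torch.randn(2, 5, 16))                  # (batch, seq_len, embed_dim)
```
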
python
|
aio-libs__aiohttp
|
aiohttp/multipart.py
|
{
"start": 38054,
"end": 40066
}
|
class ____:
def __init__(self, writer: Any) -> None:
self._writer = writer
self._encoding: str | None = None
self._compress: ZLibCompressor | None = None
self._encoding_buffer: bytearray | None = None
def enable_encoding(self, encoding: str) -> None:
if encoding == "base64":
self._encoding = encoding
self._encoding_buffer = bytearray()
elif encoding == "quoted-printable":
self._encoding = "quoted-printable"
def enable_compression(
self, encoding: str = "deflate", strategy: int | None = None
) -> None:
self._compress = ZLibCompressor(
encoding=encoding,
suppress_deflate_header=True,
strategy=strategy,
)
async def write_eof(self) -> None:
if self._compress is not None:
chunk = self._compress.flush()
if chunk:
self._compress = None
await self.write(chunk)
if self._encoding == "base64":
if self._encoding_buffer:
await self._writer.write(base64.b64encode(self._encoding_buffer))
async def write(self, chunk: bytes) -> None:
if self._compress is not None:
if chunk:
chunk = await self._compress.compress(chunk)
if not chunk:
return
if self._encoding == "base64":
buf = self._encoding_buffer
assert buf is not None
buf.extend(chunk)
if buf:
div, mod = divmod(len(buf), 3)
enc_chunk, self._encoding_buffer = (buf[: div * 3], buf[div * 3 :])
if enc_chunk:
b64chunk = base64.b64encode(enc_chunk)
await self._writer.write(b64chunk)
elif self._encoding == "quoted-printable":
await self._writer.write(binascii.b2a_qp(chunk))
else:
await self._writer.write(chunk)
|
MultipartPayloadWriter
|
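A standalone illustration of the 3-byte buffering that `write()` performs for base64 above: only complete 3-byte groups are encoded per chunk, the remainder is carried over, and `write_eof()` flushes whatever is left, so the concatenated output matches encoding the payload in one go:

```python
import base64

buf = bytearray()
encoded = bytearray()
for chunk in (b"hel", b"lo wo", b"rld"):
    buf.extend(chunk)
    div, _ = divmod(len(buf), 3)
    ready, buf = buf[: div * 3], buf[div * 3:]   # encode full 3-byte groups only
    encoded.extend(base64.b64encode(ready))
encoded.extend(base64.b64encode(buf))            # the write_eof() step

assert bytes(encoded) == base64.b64encode(b"hello world")
```
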
python
|
jina-ai__jina
|
jina/serve/runtimes/gateway/gateway.py
|
{
"start": 1184,
"end": 1390
}
|
class ____(JAMLCompatible, metaclass=GatewayType):
"""
    The base class of all custom Gateways; it can be used to build a custom interface to a Jina Flow that
    supports gateway logic.
"""
|
BaseGateway
|
python
|
redis__redis-py
|
redis/cache.py
|
{
"start": 2510,
"end": 3451
}
|
class ____(ABC):
@property
@abstractmethod
def collection(self) -> OrderedDict:
pass
@property
@abstractmethod
def config(self) -> CacheConfigurationInterface:
pass
@property
@abstractmethod
def eviction_policy(self) -> EvictionPolicyInterface:
pass
@property
@abstractmethod
def size(self) -> int:
pass
@abstractmethod
def get(self, key: CacheKey) -> Union[CacheEntry, None]:
pass
@abstractmethod
def set(self, entry: CacheEntry) -> bool:
pass
@abstractmethod
def delete_by_cache_keys(self, cache_keys: List[CacheKey]) -> List[bool]:
pass
@abstractmethod
def delete_by_redis_keys(self, redis_keys: List[bytes]) -> List[bool]:
pass
@abstractmethod
def flush(self) -> int:
pass
@abstractmethod
def is_cachable(self, key: CacheKey) -> bool:
pass
|
CacheInterface
|
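A simplified, self-contained sketch of the mechanics such an interface abstracts (get/set with LRU eviction and flush), backed by an `OrderedDict`. It is not a drop-in implementation of the ABC above and does not use redis-py's `CacheKey`/`CacheEntry` types:

```python
from collections import OrderedDict
from typing import Any, Hashable, Optional

class TinyLRUCache:
    def __init__(self, max_size: int = 128) -> None:
        self._data: OrderedDict = OrderedDict()
        self._max_size = max_size

    @property
    def size(self) -> int:
        return len(self._data)

    def get(self, key: Hashable) -> Optional[Any]:
        if key not in self._data:
            return None
        self._data.move_to_end(key)          # mark as most recently used
        return self._data[key]

    def set(self, key: Hashable, value: Any) -> bool:
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self._max_size:
            self._data.popitem(last=False)   # evict the least recently used entry
        return True

    def flush(self) -> int:
        count = len(self._data)
        self._data.clear()
        return count
```
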
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classes1.py
|
{
"start": 354,
"end": 391
}
|
class ____(metaclass=EMeta):
pass
|
E
|
python
|
altair-viz__altair
|
altair/utils/execeval.py
|
{
"start": 369,
"end": 2716
}
|
class ____:
"""Class to temporarily catch sys.displayhook."""
def __init__(self) -> None:
self.output: Any | None = None
def __enter__(self) -> Self:
self.old_hook: Callable[[object], Any] = sys.displayhook
sys.displayhook = self
return self
def __exit__(self, type, value, traceback) -> Literal[False]:
sys.displayhook = self.old_hook
# Returning False will cause exceptions to propagate
return False
def __call__(self, output: Any) -> None:
self.output = output
@overload
def eval_block(
code: str | Any,
namespace: dict[str, Any] | None = ...,
filename: str | ReadableBuffer | PathLike[Any] = ...,
*,
strict: Literal[False] = ...,
) -> Any | None: ...
@overload
def eval_block(
code: str | Any,
namespace: dict[str, Any] | None = ...,
filename: str | ReadableBuffer | PathLike[Any] = ...,
*,
strict: Literal[True],
) -> Any: ...
def eval_block(
code: str | Any,
namespace: dict[str, Any] | None = None,
filename: str | ReadableBuffer | PathLike[Any] = "<string>",
*,
strict: bool = False,
) -> Any | None:
"""
Execute a multi-line block of code in the given namespace.
If the final statement in the code is an expression, return
the result of the expression.
If ``strict``, raise a ``TypeError`` when the return value would be ``None``.
"""
tree = ast.parse(code, filename="<ast>", mode="exec")
if namespace is None:
namespace = {}
catch_display = _CatchDisplay()
if isinstance(tree.body[-1], ast.Expr):
to_exec, to_eval = tree.body[:-1], tree.body[-1:]
else:
to_exec, to_eval = tree.body, []
for node in to_exec:
compiled = compile(ast.Module([node], []), filename=filename, mode="exec")
exec(compiled, namespace)
with catch_display:
for node in to_eval:
compiled = compile(
ast.Interactive([node]), filename=filename, mode="single"
)
exec(compiled, namespace)
if strict:
output = catch_display.output
if output is None:
msg = f"Expected a non-None value but got {output!r}"
raise TypeError(msg)
else:
return output
else:
return catch_display.output
|
_CatchDisplay
|
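A short usage sketch for `eval_block` as defined above (assuming it is importable from `altair.utils.execeval`, the module path shown for this row):

```python
from altair.utils.execeval import eval_block

ns: dict = {}
result = eval_block("x = 21\nx * 2", namespace=ns)
assert result == 42      # the trailing expression is evaluated and returned
assert ns["x"] == 21     # earlier statements populate the namespace

assert eval_block("y = 1") is None   # no trailing expression -> None
# eval_block("y = 1", strict=True) would raise TypeError instead.
```
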
python
|
pandas-dev__pandas
|
pandas/tests/indexes/datetimes/test_npfuncs.py
|
{
"start": 81,
"end": 384
}
|
class ____:
def test_split_non_utc(self):
# GH#14042
indices = date_range("2016-01-01 00:00:00+0200", freq="s", periods=10)
result = np.split(indices, indices_or_sections=[])[0]
expected = indices._with_freq(None)
tm.assert_index_equal(result, expected)
|
TestSplit
|
python
|
zarr-developers__zarr-python
|
src/zarr/testing/store.py
|
{
"start": 782,
"end": 20748
}
|
class ____(Generic[S, B]):
store_cls: type[S]
buffer_cls: type[B]
@abstractmethod
async def set(self, store: S, key: str, value: Buffer) -> None:
"""
Insert a value into a storage backend, with a specific key.
This should not use any store methods. Bypassing the store methods allows them to be
tested.
"""
...
@abstractmethod
async def get(self, store: S, key: str) -> Buffer:
"""
Retrieve a value from a storage backend, by key.
This should not use any store methods. Bypassing the store methods allows them to be
tested.
"""
...
@abstractmethod
@pytest.fixture
def store_kwargs(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Kwargs for instantiating a store"""
...
@abstractmethod
def test_store_repr(self, store: S) -> None: ...
@abstractmethod
def test_store_supports_writes(self, store: S) -> None: ...
def test_store_supports_partial_writes(self, store: S) -> None:
assert not store.supports_partial_writes
@abstractmethod
def test_store_supports_listing(self, store: S) -> None: ...
@pytest.fixture
def open_kwargs(self, store_kwargs: dict[str, Any]) -> dict[str, Any]:
return store_kwargs
@pytest.fixture
async def store(self, open_kwargs: dict[str, Any]) -> Store:
return await self.store_cls.open(**open_kwargs)
@pytest.fixture
async def store_not_open(self, store_kwargs: dict[str, Any]) -> Store:
return self.store_cls(**store_kwargs)
def test_store_type(self, store: S) -> None:
assert isinstance(store, Store)
assert isinstance(store, self.store_cls)
def test_store_eq(self, store: S, store_kwargs: dict[str, Any]) -> None:
# check self equality
assert store == store
# check store equality with same inputs
# asserting this is important for being able to compare (de)serialized stores
store2 = self.store_cls(**store_kwargs)
assert store == store2
async def test_serializable_store(self, store: S) -> None:
new_store: S = pickle.loads(pickle.dumps(store))
assert new_store == store
assert new_store.read_only == store.read_only
# quickly roundtrip data to a key to test that new store works
data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04")
key = "foo"
await store.set(key, data_buf)
observed = await store.get(key, prototype=default_buffer_prototype())
assert_bytes_equal(observed, data_buf)
def test_store_read_only(self, store: S) -> None:
assert not store.read_only
with pytest.raises(AttributeError):
store.read_only = False # type: ignore[misc]
@pytest.mark.parametrize("read_only", [True, False])
async def test_store_open_read_only(self, open_kwargs: dict[str, Any], read_only: bool) -> None:
open_kwargs["read_only"] = read_only
store = await self.store_cls.open(**open_kwargs)
assert store._is_open
assert store.read_only == read_only
async def test_store_context_manager(self, open_kwargs: dict[str, Any]) -> None:
# Test that the context manager closes the store
with await self.store_cls.open(**open_kwargs) as store:
assert store._is_open
# Test trying to open an already open store
with pytest.raises(ValueError, match="store is already open"):
await store._open()
assert not store._is_open
async def test_read_only_store_raises(self, open_kwargs: dict[str, Any]) -> None:
kwargs = {**open_kwargs, "read_only": True}
store = await self.store_cls.open(**kwargs)
assert store.read_only
# set
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await store.set("foo", self.buffer_cls.from_bytes(b"bar"))
# delete
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await store.delete("foo")
async def test_with_read_only_store(self, open_kwargs: dict[str, Any]) -> None:
kwargs = {**open_kwargs, "read_only": True}
store = await self.store_cls.open(**kwargs)
assert store.read_only
# Test that you cannot write to a read-only store
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await store.set("foo", self.buffer_cls.from_bytes(b"bar"))
# Check if the store implements with_read_only
try:
writer = store.with_read_only(read_only=False)
except NotImplementedError:
# Test that stores that do not implement with_read_only raise NotImplementedError with the correct message
with pytest.raises(
NotImplementedError,
match=f"with_read_only is not implemented for the {type(store)} store type.",
):
store.with_read_only(read_only=False)
return
# Test that you can write to a new store copy
assert not writer._is_open
assert not writer.read_only
await writer.set("foo", self.buffer_cls.from_bytes(b"bar"))
await writer.delete("foo")
# Test that you cannot write to the original store
assert store.read_only
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await store.set("foo", self.buffer_cls.from_bytes(b"bar"))
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await store.delete("foo")
# Test that you cannot write to a read-only store copy
reader = store.with_read_only(read_only=True)
assert reader.read_only
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await reader.set("foo", self.buffer_cls.from_bytes(b"bar"))
with pytest.raises(
ValueError, match="store was opened in read-only mode and does not support writing"
):
await reader.delete("foo")
@pytest.mark.parametrize("key", ["c/0", "foo/c/0.0", "foo/0/0"])
@pytest.mark.parametrize(
("data", "byte_range"),
[
(b"\x01\x02\x03\x04", None),
(b"\x01\x02\x03\x04", RangeByteRequest(1, 4)),
(b"\x01\x02\x03\x04", OffsetByteRequest(1)),
(b"\x01\x02\x03\x04", SuffixByteRequest(1)),
(b"", None),
],
)
async def test_get(self, store: S, key: str, data: bytes, byte_range: ByteRequest) -> None:
"""
Ensure that data can be read from the store using the store.get method.
"""
data_buf = self.buffer_cls.from_bytes(data)
await self.set(store, key, data_buf)
observed = await store.get(key, prototype=default_buffer_prototype(), byte_range=byte_range)
start, stop = _normalize_byte_range_index(data_buf, byte_range=byte_range)
expected = data_buf[start:stop]
assert_bytes_equal(observed, expected)
async def test_get_not_open(self, store_not_open: S) -> None:
"""
Ensure that data can be read from the store that isn't yet open using the store.get method.
"""
assert not store_not_open._is_open
data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04")
key = "c/0"
await self.set(store_not_open, key, data_buf)
observed = await store_not_open.get(key, prototype=default_buffer_prototype())
assert_bytes_equal(observed, data_buf)
async def test_get_raises(self, store: S) -> None:
"""
        Ensure that a ValueError is raised for invalid byte range syntax.
"""
data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04")
await self.set(store, "c/0", data_buf)
with pytest.raises((ValueError, TypeError), match=r"Unexpected byte_range, got.*"):
await store.get("c/0", prototype=default_buffer_prototype(), byte_range=(0, 2)) # type: ignore[arg-type]
async def test_get_many(self, store: S) -> None:
"""
Ensure that multiple keys can be retrieved at once with the _get_many method.
"""
keys = tuple(map(str, range(10)))
values = tuple(f"{k}".encode() for k in keys)
for k, v in zip(keys, values, strict=False):
await self.set(store, k, self.buffer_cls.from_bytes(v))
observed_buffers = await _collect_aiterator(
store._get_many(
zip(
keys,
(default_buffer_prototype(),) * len(keys),
(None,) * len(keys),
strict=False,
)
)
)
observed_kvs = sorted(((k, b.to_bytes()) for k, b in observed_buffers)) # type: ignore[union-attr]
expected_kvs = sorted(((k, b) for k, b in zip(keys, values, strict=False)))
assert observed_kvs == expected_kvs
@pytest.mark.parametrize("key", ["c/0", "foo/c/0.0", "foo/0/0"])
@pytest.mark.parametrize("data", [b"\x01\x02\x03\x04", b""])
async def test_getsize(self, store: S, key: str, data: bytes) -> None:
"""
Test the result of store.getsize().
"""
data_buf = self.buffer_cls.from_bytes(data)
expected = len(data_buf)
await self.set(store, key, data_buf)
observed = await store.getsize(key)
assert observed == expected
async def test_getsize_prefix(self, store: S) -> None:
"""
Test the result of store.getsize_prefix().
"""
data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04")
keys = ["c/0/0", "c/0/1", "c/1/0", "c/1/1"]
keys_values = [(k, data_buf) for k in keys]
await store._set_many(keys_values)
expected = len(data_buf) * len(keys)
observed = await store.getsize_prefix("c")
assert observed == expected
async def test_getsize_raises(self, store: S) -> None:
"""
        Test that getsize() raises a FileNotFoundError if the key doesn't exist.
"""
with pytest.raises(FileNotFoundError):
await store.getsize("c/1000")
@pytest.mark.parametrize("key", ["zarr.json", "c/0", "foo/c/0.0", "foo/0/0"])
@pytest.mark.parametrize("data", [b"\x01\x02\x03\x04", b""])
async def test_set(self, store: S, key: str, data: bytes) -> None:
"""
Ensure that data can be written to the store using the store.set method.
"""
assert not store.read_only
data_buf = self.buffer_cls.from_bytes(data)
await store.set(key, data_buf)
observed = await self.get(store, key)
assert_bytes_equal(observed, data_buf)
async def test_set_not_open(self, store_not_open: S) -> None:
"""
Ensure that data can be written to the store that's not yet open using the store.set method.
"""
assert not store_not_open._is_open
data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04")
key = "c/0"
await store_not_open.set(key, data_buf)
observed = await self.get(store_not_open, key)
assert_bytes_equal(observed, data_buf)
async def test_set_many(self, store: S) -> None:
"""
Test that a dict of key : value pairs can be inserted into the store via the
`_set_many` method.
"""
keys = ["zarr.json", "c/0", "foo/c/0.0", "foo/0/0"]
data_buf = [self.buffer_cls.from_bytes(k.encode()) for k in keys]
store_dict = dict(zip(keys, data_buf, strict=True))
await store._set_many(store_dict.items())
for k, v in store_dict.items():
assert (await self.get(store, k)).to_bytes() == v.to_bytes()
@pytest.mark.parametrize(
"key_ranges",
[
[],
[("zarr.json", RangeByteRequest(0, 2))],
[("c/0", RangeByteRequest(0, 2)), ("zarr.json", None)],
[
("c/0/0", RangeByteRequest(0, 2)),
("c/0/1", SuffixByteRequest(2)),
("c/0/2", OffsetByteRequest(2)),
],
],
)
async def test_get_partial_values(
self, store: S, key_ranges: list[tuple[str, ByteRequest]]
) -> None:
# put all of the data
for key, _ in key_ranges:
await self.set(store, key, self.buffer_cls.from_bytes(bytes(key, encoding="utf-8")))
# read back just part of it
observed_maybe = await store.get_partial_values(
prototype=default_buffer_prototype(), key_ranges=key_ranges
)
observed: list[Buffer] = []
expected: list[Buffer] = []
for obs in observed_maybe:
assert obs is not None
observed.append(obs)
for idx in range(len(observed)):
key, byte_range = key_ranges[idx]
result = await store.get(
key, prototype=default_buffer_prototype(), byte_range=byte_range
)
assert result is not None
expected.append(result)
assert all(
obs.to_bytes() == exp.to_bytes() for obs, exp in zip(observed, expected, strict=True)
)
async def test_exists(self, store: S) -> None:
assert not await store.exists("foo")
await store.set("foo/zarr.json", self.buffer_cls.from_bytes(b"bar"))
assert await store.exists("foo/zarr.json")
async def test_delete(self, store: S) -> None:
if not store.supports_deletes:
pytest.skip("store does not support deletes")
await store.set("foo/zarr.json", self.buffer_cls.from_bytes(b"bar"))
assert await store.exists("foo/zarr.json")
await store.delete("foo/zarr.json")
assert not await store.exists("foo/zarr.json")
async def test_delete_dir(self, store: S) -> None:
if not store.supports_deletes:
pytest.skip("store does not support deletes")
await store.set("zarr.json", self.buffer_cls.from_bytes(b"root"))
await store.set("foo-bar/zarr.json", self.buffer_cls.from_bytes(b"root"))
await store.set("foo/zarr.json", self.buffer_cls.from_bytes(b"bar"))
await store.set("foo/c/0", self.buffer_cls.from_bytes(b"chunk"))
await store.delete_dir("foo")
assert await store.exists("zarr.json")
assert await store.exists("foo-bar/zarr.json")
assert not await store.exists("foo/zarr.json")
assert not await store.exists("foo/c/0")
async def test_delete_nonexistent_key_does_not_raise(self, store: S) -> None:
if not store.supports_deletes:
pytest.skip("store does not support deletes")
await store.delete("nonexistent_key")
async def test_is_empty(self, store: S) -> None:
assert await store.is_empty("")
await self.set(
store, "foo/bar", self.buffer_cls.from_bytes(bytes("something", encoding="utf-8"))
)
assert not await store.is_empty("")
assert await store.is_empty("fo")
assert not await store.is_empty("foo/")
assert not await store.is_empty("foo")
assert await store.is_empty("spam/")
async def test_clear(self, store: S) -> None:
await self.set(
store, "key", self.buffer_cls.from_bytes(bytes("something", encoding="utf-8"))
)
await store.clear()
assert await store.is_empty("")
async def test_list(self, store: S) -> None:
assert await _collect_aiterator(store.list()) == ()
prefix = "foo"
data = self.buffer_cls.from_bytes(b"")
store_dict = {
prefix + "/zarr.json": data,
**{prefix + f"/c/{idx}": data for idx in range(10)},
}
await store._set_many(store_dict.items())
expected_sorted = sorted(store_dict.keys())
observed = await _collect_aiterator(store.list())
observed_sorted = sorted(observed)
assert observed_sorted == expected_sorted
async def test_list_prefix(self, store: S) -> None:
"""
Test that the `list_prefix` method works as intended. Given a prefix, it should return
all the keys in storage that start with this prefix.
"""
prefixes = ("", "a/", "a/b/", "a/b/c/")
data = self.buffer_cls.from_bytes(b"")
fname = "zarr.json"
store_dict = {p + fname: data for p in prefixes}
await store._set_many(store_dict.items())
for prefix in prefixes:
observed = tuple(sorted(await _collect_aiterator(store.list_prefix(prefix))))
expected: tuple[str, ...] = ()
for key in store_dict:
if key.startswith(prefix):
expected += (key,)
expected = tuple(sorted(expected))
assert observed == expected
async def test_list_empty_path(self, store: S) -> None:
"""
Verify that list and list_prefix work correctly when path is an empty string,
i.e. no unwanted replacement occurs.
"""
data = self.buffer_cls.from_bytes(b"")
store_dict = {
"foo/bar/zarr.json": data,
"foo/bar/c/1": data,
"foo/baz/c/0": data,
}
await store._set_many(store_dict.items())
# Test list()
observed_list = await _collect_aiterator(store.list())
observed_list_sorted = sorted(observed_list)
expected_list_sorted = sorted(store_dict.keys())
assert observed_list_sorted == expected_list_sorted
# Test list_prefix() with an empty prefix
observed_prefix_empty = await _collect_aiterator(store.list_prefix(""))
observed_prefix_empty_sorted = sorted(observed_prefix_empty)
expected_prefix_empty_sorted = sorted(store_dict.keys())
assert observed_prefix_empty_sorted == expected_prefix_empty_sorted
# Test list_prefix() with a non-empty prefix
observed_prefix = await _collect_aiterator(store.list_prefix("foo/bar/"))
observed_prefix_sorted = sorted(observed_prefix)
expected_prefix_sorted = sorted(k for k in store_dict if k.startswith("foo/bar/"))
assert observed_prefix_sorted == expected_prefix_sorted
async def test_list_dir(self, store: S) -> None:
root = "foo"
store_dict = {
root + "/zarr.json": self.buffer_cls.from_bytes(b"bar"),
root + "/c/1": self.buffer_cls.from_bytes(b"\x01"),
}
assert await _collect_aiterator(store.list_dir("")) == ()
assert await _collect_aiterator(store.list_dir(root)) == ()
await store._set_many(store_dict.items())
keys_observed = await _collect_aiterator(store.list_dir(root))
keys_expected = {k.removeprefix(root + "/").split("/")[0] for k in store_dict}
assert sorted(keys_observed) == sorted(keys_expected)
keys_observed = await _collect_aiterator(store.list_dir(root + "/"))
assert sorted(keys_expected) == sorted(keys_observed)
async def test_set_if_not_exists(self, store: S) -> None:
key = "k"
data_buf = self.buffer_cls.from_bytes(b"0000")
await self.set(store, key, data_buf)
new = self.buffer_cls.from_bytes(b"1111")
await store.set_if_not_exists("k", new) # no error
result = await store.get(key, default_buffer_prototype())
assert result == data_buf
await store.set_if_not_exists("k2", new) # no error
result = await store.get("k2", default_buffer_prototype())
assert result == new
|
StoreTests
|
python
|
pyparsing__pyparsing
|
examples/pythonGrammarParser.py
|
{
"start": 5883,
"end": 5942
}
|
class ____(SemanticGroup):
label = "AND"
pass
|
AndList
|
python
|
Textualize__textual
|
src/textual/demo/projects.py
|
{
"start": 656,
"end": 2604
}
|
class ____(Vertical, can_focus=True, can_focus_children=False):
"""Display project information and open repo links."""
ALLOW_MAXIMIZE = True
DEFAULT_CSS = """
Project {
width: 1fr;
height: auto;
padding: 0 1;
border: tall transparent;
box-sizing: border-box;
&:focus {
border: tall $text-primary;
background: $primary 20%;
&.link {
color: red !important;
}
}
#title { text-style: bold; width: 1fr; }
#author { text-style: italic; }
.stars {
color: $text-accent;
text-align: right;
text-style: bold;
width: auto;
}
.header { height: 1; }
.link {
color: $text-accent;
text-style: underline;
}
.description { color: $text-muted; }
&.-hover { opacity: 1; }
}
"""
BINDINGS = [
Binding(
"enter",
"open_repository",
"open repo",
tooltip="Open the GitHub repository in your browser",
)
]
def __init__(self, project_info: ProjectInfo) -> None:
self.project_info = project_info
super().__init__()
def compose(self) -> ComposeResult:
info = self.project_info
with Horizontal(classes="header"):
yield Label(info.title, id="title")
yield Label(f"★ {STARS[info.title]}", classes="stars")
yield Label(info.author, id="author")
yield Link(info.url, tooltip="Click to open project repository")
yield Static(info.description, classes="description")
@on(events.Enter)
@on(events.Leave)
def on_enter(self, event: events.Enter):
event.stop()
self.set_class(self.is_mouse_over, "-hover")
def action_open_repository(self) -> None:
self.app.open_url(self.project_info.url)
|
Project
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-artifacts-that-can-be-extracted.py
|
{
"start": 115,
"end": 611
}
|
class ____(object):
def digArtifacts(self, n, artifacts, dig):
"""
:type n: int
:type artifacts: List[List[int]]
:type dig: List[List[int]]
:rtype: int
"""
lookup = set(map(tuple, dig))
return sum(all((i, j) in lookup for i in xrange(r1, r2+1) for j in xrange(c1, c2+1)) for r1, c1, r2, c2 in artifacts)
# Time: O(a + d), a is the number of cells covered by artifacts, d is the size of dig
# Space: O(a)
# hash table
|
Solution
|
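The same approach ported to Python 3 (`range` instead of `xrange`), checked against a small hand-worked 2x2 grid in which only the first artifact is fully uncovered:

```python
def dig_artifacts(n, artifacts, dig):
    dug = set(map(tuple, dig))                     # O(d) lookup table of dug cells
    return sum(
        all((i, j) in dug for i in range(r1, r2 + 1) for j in range(c1, c2 + 1))
        for r1, c1, r2, c2 in artifacts            # O(a) cells checked in total
    )

# Artifact [0,0,0,0] is fully dug; [0,1,1,1] still has cell (1,1) buried.
assert dig_artifacts(2, [[0, 0, 0, 0], [0, 1, 1, 1]], [[0, 0], [0, 1]]) == 1
```
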
python
|
huggingface__transformers
|
src/transformers/models/kosmos2/modeling_kosmos2.py
|
{
"start": 35187,
"end": 36132
}
|
class ____(nn.Module):
def __init__(self, config: Kosmos2TextConfig):
super().__init__()
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim)
self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim)
self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.ffn_layernorm(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
|
Kosmos2TextFFN
|
python
|
PrefectHQ__prefect
|
tests/cli/test_deploy.py
|
{
"start": 218681,
"end": 221677
}
|
class ____:
@pytest.fixture
async def work_pool(self, prefect_client):
await prefect_client.create_work_pool(
WorkPoolCreate(name="test-pool", type="test")
)
async def test_uses_job_variables(
self,
project_dir: Path,
work_pool: WorkPool,
prefect_client: PrefectClient,
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
" 1.0.0 -jv env=prod -t foo-bar --job-variable"
' \'{"resources":{"limits":{"cpu": 1}}}\''
),
expected_code=0,
expected_output_contains=[
"An important name/test-name",
"prefect worker start --pool 'test-pool'",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.name == "test-name"
assert deployment.work_pool_name == "test-pool"
assert deployment.version == "1.0.0"
assert deployment.tags == ["foo-bar"]
assert deployment.job_variables == {
"env": "prod",
"resources": {"limits": {"cpu": 1}},
}
@pytest.mark.usefixtures("project_dir", "work_pool")
async def test_rejects_json_strings(self):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
" 1.0.0 -jv env=prod -t foo-bar --job-variable 'my-variable'"
),
expected_code=1,
expected_output_contains=[
"Could not parse variable",
],
)
@pytest.mark.usefixtures("project_dir", "work_pool")
async def test_rejects_json_arrays(self):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
" 1.0.0 -jv env=prod -t foo-bar --job-variable ['my-variable']"
),
expected_code=1,
expected_output_contains=[
"Could not parse variable",
],
)
@pytest.mark.usefixtures("project_dir", "work_pool")
async def test_rejects_invalid_json(self):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name -p test-pool --version"
" 1.0.0 -jv env=prod -t foo-bar --job-variable "
' \'{"resources":{"limits":{"cpu"}\''
),
expected_code=1,
expected_output_contains=[
"Could not parse variable",
],
)
@pytest.mark.usefixtures("project_dir", "interactive_console", "work_pool")
|
TestDeployInfraOverrides
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/base.py
|
{
"start": 42373,
"end": 43573
}
|
class ____(sqltypes._Binary):
"""Implement the SQL Server TIMESTAMP type.
Note this is **completely different** than the SQL Standard
TIMESTAMP type, which is not supported by SQL Server. It
is a read-only datatype that does not support INSERT of values.
.. seealso::
:class:`_mssql.ROWVERSION`
"""
__visit_name__ = "TIMESTAMP"
# expected by _Binary to be present
length = None
def __init__(self, convert_int=False):
"""Construct a TIMESTAMP or ROWVERSION type.
:param convert_int: if True, binary integer values will
be converted to integers on read.
"""
self.convert_int = convert_int
def result_processor(self, dialect, coltype):
super_ = super().result_processor(dialect, coltype)
if self.convert_int:
def process(value):
if super_:
value = super_(value)
if value is not None:
# https://stackoverflow.com/a/30403242/34549
value = int(codecs.encode(value, "hex"), 16)
return value
return process
else:
return super_
|
TIMESTAMP
|
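A standalone illustration of the `convert_int` conversion above: an 8-byte rowversion value is turned into an integer via its hex representation, which is the same as reading it as a big-endian integer:

```python
import codecs

raw = b"\x00\x00\x00\x00\x00\x00\x04\xd2"        # example 8-byte rowversion
assert int(codecs.encode(raw, "hex"), 16) == 1234
assert int.from_bytes(raw, "big") == 1234        # equivalent modern spelling
```
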
python
|
gevent__gevent
|
src/gevent/tests/test__pywsgi.py
|
{
"start": 24930,
"end": 34654
}
|
class ____(TestCase):
calls = 0
def setUp(self):
super().setUp()
self.calls = 0
def application(self, env, start_response):
self.calls += 1
self.assertTrue(env.get('wsgi.input_terminated'))
start_response('200 OK', [('Content-Type', 'text/plain')])
if env['PATH_INFO'] == '/a':
data = env['wsgi.input'].read(6)
return [data]
if env['PATH_INFO'] == '/b':
lines = list(iter(lambda: env['wsgi.input'].read(6), b''))
return lines
if env['PATH_INFO'] == '/c':
return list(iter(lambda: env['wsgi.input'].read(1), b''))
return [b'We should not get here', env['PATH_INFO'].encode('ascii')]
def test_014_chunked_post(self):
data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
# self.close_opened() # XXX: Why?
with self.makefile() as fd:
fd.write(data.replace(b'/a', b'/b'))
read_http(fd, body='oh hai')
with self.makefile() as fd:
fd.write(data.replace(b'/a', b'/c'))
read_http(fd, body='oh hai')
def test_229_incorrect_chunk_no_newline(self):
# Giving both a Content-Length and a Transfer-Encoding,
# TE is preferred. But if the chunking is bad from the client,
# missing its terminating newline,
# the server doesn't hang
data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
b'Content-Length: 12\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'{"hi": "ho"}')
with self.makefile() as fd:
fd.write(data)
read_http(fd, code=400)
def test_229_incorrect_chunk_non_hex(self):
# Giving both a Content-Length and a Transfer-Encoding,
# TE is preferred. But if the chunking is bad from the client,
# the server doesn't hang
data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
b'Content-Length: 12\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'{"hi": "ho"}\r\n')
with self.makefile() as fd:
fd.write(data)
read_http(fd, code=400)
def test_229_correct_chunk_quoted_ext(self):
data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2;token="oh hi"\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
def test_229_correct_chunk_token_ext(self):
data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2;token=oh_hi\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
def test_229_incorrect_chunk_token_ext_too_long(self):
data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2;token=oh_hi\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
data = data.replace(b'oh_hi', b'_oh_hi' * 4000)
with self.makefile() as fd:
fd.write(data)
read_http(fd, code=400)
# XXX: Not sure which one, but one (or more) of these is leading to a
# test timeout on Windows. Figure out what/why and solve.
@greentest.skipOnWindows('Maybe hangs')
def test_trailers_keepalive_ignored(self):
# Trailers after a chunk are ignored.
data1 = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n hai\r\n'
b'0\r\n' # last-chunk
# Normally the final CRLF would go here, but if you put in a
# trailer, it doesn't.
b'trailer1: value1\r\n'
b'trailer2: value2\r\n'
b'\r\n' # Really terminate the chunk.
)
data2 = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n bye\r\n'
b'0\r\n' # last-chunk
)
with self.makefile() as fd:
fd.write(data1)
read_http(fd, body='oh hai')
fd.write(data2)
read_http(fd, body='oh bye')
self.assertEqual(self.calls, 2)
@greentest.skipOnWindows('Maybe hangs')
def test_trailers_close_ignored(self):
data = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n hai\r\n'
b'0\r\n' # last-chunk
# Normally the final CRLF would go here, but if you put in a
# trailer, it doesn't.
# b'\r\n'
b'GETpath2a:123 HTTP/1.1\r\n'
b'Host: a.com\r\n'
b'Connection: close\r\n'
b'\r\n'
)
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
with self.assertRaises(ConnectionClosed):
read_http(fd)
@greentest.skipOnWindows('Maybe hangs')
def test_trailers_too_long(self):
# Trailers after a chunk are ignored.
data = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n hai\r\n'
b'0\r\n' # last-chunk
# Normally the final CRLF would go here, but if you put in a
# trailer, it doesn't.
b'trailer2: value2' # note lack of \r\n
)
data += b't' * pywsgi.MAX_REQUEST_LINE
# No termination, because we detect the trailer as being too
# long and abort the connection.
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
with self.assertRaises(ConnectionClosed):
read_http(fd, body='oh bye')
@greentest.skipOnWindows('Maybe hangs')
def test_trailers_request_smuggling_missing_last_chunk_keep_alive(self):
# When something that looks like a request line comes in the trailer
# as the first line, immediately after an invalid last chunk.
# We detect this and abort the connection, because the
# whitespace in the GET line isn't a legal part of a trailer.
# If we didn't abort the connection, then, because we specified
# keep-alive, the server would be hanging around waiting for more input.
data = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n hai\r\n'
b'0' # last-chunk, but missing the \r\n
# Normally the final CRLF would go here, but if you put in a
# trailer, it doesn't.
# b'\r\n'
b'GET /path2?a=:123 HTTP/1.1\r\n'
b'Host: a.com\r\n'
b'Connection: close\r\n'
b'\r\n'
)
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
with self.assertRaises(ConnectionClosed):
read_http(fd)
self.assertEqual(self.calls, 1)
@greentest.skipOnWindows('Maybe hangs')
def test_trailers_request_smuggling_header_first(self):
# When something that looks like a header comes in the first line.
data = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n hai\r\n'
b'0\r\n' # last-chunk, but only one CRLF
b'Header: value\r\n'
b'GET /path2?a=:123 HTTP/1.1\r\n'
b'Host: a.com\r\n'
b'Connection: close\r\n'
b'\r\n'
)
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
with self.assertRaises(ConnectionClosed):
read_http(fd, code=400)
self.assertEqual(self.calls, 1)
@greentest.skipOnWindows('Maybe hangs')
def test_trailers_request_smuggling_request_terminates_then_header(self):
data = (
b'POST /a HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'2\r\noh\r\n'
b'4\r\n hai\r\n'
b'0\r\n' # last-chunk
b'\r\n'
b'Header: value'
b'GET /path2?a=:123 HTTP/1.1\r\n'
b'Host: a.com\r\n'
b'Connection: close\r\n'
b'\r\n'
)
with self.makefile() as fd:
fd.write(data)
read_http(fd, body='oh hai')
read_http(fd, code=400)
self.assertEqual(self.calls, 1)
|
TestChunkedPost
|
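A small helper mirroring how the raw chunked bodies in these tests are assembled: each chunk is the hex length, CRLF, the data, CRLF, terminated by a zero-length last chunk:

```python
def chunked_body(*chunks: bytes) -> bytes:
    out = b""
    for chunk in chunks:
        out += format(len(chunk), "x").encode() + b"\r\n" + chunk + b"\r\n"
    return out + b"0\r\n\r\n"   # zero-length last-chunk plus terminating CRLF

assert chunked_body(b"oh", b" hai") == b"2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n"
```
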
python
|
numba__numba
|
numba/cuda/stubs.py
|
{
"start": 6929,
"end": 7075
}
|
class ____(Stub):
"""
brev(x)
Returns the reverse of the bit pattern of x. For example, 0b10110110
becomes 0b01101101.
"""
|
brev
|
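A host-side sketch of what the reversal looks like for a 32-bit value (the stub above is only a placeholder; the width on the device follows the argument type, and with 32 bits the 8-bit pattern from the docstring ends up in the top byte):

```python
def brev32(x: int) -> int:
    """Reverse the bit order of a 32-bit unsigned value (pure Python)."""
    return int(format(x & 0xFFFFFFFF, "032b")[::-1], 2)

assert brev32(0b10110110) == 0b01101101 << 24   # reversed pattern lands in the top 8 bits
assert brev32(1) == 0x80000000
```
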
python
|
pytorch__pytorch
|
torch/testing/_internal/common_utils.py
|
{
"start": 119837,
"end": 119932
}
|
class ____:
# causes pytest to not recognize this class as a test
__test__ = False
|
NoTest
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/utils/logging.py
|
{
"start": 6842,
"end": 11606
}
|
class ____(Filter):
"""
A logging Filter that excludes records from a logger (or its children).
"""
def filter(self, record: logging.LogRecord) -> bool:
# The base Filter class allows only records from a logger (or its
# children).
return not super().filter(record)
def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int:
"""Configures and sets up all of the logging
Returns the requested logging level, as its integer value.
"""
# Determine the level to be logging at.
if verbosity >= 2:
level_number = logging.DEBUG
elif verbosity == 1:
level_number = VERBOSE
elif verbosity == -1:
level_number = logging.WARNING
elif verbosity == -2:
level_number = logging.ERROR
elif verbosity <= -3:
level_number = logging.CRITICAL
else:
level_number = logging.INFO
level = logging.getLevelName(level_number)
# The "root" logger should match the "console" level *unless* we also need
# to log to a user log file.
include_user_log = user_log_file is not None
if include_user_log:
additional_log_file = user_log_file
root_level = "DEBUG"
else:
additional_log_file = "/dev/null"
root_level = level
# Disable any logging besides WARNING unless we have DEBUG level logging
# enabled for vendored libraries.
vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
# Shorthands for clarity
log_streams = {
"stdout": "ext://sys.stdout",
"stderr": "ext://sys.stderr",
}
handler_classes = {
"stream": "pip._internal.utils.logging.RichPipStreamHandler",
"file": "pip._internal.utils.logging.BetterRotatingFileHandler",
}
handlers = ["console", "console_errors", "console_subprocess"] + (
["user_log"] if include_user_log else []
)
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"filters": {
"exclude_warnings": {
"()": "pip._internal.utils.logging.MaxLevelFilter",
"level": logging.WARNING,
},
"restrict_to_subprocess": {
"()": "logging.Filter",
"name": subprocess_logger.name,
},
"exclude_subprocess": {
"()": "pip._internal.utils.logging.ExcludeLoggerFilter",
"name": subprocess_logger.name,
},
},
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": "%(message)s",
},
"indent_with_timestamp": {
"()": IndentingFormatter,
"format": "%(message)s",
"add_timestamp": True,
},
},
"handlers": {
"console": {
"level": level,
"class": handler_classes["stream"],
"no_color": no_color,
"stream": log_streams["stdout"],
"filters": ["exclude_subprocess", "exclude_warnings"],
"formatter": "indent",
},
"console_errors": {
"level": "WARNING",
"class": handler_classes["stream"],
"no_color": no_color,
"stream": log_streams["stderr"],
"filters": ["exclude_subprocess"],
"formatter": "indent",
},
# A handler responsible for logging to the console messages
# from the "subprocessor" logger.
"console_subprocess": {
"level": level,
"class": handler_classes["stream"],
"stream": log_streams["stderr"],
"no_color": no_color,
"filters": ["restrict_to_subprocess"],
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": handler_classes["file"],
"filename": additional_log_file,
"encoding": "utf-8",
"delay": True,
"formatter": "indent_with_timestamp",
},
},
"root": {
"level": root_level,
"handlers": handlers,
},
"loggers": {"pip._vendor": {"level": vendored_log_level}},
}
)
return level_number
|
ExcludeLoggerFilter
|
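A self-contained sketch of the exclusion idea used by the filter above, built only on the stdlib logging module (the ExcludeSubtreeFilter class and the "pip.subprocess" logger name are illustrative assumptions, not pip's actual names):
import logging
class ExcludeSubtreeFilter(logging.Filter):
    """Drop records coming from the named logger or any of its children."""
    def filter(self, record: logging.LogRecord) -> bool:
        # The base Filter keeps records from the named logger subtree,
        # so negating it excludes exactly that subtree.
        return not super().filter(record)
console = logging.StreamHandler()
console.addFilter(ExcludeSubtreeFilter(name="pip.subprocess"))
logging.getLogger().addHandler(console)
# Records logged under "pip.subprocess" are now dropped by this handler,
# while records from every other logger still reach the console.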
python
|
ray-project__ray
|
rllib/examples/envs/classes/multi_agent/footsies/game/footsies_binary.py
|
{
"start": 463,
"end": 1123
}
|
class ____:
# Uploaded 07.28.2025
S3_ROOT = "https://ray-example-data.s3.us-west-2.amazonaws.com/rllib/env-footsies/binaries/"
# Zip file names
ZIP_LINUX_SERVER = "footsies_linux_server_021725.zip"
ZIP_LINUX_WINDOWED = "footsies_linux_windowed_021725.zip"
ZIP_MAC_HEADLESS = "footsies_mac_headless_5709b6d.zip"
ZIP_MAC_WINDOWED = "footsies_mac_windowed_5709b6d.zip"
# Full URLs
URL_LINUX_SERVER_BINARIES = S3_ROOT + ZIP_LINUX_SERVER
URL_LINUX_WINDOWED_BINARIES = S3_ROOT + ZIP_LINUX_WINDOWED
URL_MAC_HEADLESS_BINARIES = S3_ROOT + ZIP_MAC_HEADLESS
URL_MAC_WINDOWED_BINARIES = S3_ROOT + ZIP_MAC_WINDOWED
|
BinaryUrls
|
python
|
ansible__ansible
|
test/units/module_utils/facts/system/test_lsb.py
|
{
"start": 1509,
"end": 4399
}
|
class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'lsb']
valid_subsets = ['lsb']
fact_namespace = 'ansible_lsb'
collector_class = LSBFactCollector
def _mock_module(self):
mock_module = Mock()
mock_module.params = {'gather_subset': self.gather_subset,
'gather_timeout': 10,
'filter': '*'}
mock_module.get_bin_path = Mock(return_value='/usr/bin/lsb_release')
mock_module.run_command = Mock(return_value=(0, lsb_release_a_fedora_output, ''))
return mock_module
def test_lsb_release_bin(self):
module = self._mock_module()
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['lsb']['release'], '25')
self.assertEqual(facts_dict['lsb']['id'], 'Fedora')
self.assertEqual(facts_dict['lsb']['description'], 'Fedora release 25 (Twenty Five)')
self.assertEqual(facts_dict['lsb']['codename'], 'TwentyFive')
self.assertEqual(facts_dict['lsb']['major_release'], '25')
def test_etc_lsb_release(self):
module = self._mock_module()
module.get_bin_path = Mock(return_value=None)
with patch('ansible.module_utils.facts.system.lsb.os.path.exists',
return_value=True):
with patch('ansible.module_utils.facts.system.lsb.get_file_lines',
return_value=etc_lsb_release_ubuntu14.splitlines()):
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['lsb']['release'], '14.04')
self.assertEqual(facts_dict['lsb']['id'], 'Ubuntu')
self.assertEqual(facts_dict['lsb']['description'], 'Ubuntu 14.04.3 LTS')
self.assertEqual(facts_dict['lsb']['codename'], 'trusty')
def test_etc_lsb_release_no_decimal_release(self):
module = self._mock_module()
module.get_bin_path = Mock(return_value=None)
with patch('ansible.module_utils.facts.system.lsb.os.path.exists',
return_value=True):
with patch('ansible.module_utils.facts.system.lsb.get_file_lines',
return_value=etc_lsb_release_no_decimal.splitlines()):
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['lsb']['release'], '11')
self.assertEqual(facts_dict['lsb']['id'], 'AwesomeOS')
self.assertEqual(facts_dict['lsb']['description'], 'AwesomeÖS 11')
self.assertEqual(facts_dict['lsb']['codename'], 'stonehenge')
|
TestLSBFacts
|
python
|
sphinx-doc__sphinx
|
sphinx/search/de.py
|
{
"start": 191,
"end": 589
}
|
class ____(SearchLanguage):
lang = 'de'
language_name = 'German'
js_stemmer_rawcode = 'german-stemmer.js'
stopwords = GERMAN_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('german')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
|
SearchGerman
|
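A quick, hedged illustration of the stemming call used above (requires the snowballstemmer package; the sample words are arbitrary):
import snowballstemmer
stemmer = snowballstemmer.stemmer("german")
for word in ["Häuser", "laufen", "schönste"]:
    # Mirrors the stem() method above: lowercase first, then apply the Snowball stemmer.
    print(word, "->", stemmer.stemWord(word.lower()))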
python
|
pypa__setuptools
|
setuptools/_distutils/tests/support.py
|
{
"start": 1370,
"end": 4099
}
|
class ____:
"""Class to store options for retrieval via set_undefined_options()."""
def __init__(self, **kwargs):
vars(self).update(kwargs)
def ensure_finalized(self):
pass
def copy_xxmodule_c(directory):
"""Helper for tests that need the xxmodule.c source file.
Example use:
def test_compile(self):
copy_xxmodule_c(self.tmpdir)
self.assertIn('xxmodule.c', os.listdir(self.tmpdir))
If the source file can be found, it will be copied to *directory*. If not,
the test will be skipped. Errors during copy are not caught.
"""
shutil.copy(_get_xxmodule_path(), os.path.join(directory, 'xxmodule.c'))
def _get_xxmodule_path():
source_name = 'xxmodule.c' if sys.version_info > (3, 9) else 'xxmodule-3.8.c'
return os.path.join(os.path.dirname(__file__), source_name)
def fixup_build_ext(cmd):
"""Function needed to make build_ext tests pass.
When Python was built with --enable-shared on Unix, -L. is not enough to
find libpython<blah>.so, because regrtest runs in a tempdir, not in the
source directory where the .so lives.
When Python was built with in debug mode on Windows, build_ext commands
need their debug attribute set, and it is not done automatically for
some reason.
This function handles both of these things. Example use:
cmd = build_ext(dist)
support.fixup_build_ext(cmd)
cmd.ensure_finalized()
Unlike most other Unix platforms, Mac OS X embeds absolute paths
to shared libraries into executables, so the fixup is not needed there.
"""
if os.name == 'nt':
cmd.debug = sys.executable.endswith('_d.exe')
elif sysconfig.get_config_var('Py_ENABLE_SHARED'):
# To further add to the shared builds fun on Unix, we can't just add
# library_dirs to the Extension() instance because that doesn't get
# plumbed through to the final compiler command.
runshared = sysconfig.get_config_var('RUNSHARED')
if runshared is None:
cmd.library_dirs = ['.']
else:
if sys.platform == 'darwin':
cmd.library_dirs = []
else:
name, equals, value = runshared.partition('=')
cmd.library_dirs = [d for d in value.split(os.pathsep) if d]
def combine_markers(cls):
"""
pytest will honor markers as found on the class, but when
markers are on multiple subclasses, only one appears. Use
this decorator to combine those markers.
"""
cls.pytestmark = [
mark
for base in itertools.chain([cls], cls.__bases__)
for mark in getattr(base, 'pytestmark', [])
]
return cls
|
DummyCommand
|
python
|
astropy__astropy
|
astropy/io/fits/column.py
|
{
"start": 13810,
"end": 15223
}
|
class ____(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (
r"(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])(?:\((?P<max>\d*)\))?"
)
_format_code = "P"
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = "2i4"
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group("dtype") not in FITS2NUMPY:
raise VerifyError(f"Invalid column format: {format}")
repeat = m.group("repeat")
array_dtype = m.group("dtype")
max = m.group("max")
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = "" if self.repeat is None else self.repeat
max = "" if self.max is None else self.max
return f"{repeat}{self._format_code}{self.format}({max})"
|
_FormatP
|
python
|
django-extensions__django-extensions
|
tests/management/commands/test_update_permissions.py
|
{
"start": 405,
"end": 2905
}
|
class ____(TestCase):
def setUp(self):
class PermModel(models.Model):
class Meta:
app_label = "django_extensions"
permissions = (("test_permission", "test_permission"),)
class TestModel(models.Model):
class Meta:
app_label = "testapp"
permissions = (("testapp_permission", "testapp_permission"),)
def test_works(self):
original_stdout = sys.stdout
out = sys.stdout = StringIO()
call_command("update_permissions", stdout=out, verbosity=3)
sys.stdout = original_stdout
self.assertIn("Can change perm model", out.getvalue())
def test_should_reload_permission_only_for_specified_apps(self):
original_stdout = sys.stdout
out = sys.stdout = StringIO()
call_command("update_permissions", "--apps=testapp", stdout=out, verbosity=3)
sys.stdout = original_stdout
self.assertNotIn(
f"{DJANGO_EXTENSIONS_NAME} | perm model | Can add perm model",
out.getvalue(),
)
self.assertIn(
f"{TESTAPP_NAME} | test model | Can add test model", out.getvalue()
)
def test_should_reload_permission_only_for_all_apps(self):
original_stdout = sys.stdout
out = sys.stdout = StringIO()
call_command("update_permissions", verbosity=3)
sys.stdout = original_stdout
self.assertIn(
f"{DJANGO_EXTENSIONS_NAME} | perm model | Can add perm model",
out.getvalue(),
)
self.assertIn(
f"{TESTAPP_NAME} | test model | Can add test model", out.getvalue()
)
def test_should_update_permission_if_name_changed(self):
original_stdout = sys.stdout
out = sys.stdout = StringIO()
call_command("update_permissions", verbosity=3, create_only=True)
self.assertIn(
f"{TESTAPP_NAME} | test model | testapp_permission", out.getvalue()
)
testapp_permission = Permission.objects.get(name="testapp_permission")
testapp_permission.name = "testapp_permission_wrong"
testapp_permission.save()
call_command("update_permissions", verbosity=3, update_only=True)
sys.stdout = original_stdout
self.assertIn(
f"'{TESTAPP_NAME} | test model | testapp_permission_wrong' to '{TESTAPP_NAME} | test model | testapp_permission'",
out.getvalue(),
)
|
UpdatePermissionsTests
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 730260,
"end": 730526
}
|
class ____(sgqlc.types.Type, Contribution):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("issue",)
issue = sgqlc.types.Field(sgqlc.types.non_null("Issue"), graphql_name="issue")
|
CreatedIssueContribution
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_scalar_ctors.py
|
{
"start": 2333,
"end": 3328
}
|
class ____(TestCase):
"""gh-15467"""
def _do_test(self, t1, t2):
x = t1(2)
arr = np.array(x, dtype=t2)
# type should be preserved exactly
if t2 is None:
assert arr.dtype.type is t1
else:
assert arr.dtype.type is t2
arr1 = np.asarray(x, dtype=t2)
if t2 is None:
assert arr1.dtype.type is t1
else:
assert arr1.dtype.type is t2
@parametrize("t1", int_types + uint_types)
@parametrize("t2", int_types + uint_types + [None])
def test_integers(self, t1, t2):
return self._do_test(t1, t2)
@parametrize("t1", float_types)
@parametrize("t2", float_types + [None])
def test_reals(self, t1, t2):
return self._do_test(t1, t2)
@parametrize("t1", cfloat_types)
@parametrize("t2", cfloat_types + [None])
def test_complex(self, t1, t2):
return self._do_test(t1, t2)
if __name__ == "__main__":
run_tests()
|
TestArrayFromScalar
|
python
|
pypa__pip
|
src/pip/_internal/utils/temp_dir.py
|
{
"start": 994,
"end": 2079
}
|
class ____:
"""Manages temp directory behavior"""
def __init__(self) -> None:
self._should_delete: dict[str, bool] = {}
def set_delete(self, kind: str, value: bool) -> None:
"""Indicate whether a TempDirectory of the given kind should be
auto-deleted.
"""
self._should_delete[kind] = value
def get_delete(self, kind: str) -> bool:
"""Get configured auto-delete flag for a given TempDirectory type,
default True.
"""
return self._should_delete.get(kind, True)
_tempdir_registry: TempDirectoryTypeRegistry | None = None
@contextmanager
def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]:
"""Provides a scoped global tempdir registry that can be used to dictate
whether directories should be deleted.
"""
global _tempdir_registry
old_tempdir_registry = _tempdir_registry
_tempdir_registry = TempDirectoryTypeRegistry()
try:
yield _tempdir_registry
finally:
_tempdir_registry = old_tempdir_registry
|
TempDirectoryTypeRegistry
|
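A short usage sketch for the registry and context manager shown above; the kind names are made up for illustration, and the import assumes pip's internal module layout from the path above (not a stable public API):
from pip._internal.utils.temp_dir import tempdir_registry
with tempdir_registry() as registry:
    registry.set_delete("build", False)             # keep "build" temp dirs around
    assert registry.get_delete("build") is False
    assert registry.get_delete("download") is True  # unset kinds default to True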
python
|
walkccc__LeetCode
|
solutions/2340. Minimum Adjacent Swaps to Make a Valid Array/2340.py
|
{
"start": 0,
"end": 714
}
|
class ____:
def minimumSwaps(self, nums: list[int]) -> int:
minIndex = self._getLeftmostMinIndex(nums)
maxIndex = self._getRightmostMaxIndex(nums)
swaps = minIndex + (len(nums) - 1 - maxIndex)
return swaps if minIndex <= maxIndex else swaps - 1
def _getLeftmostMinIndex(self, nums: list[int]) -> int:
mn = nums[0]
minIndex = 0
for i in range(1, len(nums)):
if nums[i] < mn:
mn = nums[i]
minIndex = i
return minIndex
def _getRightmostMaxIndex(self, nums: list[int]) -> int:
mx = nums[-1]
maxIndex = len(nums) - 1
for i in range(len(nums) - 2, -1, -1):
if nums[i] > mx:
mx = nums[i]
maxIndex = i
return maxIndex
|
Solution
|
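A small worked example for the Solution class above, with expected values traced by hand from the code: the leftmost minimum must bubble to the front, the rightmost maximum to the back, and one swap is shared when the minimum starts to the right of the maximum.
sol = Solution()
assert sol.minimumSwaps([3, 4, 5, 5, 3, 1]) == 6  # min (1) at index 5, rightmost max (5) at index 3
assert sol.minimumSwaps([9]) == 0                 # a single element is already valid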
python
|
google__jax
|
tests/api_test.py
|
{
"start": 259448,
"end": 260134
}
|
class ____(jtu.JaxTestCase):
def test_sharding_constraint_as_noop(self):
def f(x):
return jax.lax.with_sharding_constraint(
x, jax.sharding.SingleDeviceSharding(jax.devices()[0]))
def wsc_as_noop(ctx, operand, *args, **kwargs):
del ctx, args, kwargs
return [operand]
rules = ((jax.lax.sharding_constraint_p, wsc_as_noop),)
lowered_ir = (
jax.jit(f)
.trace(jax.ShapeDtypeStruct((2, 4), dtype=jnp.bfloat16))
.lower(_private_parameters=mlir.LoweringParameters(
override_lowering_rules=rules))
.as_text()
)
self.assertNotIn("stablehlo.custom_call @Sharding", lowered_ir)
|
OverrideLoweringTest
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyflakes/F841_0.py
|
{
"start": 3084,
"end": 3175
}
|
class ____:
def foo():
nonlocal __class__
__class__ = 1
|
NonlocalDunderClass
|
python
|
scipy__scipy
|
scipy/optimize/tests/test__shgo.py
|
{
"start": 7557,
"end": 7933
}
|
class ____(StructTestFunction):
def f(self, x):
return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2
+ (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2)
g = None
cons = wrap_constraints(g)
test_s = StructTestS(bounds=[(0, 2.0), ] * 4,
expected_fun=0.0,
expected_x=np.ones(4) - 0.5
)
|
StructTestS
|
python
|
keon__algorithms
|
tests/test_bfs.py
|
{
"start": 104,
"end": 848
}
|
class ____(unittest.TestCase):
def test_count_islands(self):
grid_1 = [[1, 1, 1, 1, 0], [1, 1, 0, 1, 0], [1, 1, 0, 0, 0],
[0, 0, 0, 0, 0]]
self.assertEqual(1, count_islands(grid_1))
grid_2 = [[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 1, 0, 0],
[0, 0, 0, 1, 1]]
self.assertEqual(3, count_islands(grid_2))
grid_3 = [[1, 1, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1],
[0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 0, 0]]
self.assertEqual(3, count_islands(grid_3))
grid_4 = [[1, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0]]
self.assertEqual(5, count_islands(grid_4))
|
TestCountIslands
|
python
|
pytorch__pytorch
|
test/inductor/test_lookup_table.py
|
{
"start": 5862,
"end": 26659
}
|
class ____(BaseLookupTableTest):
"""Consolidated tests for lookup table functionality"""
def test_lookup_mismatch(self):
"""Test mismatch scenario in lookup table"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
lookup_table_data = {
self.create_lookup_key("mm", kernel_inputs): [self.create_config("triton")]
}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
# looking for addmm but created the entry with mm - should mismatch the key and return
# an empty result
result = test_choices.lookup_template_configs(
kernel_inputs, "addmm", ["triton"]
)
self.assertEqual(result, {})
def test_successful_lookup_with_template_filtering(self):
"""Test successful lookup that filters configs by template_id"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
config_list = [
self.create_config("triton", BLOCK_M=128, BLOCK_N=128),
self.create_config("triton", BLOCK_M=64, BLOCK_N=64),
self.create_config("tma", BLOCK_M=256, BLOCK_N=128),
self.create_config("decompose_k", k_split=4),
]
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): config_list}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
# Test triton template filtering
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"]
)
assert result is not None, "Result should not be None"
self.assertEqual(len(result["triton"]), 2)
for config in result["triton"]:
self.assertNotIn("template_id", config)
self.assertIn("BLOCK_M", config)
# Test tma template filtering
result = test_choices.lookup_template_configs(kernel_inputs, "mm", ["tma"])
assert result is not None, "Result should not be None"
self.assertEqual(len(result["tma"]), 1)
self.assertNotIn("template_id", result["tma"][0])
self.assertEqual(result["tma"][0]["BLOCK_M"], 256)
# Test decompose_k template filtering
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["decompose_k"]
)
assert result is not None, "Result should not be None"
self.assertEqual(len(result["decompose_k"]), 1)
self.assertNotIn("template_id", result["decompose_k"][0])
self.assertEqual(result["decompose_k"][0]["k_split"], 4)
def test_empty_table(self):
"""Test when template lookup table is empty"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
with patch.object(inductor_config.lookup_table, "table", {}):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"]
)
self.assertEqual(result, {})
def test_validation_error(self):
"""Test validation error for invalid config"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
invalid_config = {"BLOCK_M": 128} # missing template_id
lookup_table_data = {
self.create_lookup_key("mm", kernel_inputs): [invalid_config]
}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
with self.assertRaises(ValueError) as cm:
test_choices.lookup_template_configs(kernel_inputs, "mm", ["triton"])
self.assertIn("missing required 'template_id' field", str(cm.exception))
def test_cpu_input_returns_empty(self):
"""Test that CPU tensor input returns empty dict"""
# Create kernel inputs with CPU tensors
kernel_inputs = self.create_mock_mm_kernel_inputs(device=torch.device("cpu"))
lookup_table_data = {
self.create_lookup_key("mm", kernel_inputs): [self.create_config("triton")]
}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"]
)
self.assertEqual(result, {}) # Should return empty dict for CPU
def test_multiple_calls_work(self):
"""Test that calling lookup functions multiple times works correctly"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
config_list = [
self.create_config("triton", BLOCK_M=128),
self.create_config("tma", BLOCK_M=256),
]
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): config_list}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
# First calls
result1 = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"]
)
result2 = test_choices.lookup_template_configs(kernel_inputs, "mm", ["tma"])
assert result1 is not None, "Result1 should not be None"
assert result2 is not None, "Result2 should not be None"
self.assertEqual(len(result1["triton"]), 1)
self.assertEqual(len(result2["tma"]), 1)
# Second calls should work the same
result3 = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"]
)
result4 = test_choices.lookup_template_configs(kernel_inputs, "mm", ["tma"])
assert result3 is not None, "Result3 should not be None"
assert result4 is not None, "Result4 should not be None"
self.assertEqual(len(result3["triton"]), 1)
self.assertEqual(len(result4["tma"]), 1)
def test_batch_lookup_mixed_entries(self):
"""Test batch lookup where some templates have entries and others don't"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
config_list = [
self.create_config("triton", BLOCK_M=128),
self.create_config("tma", BLOCK_M=256),
# No decompose_k config in lookup table
]
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): config_list}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
# Test batch lookup with mixed results
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton", "tma", "decompose_k"]
)
assert result is not None, "Result should not be None"
# Should have entries for triton and tma, but not decompose_k
self.assertIn("triton", result)
self.assertIn("tma", result)
self.assertNotIn("decompose_k", result)
self.assertEqual(len(result["triton"]), 1)
self.assertEqual(len(result["tma"]), 1)
self.assertEqual(result["triton"][0]["BLOCK_M"], 128)
self.assertEqual(result["tma"][0]["BLOCK_M"], 256)
@parametrize(
"config_hash,template_hash,expected_kept",
[
# Hash matching (config kept)
("hash123", "hash123", True),
# Hash mismatch (config filtered)
("hash123", "hash456", False),
# Config without hash (config kept)
(None, "hash123", True),
# Template without hash (config kept)
("hash123", None, True),
# Both None (config kept)
(None, None, True),
],
)
def test_template_hash_checking(self, config_hash, template_hash, expected_kept):
"""Test template hash validation behavior"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
config = self.create_config("triton", BLOCK_M=128, BLOCK_N=64)
if config_hash is not None:
config["template_hash"] = config_hash
template_hash_map = (
{"triton": template_hash} if template_hash is not None else {}
)
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): [config]}
with (
patch.object(inductor_config.lookup_table, "table", lookup_table_data),
patch.object(inductor_config.lookup_table, "check_src_hash", True),
):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"], template_hash_map
)
if expected_kept:
assert result is not None, "Result should not be None"
self.assertIn("triton", result)
self.assertEqual(len(result["triton"]), 1)
# template_hash should be removed from returned config
self.assertNotIn("template_hash", result["triton"][0])
else:
# Config was filtered out due to hash mismatch
self.assertEqual(result, {})
def test_template_hash_checking_disabled(self):
"""Test that hash checking is skipped when config flag is disabled"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
# Create config with mismatching hash
config = self.create_config("triton", BLOCK_M=128, template_hash="hash123")
# Provide different template hash that would normally cause filtering
template_hash_map = {"triton": "hash456"}
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): [config]}
with (
patch.object(inductor_config.lookup_table, "table", lookup_table_data),
patch.object(
inductor_config.lookup_table,
"check_src_hash",
False,
),
):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"], template_hash_map
)
# Should keep config even with mismatching hash since checking is disabled
assert result is not None, "Result should not be None"
self.assertIn("triton", result)
self.assertEqual(len(result["triton"]), 1)
# template_hash should still be removed from returned config
self.assertNotIn("template_hash", result["triton"][0])
def test_template_hash_mixed_scenarios(self):
"""Test mixed hash scenarios with multiple configs"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
config_list = [
self.create_config(
"triton", BLOCK_M=128, template_hash="correct_hash"
), # Should be kept
self.create_config(
"triton", BLOCK_M=64, template_hash="wrong_hash"
), # Should be filtered
self.create_config("triton", BLOCK_M=32), # No hash, should be kept
]
template_hash_map = {"triton": "correct_hash"}
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): config_list}
with (
patch.object(inductor_config.lookup_table, "table", lookup_table_data),
patch.object(inductor_config.lookup_table, "check_src_hash", True),
):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"], template_hash_map
)
assert result is not None, "Result should not be None"
self.assertIn("triton", result)
# Should keep 2 configs: the one with correct hash and the one without hash
self.assertEqual(len(result["triton"]), 2)
# Check that kept configs have expected BLOCK_M values
kept_block_ms = [config["BLOCK_M"] for config in result["triton"]]
self.assertIn(128, kept_block_ms) # Config with correct hash
self.assertIn(32, kept_block_ms) # Config without hash
self.assertNotIn(
64, kept_block_ms
) # Config with wrong hash should be filtered
# template_hash should be removed from returned configs
for config in result["triton"]:
self.assertNotIn("template_hash", config)
@parametrize(
"config_hash,description",
[
("definitely_malformed_hash_!@#$%", "malformed hash"),
(12345, "non-string hash"),
("", "empty string hash"),
(None, "missing hash field"),
],
)
def test_hash_checking_disabled_edge_cases(self, config_hash, description):
"""Test that configs are kept when hash checking is disabled, regardless of hash validity"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
# Create config with potentially problematic hash
config = self.create_config("triton", BLOCK_M=128)
if config_hash is not None:
config["template_hash"] = config_hash
# If config_hash is None, don't add template_hash field at all
# Provide a valid template hash that would normally be used for comparison
template_hash_map = {"triton": "valid_template_hash_abc123"}
lookup_table_data = {self.create_lookup_key("mm", kernel_inputs): [config]}
with (
patch.object(inductor_config.lookup_table, "table", lookup_table_data),
patch.object(inductor_config.lookup_table, "check_src_hash", False),
):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"], template_hash_map
)
# Should keep config regardless of hash validity since checking is disabled
assert result is not None, f"Result should not be None for {description}"
self.assertIn(
"triton", result, f"Should have triton result for {description}"
)
self.assertEqual(
len(result["triton"]), 1, f"Should have 1 config for {description}"
)
# template_hash should be removed from returned config
self.assertNotIn(
"template_hash",
result["triton"][0],
f"template_hash should be removed from result for {description}",
)
# Other config fields should be preserved
self.assertEqual(
result["triton"][0]["BLOCK_M"],
128,
f"BLOCK_M should be preserved for {description}",
)
@parametrize(
"table_has_device_key,lookup_device_matches,expected_found",
[
# Device-specific key in table, same device -> found
(True, True, True),
# Device-specific key in table, different device -> not found
(True, False, False),
# Device-agnostic key in table, same device -> found
(False, True, True),
# Device-agnostic key in table, different device -> found (device-agnostic)
(False, False, True),
],
)
def test_device_key_lookup_scenarios(
self, table_has_device_key, lookup_device_matches, expected_found
):
"""Test lookup behavior with device-specific vs device-agnostic keys"""
# Create kernel inputs for "device_1" (our reference device)
kernel_inputs_device1 = self.create_mock_mm_kernel_inputs()
# Create config
config = self.create_config("triton", BLOCK_M=128)
# Create a test choices class for generating the table key
class TableKeyChoices(LookupTableChoices):
@staticmethod
def _get_device_key(device):
if device.type != "cuda":
return None
return "device_1" # Always device_1 for table key generation
table_key_choices = TableKeyChoices()
# Generate table key based on whether it should include device
if table_has_device_key:
table_key = table_key_choices.make_lookup_key(
kernel_inputs_device1, "mm", include_device=True
)
else:
table_key = table_key_choices.make_lookup_key(
kernel_inputs_device1, "mm", include_device=False
)
lookup_table_data = {table_key: [config]}
# Create test choices class for the actual lookup with different device behavior
if lookup_device_matches:
class TestChoices(LookupTableChoices):
@staticmethod
def _get_device_key(device):
if device.type != "cuda":
return None
return "device_1"
else:
class TestChoices(LookupTableChoices):
@staticmethod
def _get_device_key(device):
if device.type != "cuda":
return None
return "device_2"
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = TestChoices()
result = test_choices.lookup_template_configs(
kernel_inputs_device1, "mm", ["triton"]
)
if expected_found:
assert result is not None, (
f"Result should not be None when expected_found={expected_found}"
)
self.assertIn("triton", result, "Should have triton result when found")
self.assertEqual(len(result["triton"]), 1, "Should have exactly 1 config")
self.assertEqual(
result["triton"][0]["BLOCK_M"], 128, "Config should be preserved"
)
else:
self.assertEqual(
result,
{},
f"Should return empty dict when expected_found={expected_found}",
)
def test_device_key_priority(self):
"""Test that device-specific keys take priority over device-agnostic keys"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
# Create two different configs
device_specific_config = self.create_config(
"triton", BLOCK_M=256
) # Different BLOCK_M
device_agnostic_config = self.create_config("triton", BLOCK_M=128)
# Create a test choices instance to generate keys
key_choices = LookupTableChoices()
# Create both key types for the same inputs
device_key = key_choices.make_lookup_key(
kernel_inputs, "mm", include_device=True
)
device_agnostic_key = key_choices.make_lookup_key(
kernel_inputs, "mm", include_device=False
)
# Put both in the table
lookup_table_data = {
device_key: [device_specific_config],
device_agnostic_key: [device_agnostic_config],
}
with patch.object(inductor_config.lookup_table, "table", lookup_table_data):
test_choices = LookupTableChoices()
result = test_choices.lookup_template_configs(
kernel_inputs, "mm", ["triton"]
)
# Should get device-specific config (BLOCK_M=256), not device-agnostic (BLOCK_M=128)
assert result is not None, "Result should not be None"
self.assertIn("triton", result)
self.assertEqual(len(result["triton"]), 1)
self.assertEqual(
result["triton"][0]["BLOCK_M"],
256,
"Should use device-specific config when both exist",
)
def test_make_lookup_key_variants(self):
"""Test the make_lookup_key_variants helper function"""
kernel_inputs = self.create_mock_mm_kernel_inputs()
test_choices = LookupTableChoices()
device_key, device_agnostic_key = test_choices.make_lookup_key_variants(
kernel_inputs, "mm"
)
# Both should be strings
self.assertIsInstance(device_key, str)
self.assertIsInstance(device_agnostic_key, str)
# Device key should be longer (contains device info)
self.assertGreater(len(device_key), len(device_agnostic_key))
# Device-agnostic key should be contained in device key (as a substring after device part)
self.assertIn(device_agnostic_key.split("+mm")[0], device_key)
|
TestLookupTable
|
python
|
PyCQA__pylint
|
pylint/checkers/design_analysis.py
|
{
"start": 9880,
"end": 24682
}
|
class ____(BaseChecker):
"""Checker of potential misdesigns.
Checks for sign of poor/misdesign:
* number of methods, attributes, local variables...
* size, complexity of functions, methods
"""
# configuration section name
name = "design"
# messages
msgs = MSGS
# configuration options
options = (
(
"max-args",
{
"default": 5,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of arguments for function / method.",
},
),
(
"max-positional-arguments",
{
"default": 5,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of positional arguments for function / method.",
},
),
(
"max-locals",
{
"default": 15,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of locals for function / method body.",
},
),
(
"max-returns",
{
"default": 6,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of return / yield for function / "
"method body.",
},
),
(
"max-branches",
{
"default": 12,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of branch for function / method body.",
},
),
(
"max-statements",
{
"default": 50,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of statements in function / method body.",
},
),
(
"max-parents",
{
"default": 7,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of parents for a class (see R0901).",
},
),
(
"ignored-parents",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list of class names>",
"help": "List of qualified class names to ignore when counting class parents (see R0901)",
},
),
(
"max-attributes",
{
"default": 7,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of attributes for a class \
(see R0902).",
},
),
(
"min-public-methods",
{
"default": 2,
"type": "int",
"metavar": "<num>",
"help": "Minimum number of public methods for a class \
(see R0903).",
},
),
(
"max-public-methods",
{
"default": 20,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of public methods for a class \
(see R0904).",
},
),
(
"max-bool-expr",
{
"default": 5,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of boolean expressions in an if "
"statement (see R0916).",
},
),
(
"exclude-too-few-public-methods",
{
"default": [],
"type": "regexp_csv",
"metavar": "<pattern>[,<pattern>...]",
"help": "List of regular expressions of class ancestor names "
"to ignore when counting public methods (see R0903)",
},
),
)
def __init__(self, linter: PyLinter) -> None:
super().__init__(linter)
self._returns: list[int]
self._branches: defaultdict[nodes.LocalsDictNodeNG, int]
self._stmts: list[int]
def open(self) -> None:
"""Initialize visit variables."""
self.linter.stats.reset_node_count()
self._returns = []
self._branches = defaultdict(int)
self._stmts = []
self._exclude_too_few_public_methods = (
self.linter.config.exclude_too_few_public_methods
)
def _inc_all_stmts(self, amount: int) -> None:
for i, _ in enumerate(self._stmts):
self._stmts[i] += amount
@only_required_for_messages(
"too-many-ancestors",
"too-many-instance-attributes",
"too-few-public-methods",
"too-many-public-methods",
)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""Check size of inheritance hierarchy and number of instance attributes."""
parents = _get_parents(
node,
STDLIB_CLASSES_IGNORE_ANCESTOR.union(self.linter.config.ignored_parents),
)
nb_parents = len(parents)
if nb_parents > self.linter.config.max_parents:
self.add_message(
"too-many-ancestors",
node=node,
args=(nb_parents, self.linter.config.max_parents),
)
# Something at inference time is modifying instance_attrs to add
# properties from parent classes. Given how much we cache inference
# results, mutating instance_attrs can become a real mess. Filter
# them out here until the root cause is solved.
# https://github.com/pylint-dev/astroid/issues/2273
root = node.root()
filtered_attrs = [
k for (k, v) in node.instance_attrs.items() if v[0].root() is root
]
if len(filtered_attrs) > self.linter.config.max_attributes:
self.add_message(
"too-many-instance-attributes",
node=node,
args=(len(filtered_attrs), self.linter.config.max_attributes),
)
@only_required_for_messages("too-few-public-methods", "too-many-public-methods")
def leave_classdef(self, node: nodes.ClassDef) -> None:
"""Check number of public methods."""
my_methods = sum(
1 for method in node.mymethods() if not method.name.startswith("_")
)
        # Does the class contain more than n public methods?
# This checks only the methods defined in the current class,
# since the user might not have control over the classes
# from the ancestors. It avoids some false positives
# for classes such as unittest.TestCase, which provides
# a lot of assert methods. It doesn't make sense to warn
# when the user subclasses TestCase to add his own tests.
if my_methods > self.linter.config.max_public_methods:
self.add_message(
"too-many-public-methods",
node=node,
args=(my_methods, self.linter.config.max_public_methods),
)
# Stop here if the class is excluded via configuration.
if node.type == "class" and self._exclude_too_few_public_methods:
for ancestor in node.ancestors():
if any(
pattern.match(ancestor.qname())
for pattern in self._exclude_too_few_public_methods
):
return
# Stop here for exception, metaclass, interface classes and other
# classes for which we don't need to count the methods.
if node.type != "class" or _is_exempt_from_public_methods(node):
return
        # Does the class contain fewer than n public methods?
# This checks all the methods defined by ancestors and
# by the current class.
all_methods = _count_methods_in_class(node)
if all_methods < self.linter.config.min_public_methods:
self.add_message(
"too-few-public-methods",
node=node,
args=(all_methods, self.linter.config.min_public_methods),
)
@only_required_for_messages(
"too-many-return-statements",
"too-many-branches",
"too-many-arguments",
"too-many-locals",
"too-many-positional-arguments",
"too-many-statements",
"keyword-arg-before-vararg",
)
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""Check function name, docstring, arguments, redefinition,
variable names, max locals.
"""
# init branch and returns counters
self._returns.append(0)
# check number of arguments
args = node.args.args + node.args.posonlyargs + node.args.kwonlyargs
pos_args = node.args.args + node.args.posonlyargs
ignored_argument_names = self.linter.config.ignored_argument_names
if args is not None:
ignored_args_num = 0
if ignored_argument_names:
ignored_pos_args_num = sum(
1 for arg in pos_args if ignored_argument_names.match(arg.name)
)
ignored_kwonly_args_num = sum(
1
for arg in node.args.kwonlyargs
if ignored_argument_names.match(arg.name)
)
ignored_args_num = ignored_pos_args_num + ignored_kwonly_args_num
argnum = len(args) - ignored_args_num
if argnum > self.linter.config.max_args:
self.add_message(
"too-many-arguments",
node=node,
args=(len(args), self.linter.config.max_args),
)
pos_args_count = (
len(args) - len(node.args.kwonlyargs) - ignored_pos_args_num
)
if pos_args_count > self.linter.config.max_positional_arguments:
self.add_message(
"too-many-positional-arguments",
node=node,
args=(pos_args_count, self.linter.config.max_positional_arguments),
confidence=HIGH,
)
else:
ignored_args_num = 0
# check number of local variables
locnum = len(node.locals) - ignored_args_num
# decrement number of local variables if '_' is one of them
if "_" in node.locals:
locnum -= 1
if locnum > self.linter.config.max_locals:
self.add_message(
"too-many-locals",
node=node,
args=(locnum, self.linter.config.max_locals),
)
# init new statements counter
self._stmts.append(1)
visit_asyncfunctiondef = visit_functiondef
@only_required_for_messages(
"too-many-return-statements",
"too-many-branches",
"too-many-arguments",
"too-many-locals",
"too-many-statements",
)
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""Most of the work is done here on close:
checks for max returns, branch, return in __init__.
"""
returns = self._returns.pop()
if returns > self.linter.config.max_returns:
self.add_message(
"too-many-return-statements",
node=node,
args=(returns, self.linter.config.max_returns),
)
branches = self._branches[node]
if branches > self.linter.config.max_branches:
self.add_message(
"too-many-branches",
node=node,
args=(branches, self.linter.config.max_branches),
)
# check number of statements
stmts = self._stmts.pop()
if stmts > self.linter.config.max_statements:
self.add_message(
"too-many-statements",
node=node,
args=(stmts, self.linter.config.max_statements),
)
leave_asyncfunctiondef = leave_functiondef
def visit_return(self, _: nodes.Return) -> None:
"""Count number of returns."""
if not self._returns:
return # return outside function, reported by the base checker
self._returns[-1] += 1
def visit_default(self, node: nodes.NodeNG) -> None:
"""Default visit method -> increments the statements counter if
necessary.
"""
if node.is_statement:
self._inc_all_stmts(1)
def visit_try(self, node: nodes.Try) -> None:
"""Increments the branches counter."""
branches = len(node.handlers)
if node.orelse:
branches += 1
if node.finalbody:
branches += 1
self._inc_branch(node, branches)
self._inc_all_stmts(branches)
@only_required_for_messages("too-many-boolean-expressions", "too-many-branches")
def visit_if(self, node: nodes.If) -> None:
"""Increments the branches counter and checks boolean expressions."""
self._check_boolean_expressions(node)
branches = 1
# don't double count If nodes coming from some 'elif'
if node.orelse and not (
len(node.orelse) == 1 and isinstance(node.orelse[0], nodes.If)
):
branches += 1
self._inc_branch(node, branches)
self._inc_all_stmts(branches)
def _check_boolean_expressions(self, node: nodes.If) -> None:
"""Go through "if" node `node` and count its boolean expressions
if the 'if' node test is a BoolOp node.
"""
condition = node.test
if not isinstance(condition, nodes.BoolOp):
return
nb_bool_expr = _count_boolean_expressions(condition)
if nb_bool_expr > self.linter.config.max_bool_expr:
self.add_message(
"too-many-boolean-expressions",
node=condition,
args=(nb_bool_expr, self.linter.config.max_bool_expr),
)
def visit_while(self, node: nodes.While) -> None:
"""Increments the branches counter."""
branches = 1
if node.orelse:
branches += 1
self._inc_branch(node, branches)
visit_for = visit_while
def visit_match(self, node: nodes.Match) -> None:
"""Increments the branches counter."""
self._inc_all_stmts(1)
self._inc_branch(node, len(node.cases))
def _inc_branch(self, node: nodes.NodeNG, branchesnum: int = 1) -> None:
"""Increments the branches counter."""
self._branches[node.scope()] += branchesnum
def register(linter: PyLinter) -> None:
linter.register_checker(MisdesignChecker(linter))
|
MisdesignChecker
|
python
|
getsentry__sentry
|
src/sentry/search/events/fields.py
|
{
"start": 34059,
"end": 34764
}
|
class ____(NumberRange):
def __init__(self, name: str, start: float | None, end: float | None):
super().__init__(name, start, end)
self.has_default = True
def get_default(self, params: ParamsType) -> int:
if not params or not params.get("start") or not params.get("end"):
raise InvalidFunctionArgument("function called without default")
elif not isinstance(params.get("start"), datetime) or not isinstance(
params.get("end"), datetime
):
raise InvalidFunctionArgument("function called with invalid default")
interval = (params["end"] - params["start"]).total_seconds()
return int(interval)
|
IntervalDefault
|
python
|
plotly__plotly.py
|
plotly/graph_objs/histogram2d/_marker.py
|
{
"start": 233,
"end": 2783
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2d"
_path_str = "histogram2d.marker"
_valid_props = {"color", "colorsrc"}
@property
def color(self):
"""
Sets the aggregation data.
The 'color' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the aggregation data.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
"""
def __init__(self, arg=None, color=None, colorsrc=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.Marker`
color
Sets the aggregation data.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2d.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Marker
|
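For context, a hedged usage sketch showing where this Marker class sits in practice (assumes plotly is installed; the sample data is arbitrary):
import plotly.graph_objects as go
marker = go.histogram2d.Marker(color=[0.5, 1.0, 1.5, 2.0, 2.5])  # per-point aggregation data
fig = go.Figure(go.Histogram2d(x=[1, 1, 2, 2, 2], y=[0, 1, 1, 1, 0], marker=marker))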
python
|
rq__rq
|
tests/test_timeouts.py
|
{
"start": 519,
"end": 2639
}
|
class ____(RQTestCase):
def test_timer_death_penalty(self):
"""Ensure TimerDeathPenalty works correctly."""
q = Queue(connection=self.connection)
q.empty()
finished_job_registry = FinishedJobRegistry(connection=self.connection)
failed_job_registry = FailedJobRegistry(connection=self.connection)
# make sure death_penalty_class persists
w = TimerBasedWorker([q], connection=self.connection)
self.assertIsNotNone(w)
self.assertEqual(w.death_penalty_class, TimerDeathPenalty)
# Test short-running job doesn't raise JobTimeoutException
job = q.enqueue(thread_friendly_sleep_func, args=(1,), job_timeout=3)
w.work(burst=True)
job.refresh()
self.assertIn(job, finished_job_registry)
# Test long-running job raises JobTimeoutException
job = q.enqueue(thread_friendly_sleep_func, args=(5,), job_timeout=3)
w.work(burst=True)
self.assertIn(job, failed_job_registry)
job.refresh()
self.assertIn('rq.timeouts.JobTimeoutException', job.exc_info)
# Test negative timeout doesn't raise JobTimeoutException,
# which implies an unintended immediate timeout.
job = q.enqueue(thread_friendly_sleep_func, args=(1,), job_timeout=-1)
w.work(burst=True)
job.refresh()
self.assertIn(job, finished_job_registry)
@patch('rq.timeouts.signal')
def test_get_default_death_penalty_class(self, mock_signal):
"""get_default_death_penalty_class() returns the correct class."""
# By default, the mock object has a SIGALRM attribute, so
# get_default_death_penalty_class returns UnixSignalDeathPenalty
self.assertTrue(hasattr(mock_signal, 'SIGALRM'))
self.assertEqual(get_default_death_penalty_class(), UnixSignalDeathPenalty)
# It should return TimerDeathPenalty when SIGALRM is not available
delattr(mock_signal, 'SIGALRM')
self.assertFalse(hasattr(mock_signal, 'SIGALRM'))
self.assertEqual(get_default_death_penalty_class(), TimerDeathPenalty)
|
TestTimeouts
|
python
|
apache__airflow
|
providers/openai/src/airflow/providers/openai/triggers/openai.py
|
{
"start": 1050,
"end": 4439
}
|
class ____(BaseTrigger):
"""Triggers OpenAI Batch API."""
def __init__(
self,
conn_id: str,
batch_id: str,
poll_interval: float,
end_time: float,
) -> None:
super().__init__()
self.conn_id = conn_id
self.poll_interval = poll_interval
self.batch_id = batch_id
self.end_time = end_time
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize OpenAIBatchTrigger arguments and class path."""
return (
"airflow.providers.openai.triggers.openai.OpenAIBatchTrigger",
{
"conn_id": self.conn_id,
"batch_id": self.batch_id,
"poll_interval": self.poll_interval,
"end_time": self.end_time,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make connection to OpenAI Client, and poll the status of batch."""
hook = OpenAIHook(conn_id=self.conn_id)
try:
while (batch := hook.get_batch(self.batch_id)) and BatchStatus.is_in_progress(batch.status):
if self.end_time < time.time():
yield TriggerEvent(
{
"status": "error",
"message": f"Batch {self.batch_id} has not reached a terminal status after "
f"{time.time() - self.end_time} seconds.",
"batch_id": self.batch_id,
}
)
return
await asyncio.sleep(self.poll_interval)
if batch.status == BatchStatus.COMPLETED:
yield TriggerEvent(
{
"status": "success",
"message": f"Batch {self.batch_id} has completed successfully.",
"batch_id": self.batch_id,
}
)
elif batch.status in {BatchStatus.CANCELLED, BatchStatus.CANCELLING}:
yield TriggerEvent(
{
"status": "cancelled",
"message": f"Batch {self.batch_id} has been cancelled.",
"batch_id": self.batch_id,
}
)
elif batch.status == BatchStatus.FAILED:
yield TriggerEvent(
{
"status": "error",
"message": f"Batch failed:\n{self.batch_id}",
"batch_id": self.batch_id,
}
)
elif batch.status == BatchStatus.EXPIRED:
yield TriggerEvent(
{
"status": "error",
"message": f"Batch couldn't be completed within the hour time window :\n{self.batch_id}",
"batch_id": self.batch_id,
}
)
yield TriggerEvent(
{
"status": "error",
"message": f"Batch {self.batch_id} has failed.",
"batch_id": self.batch_id,
}
)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e), "batch_id": self.batch_id})
|
OpenAIBatchTrigger
|
python
|
encode__django-rest-framework
|
tests/test_renderers.py
|
{
"start": 20627,
"end": 21344
}
|
class ____(TestCase):
def setUp(self):
class TestSerializer(serializers.Serializer):
test_field = serializers.CharField()
self.renderer = HTMLFormRenderer()
self.serializer = TestSerializer(data={})
def test_render_with_default_args(self):
self.serializer.is_valid()
renderer = HTMLFormRenderer()
result = renderer.render(self.serializer.data)
self.assertIsInstance(result, SafeText)
def test_render_with_provided_args(self):
self.serializer.is_valid()
renderer = HTMLFormRenderer()
result = renderer.render(self.serializer.data, None, {})
self.assertIsInstance(result, SafeText)
|
TestHTMLFormRenderer
|
python
|
astropy__astropy
|
astropy/units/tests/test_logarithmic.py
|
{
"start": 840,
"end": 6020
}
|
class ____:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize("lu_unit, lu_cls", list(zip(lu_units, lu_subclasses)))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize("lu_unit", lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize("physical_unit", pu_sample)
@pytest.mark.parametrize("lu_unit", lu_units)
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize("physical_unit", pu_sample)
@pytest.mark.parametrize("lu_cls", lu_subclasses + [u.LogUnit])
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit, function_unit=2 * lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2 * lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1.0 << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.0
# same test for an array, which should produce a view
a2 = np.arange(10.0)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.0
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10.0 << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.0
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg / u.s / u.cm**2 / u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500 * u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
def test_predefined_magnitudes():
assert_quantity_allclose(
(-21.1 * u.STmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.AA
)
assert_quantity_allclose(
(-48.6 * u.ABmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.Hz
)
assert_quantity_allclose((0 * u.M_bol).physical, c.L_bol0)
assert_quantity_allclose(
(0 * u.m_bol).physical, c.L_bol0 / (4.0 * np.pi * (10.0 * c.pc) ** 2)
)
def test_predefined_reinitialisation():
assert u.mag("STflux") == u.STmag
assert u.mag("ABflux") == u.ABmag
assert u.mag("Bol") == u.M_bol
assert u.mag("bol") == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag("ST") == u.STmag
assert u.mag("AB") == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regression for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
|
TestLogUnitCreation
|
python
|
huggingface__transformers
|
src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py
|
{
"start": 7166,
"end": 8549
}
|
class ____(DepthAnythingNeck):
def forward(
self,
hidden_states: list[torch.Tensor],
patch_height: Optional[int] = None,
patch_width: Optional[int] = None,
prompt_depth: Optional[torch.Tensor] = None,
) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")
# postprocess hidden states
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
# fusion blocks
output = self.fusion_stage(features, prompt_depth=prompt_depth)
return output
@auto_docstring(
custom_intro="""
Prompt Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
"""
)
|
PromptDepthAnythingNeck
|
python
|
ray-project__ray
|
rllib/connectors/connector.py
|
{
"start": 9571,
"end": 11287
}
|
class ____(Connector):
"""Action connector connects policy outputs including actions,
to user environments.
An action connector transforms a single piece of policy output in
ActionConnectorDataType format, which is basically PolicyOutputType plus env and
agent IDs.
Any functions that operate directly on PolicyOutputType can be easily adapted
into an ActionConnector by using register_lambda_action_connector.
Example:
.. testcode::
from ray.rllib.connectors.action.lambdas import (
register_lambda_action_connector
)
ZeroActionConnector = register_lambda_action_connector(
"ZeroActionsConnector",
lambda actions, states, fetches: (
np.zeros_like(actions), states, fetches
)
)
More complicated action connectors can also be implemented by sub-classing
this ActionConnector class.
"""
def __call__(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
"""Transform policy output before they are sent to a user environment.
Args:
ac_data: Env and agent IDs, plus policy output.
Returns:
The processed action connector data.
"""
return self.transform(ac_data)
def transform(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
"""Implementation of the actual transform.
Users should override transform instead of __call__ directly.
Args:
ac_data: Env and agent IDs, plus policy output.
Returns:
The processed action connector data.
"""
raise NotImplementedError
@OldAPIStack
|
ActionConnector
|
python
|
django__django
|
tests/forms_tests/tests/test_formsets.py
|
{
"start": 76962,
"end": 77021
}
|
class ____(TestIsBoundBehavior):
pass
|
TestIsBoundBehavior
|
python
|
getsentry__sentry
|
src/sentry/models/files/control_file.py
|
{
"start": 516,
"end": 1883
}
|
class ____(AbstractFile[ControlFileBlobIndex, ControlFileBlob]):
blobs = models.ManyToManyField("sentry.ControlFileBlob", through="sentry.ControlFileBlobIndex")
# Looking for the "blob" FK or the path attribute? These are deprecated and unavailable in the control silo
class Meta:
app_label = "sentry"
db_table = "sentry_controlfile"
FILE_BLOB_MODEL = ControlFileBlob
FILE_BLOB_INDEX_MODEL = ControlFileBlobIndex
DELETE_UNREFERENCED_BLOB_TASK = delete_unreferenced_blobs_control
def _blob_index_records(self) -> Sequence[ControlFileBlobIndex]:
return sorted(
ControlFileBlobIndex.objects.filter(file=self).select_related("blob"),
key=lambda fbi: fbi.offset,
)
def _create_blob_index(self, blob: ControlFileBlob, offset: int) -> ControlFileBlobIndex:
return ControlFileBlobIndex.objects.create(file=self, blob=blob, offset=offset)
def _create_blob_from_file(self, contents: ContentFile, logger: Any) -> ControlFileBlob:
return ControlFileBlob.from_file(contents, logger)
def _get_blobs_by_id(self, blob_ids: Sequence[int]) -> models.QuerySet[ControlFileBlob]:
return ControlFileBlob.objects.filter(id__in=blob_ids).all()
def _delete_unreferenced_blob_task(self) -> Task[Any, Any]:
return delete_unreferenced_blobs_control
|
ControlFile
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/oracle/cx_oracle.py
|
{
"start": 25017,
"end": 25135
}
|
class ____(_LOBDataType, oracle.LONG):
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
|
_OracleLong
|
python
|
python-openxml__python-docx
|
src/docx/section.py
|
{
"start": 14443,
"end": 16433
}
|
class ____(_BaseHeaderFooter):
"""Page footer, used for all three types (default, even-page, and first-page).
Note that, like a document or table cell, a footer must contain a minimum of one
paragraph and a new or otherwise "empty" footer contains a single empty paragraph.
This first paragraph can be accessed as `footer.paragraphs[0]` for purposes of
adding content to it. Using :meth:`add_paragraph()` by itself to add content will
leave an empty paragraph above the newly added one.
"""
def _add_definition(self) -> FooterPart:
"""Return newly-added footer part."""
footer_part, rId = self._document_part.add_footer_part()
self._sectPr.add_footerReference(self._hdrftr_index, rId)
return footer_part
@property
def _definition(self):
"""|FooterPart| object containing content of this footer."""
footerReference = self._sectPr.get_footerReference(self._hdrftr_index)
# -- currently this is never called when `._has_definition` evaluates False --
assert footerReference is not None
return self._document_part.footer_part(footerReference.rId)
def _drop_definition(self):
"""Remove footer definition (footer part) associated with this section."""
rId = self._sectPr.remove_footerReference(self._hdrftr_index)
self._document_part.drop_rel(rId)
@property
def _has_definition(self) -> bool:
"""True if a footer is defined for this section."""
footerReference = self._sectPr.get_footerReference(self._hdrftr_index)
return footerReference is not None
@property
def _prior_headerfooter(self):
"""|_Footer| proxy on prior sectPr element or None if this is first section."""
preceding_sectPr = self._sectPr.preceding_sectPr
return (
None
if preceding_sectPr is None
else _Footer(preceding_sectPr, self._document_part, self._hdrftr_index)
)
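# --- Hedged usage sketch (illustrative, not part of the python-docx source above) ---
# Shows the first-paragraph behaviour the class docstring describes, using only
# public python-docx API; the file name "example.docx" is an assumption.
from docx import Document

doc = Document()
footer = doc.sections[0].footer              # footer proxy for the default section
footer.paragraphs[0].text = "Confidential"   # reuse the built-in empty paragraph
# footer.add_paragraph("v2") here would leave an empty paragraph above the new one
doc.save("example.docx")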
|
_Footer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sort-threats-by-severity-and-exploitability.py
|
{
"start": 40,
"end": 277
}
|
class ____(object):
def sortThreats(self, threats):
"""
:type threats: List[List[int]]
:rtype: List[List[int]]
"""
threats.sort(key=lambda x: (-(2*x[1]+x[2]), x[0]))
return threats
|
Solution
|
python
|
getsentry__sentry
|
src/django_picklefield/fields.py
|
{
"start": 1112,
"end": 2799
}
|
class ____:
"""
A class used to wrap objects that have properties that may clash with the
ORM internals.
For example, objects with the `prepare_database_save` property such as
`django.db.Model` subclasses won't work under certain conditions and the
same applies when trying to retrieve any `callable` object.
"""
__slots__ = ("_obj",)
def __init__(self, obj: Any) -> None:
self._obj = obj
def wrap_conflictual_object(obj: Any) -> Any:
if hasattr(obj, "prepare_database_save") or callable(obj):
obj = _ObjectWrapper(obj)
return obj
def get_default_protocol() -> Any:
return getattr(settings, "PICKLEFIELD_DEFAULT_PROTOCOL", DEFAULT_PROTOCOL)
def dbsafe_encode(
value: Any, compress_object: bool = False, pickle_protocol: Any = None, copy: bool = True
) -> Any:
# We use deepcopy() here to avoid a problem with cPickle, where dumps
# can generate different character streams for same lookup value if
# they are referenced differently.
# The reason this is important is because we do all of our lookups as
# simple string matches, thus the character streams must be the same
# for the lookups to work properly. See tests.py for more information.
if pickle_protocol is None:
pickle_protocol = get_default_protocol()
if copy:
# Copy can be very expensive if users aren't going to perform lookups
# on the value anyway.
value = deepcopy(value)
value = dumps(value, protocol=pickle_protocol)
if compress_object:
value = compress(value)
value = b64encode(value).decode() # decode bytes to str
return PickledObject(value)
|
_ObjectWrapper
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/readers/string_iterable.py
|
{
"start": 214,
"end": 1203
}
|
class ____(BasePydanticReader):
"""
String Iterable Reader.
Gets a list of documents, given an iterable (e.g. list) of strings.
Example:
.. code-block:: python
from llama_index.core.legacy import StringIterableReader, TreeIndex
documents = StringIterableReader().load_data(
texts=["I went to the store", "I bought an apple"]
)
index = TreeIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine.query("what did I buy?")
# response should be something like "You bought an apple."
"""
is_remote: bool = False
@classmethod
def class_name(cls) -> str:
return "StringIterableReader"
def load_data(self, texts: List[str]) -> List[Document]:
"""Load the data."""
results = []
for text in texts:
results.append(Document(text=text))
return results
|
StringIterableReader
|
python
|
apache__thrift
|
test/py/TestClient.py
|
{
"start": 15197,
"end": 15378
}
|
class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
return make_pedantic(TCompactProtocol.TCompactProtocolFactory().getProtocol(transport))
|
CompactTest
|
python
|
tensorflow__tensorflow
|
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
|
{
"start": 20906,
"end": 21769
}
|
class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testL2Loss(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant(
[1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype
)
l2loss = nn_ops.l2_loss(x)
value = self.evaluate(l2loss)
self.assertAllClose(7.0, value)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x")
output = nn_ops.l2_loss(x)
err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])
print("L2Loss gradient err = %g " % err)
err_tolerance = 1e-10
self.assertLess(err, err_tolerance)
|
L2LossTest
|
python
|
pytorch__pytorch
|
torch/_dynamo/source.py
|
{
"start": 36551,
"end": 37025
}
|
class ____(ChainedSource):
def name(self) -> str:
return f"___from_numpy({self.base.name()})"
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(lambda: codegen.load_import_from("torch", "as_tensor"))
codegen(self.base)
codegen.extend_output(create_call_function(1, False))
@dataclasses.dataclass(frozen=True)
|
NumpyTensorSource
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/test_engines.py
|
{
"start": 695,
"end": 1716
}
|
class ____:
@pytest.mark.parametrize(
"scalar",
[
pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")),
pd.Timestamp("2016-01-01")._value,
pd.Timestamp("2016-01-01").to_pydatetime(),
pd.Timestamp("2016-01-01").to_datetime64(),
],
)
def test_not_contains_requires_timestamp(self, scalar):
dti1 = pd.date_range("2016-01-01", periods=3)
dti2 = dti1.insert(1, pd.NaT) # non-monotonic
dti3 = dti1.insert(3, dti1[0]) # non-unique
dti4 = pd.date_range("2016-01-01", freq="ns", periods=2_000_000)
dti5 = dti4.insert(0, dti4[0]) # over size threshold, not unique
msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
for dti in [dti1, dti2, dti3, dti4, dti5]:
with pytest.raises(TypeError, match=msg):
scalar in dti._engine
with pytest.raises(KeyError, match=msg):
dti._engine.get_loc(scalar)
|
TestDatetimeEngine
|
python
|
lepture__authlib
|
authlib/integrations/base_client/sync_app.py
|
{
"start": 5916,
"end": 9729
}
|
class ____:
client_cls = None
def __init__(
self,
framework,
name=None,
fetch_token=None,
update_token=None,
client_id=None,
client_secret=None,
access_token_url=None,
access_token_params=None,
authorize_url=None,
authorize_params=None,
api_base_url=None,
client_kwargs=None,
server_metadata_url=None,
compliance_fix=None,
client_auth_methods=None,
user_agent=None,
**kwargs,
):
self.framework = framework
self.name = name
self.client_id = client_id
self.client_secret = client_secret
self.access_token_url = access_token_url
self.access_token_params = access_token_params
self.authorize_url = authorize_url
self.authorize_params = authorize_params
self.api_base_url = api_base_url
self.client_kwargs = client_kwargs or {}
self.compliance_fix = compliance_fix
self.client_auth_methods = client_auth_methods
self._fetch_token = fetch_token
self._update_token = update_token
self._user_agent = user_agent or default_user_agent
self._server_metadata_url = server_metadata_url
self.server_metadata = kwargs
def _on_update_token(self, token, refresh_token=None, access_token=None):
raise NotImplementedError()
def _get_oauth_client(self, **metadata):
client_kwargs = {}
client_kwargs.update(self.client_kwargs)
client_kwargs.update(metadata)
if self.authorize_url:
client_kwargs["authorization_endpoint"] = self.authorize_url
if self.access_token_url:
client_kwargs["token_endpoint"] = self.access_token_url
session = self.client_cls(
client_id=self.client_id,
client_secret=self.client_secret,
update_token=self._on_update_token,
**client_kwargs,
)
if self.client_auth_methods:
for f in self.client_auth_methods:
session.register_client_auth_method(f)
if self.compliance_fix:
self.compliance_fix(session)
session.headers["User-Agent"] = self._user_agent
return session
@staticmethod
def _format_state_params(state_data, params):
if state_data is None:
raise MismatchingStateError()
code_verifier = state_data.get("code_verifier")
if code_verifier:
params["code_verifier"] = code_verifier
redirect_uri = state_data.get("redirect_uri")
if redirect_uri:
params["redirect_uri"] = redirect_uri
return params
@staticmethod
def _create_oauth2_authorization_url(client, authorization_endpoint, **kwargs):
rv = {}
if client.code_challenge_method:
code_verifier = kwargs.get("code_verifier")
if not code_verifier:
code_verifier = generate_token(48)
kwargs["code_verifier"] = code_verifier
rv["code_verifier"] = code_verifier
log.debug(f"Using code_verifier: {code_verifier!r}")
scope = kwargs.get("scope", client.scope)
scope = (
(scope if isinstance(scope, (list, tuple)) else scope.split())
if scope
else None
)
if scope and "openid" in scope:
# this is an OpenID Connect service
nonce = kwargs.get("nonce")
if not nonce:
nonce = generate_token(20)
kwargs["nonce"] = nonce
rv["nonce"] = nonce
url, state = client.create_authorization_url(authorization_endpoint, **kwargs)
rv["url"] = url
rv["state"] = state
return rv
|
OAuth2Base
|
python
|
scipy__scipy
|
scipy/signal/tests/test_signaltools.py
|
{
"start": 96316,
"end": 101878
}
|
class ____:
def _setup_rank1(self, dt, xp):
a = xp.linspace(0, 3, 4, dtype=dt)
b = xp.linspace(1, 2, 2, dtype=dt)
y_r = xp.asarray([0, 2, 5, 8, 3], dtype=dt)
return a, b, y_r
def equal_tolerance(self, res_dt):
# default value of keyword
decimal = 6
try:
dt_info = np.finfo(res_dt)
if hasattr(dt_info, 'resolution'):
decimal = int(-0.5*np.log10(dt_info.resolution))
except Exception:
pass
return decimal
def equal_tolerance_fft(self, res_dt):
# FFT implementations convert longdouble arguments down to
# double so don't expect better precision, see gh-9520
if res_dt == np.longdouble:
return self.equal_tolerance(np.float64)
else:
return self.equal_tolerance(res_dt)
@skip_xp_backends(np_only=True, reason="order='F'")
def test_method(self, dt, xp):
dt = getattr(xp, dt)
a, b, y_r = self._setup_rank3(dt, xp)
y_fft = correlate(a, b, method='fft')
y_direct = correlate(a, b, method='direct')
assert_array_almost_equal(y_r, y_fft,
decimal=self.equal_tolerance_fft(y_fft.dtype),)
assert_array_almost_equal(y_r, y_direct,
decimal=self.equal_tolerance(y_direct.dtype),)
assert y_fft.dtype == dt
assert y_direct.dtype == dt
def test_rank1_valid(self, dt, xp):
if is_torch(xp) and dt in ["uint16", "uint32", "uint64"]:
pytest.skip("torch does not support unsigned ints")
dt = getattr(xp, dt) if isinstance(dt, str) else dt
a, b, y_r = self._setup_rank1(dt, xp)
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r[1:4])
assert y.dtype == dt
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, xp.flip(y_r[1:4]))
assert y.dtype == dt
def test_rank1_same(self, dt, xp):
if is_torch(xp) and dt in ["uint16", "uint32", "uint64"]:
pytest.skip("torch does not support unsigned ints")
dt = getattr(xp, dt) if isinstance(dt, str) else dt
a, b, y_r = self._setup_rank1(dt, xp)
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r[:-1])
assert y.dtype == dt
def test_rank1_full(self, dt, xp):
if is_torch(xp) and dt in ["uint16", "uint32", "uint64"]:
pytest.skip("torch does not support unsigned ints")
dt = getattr(xp, dt) if isinstance(dt, str) else dt
a, b, y_r = self._setup_rank1(dt, xp)
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r)
assert y.dtype == dt
def _setup_rank3(self, dt, xp):
a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(
dt)
b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(
dt)
y_r = np.array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
[46., 432., 1062., 1840., 2672., 1698., 864., 266.],
[134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
[260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
[202., 664., 1290., 1984., 2688., 1590., 712., 150.],
[114., 344., 642., 960., 1280., 726., 296., 38.]],
[[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
[134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
[325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
[571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
[434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
[241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
[[22., 214., 528., 916., 1332., 846., 430., 132.],
[86., 484., 1098., 1832., 2600., 1602., 772., 206.],
[188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
[308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
[230., 692., 1290., 1928., 2568., 1458., 596., 78.],
[126., 354., 636., 924., 1212., 654., 234., 0.]]],
dtype=np.float64).astype(dt)
return a, b, y_r
@skip_xp_backends(np_only=True, reason="order='F'")
def test_rank3_valid(self, dt, xp):
dt = getattr(xp, dt) if isinstance(dt, str) else dt
a, b, y_r = self._setup_rank3(dt, xp)
y = correlate(a, b, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
assert y.dtype == dt
# See gh-5897
y = correlate(b, a, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
assert y.dtype == dt
@skip_xp_backends(np_only=True, reason="order='F'")
def test_rank3_same(self, dt, xp):
dt = getattr(xp, dt) if isinstance(dt, str) else dt
a, b, y_r = self._setup_rank3(dt, xp)
y = correlate(a, b, "same")
xp_assert_close(y, y_r[0:-1, 1:-1, 1:-2])
assert y.dtype == dt
@skip_xp_backends(np_only=True, reason="order='F'")
def test_rank3_all(self, dt, xp):
dt = getattr(xp, dt) if isinstance(dt, str) else dt
a, b, y_r = self._setup_rank3(dt, xp)
y = correlate(a, b)
xp_assert_close(y, y_r)
assert y.dtype == dt
@make_xp_test_case(correlate)
|
TestCorrelateReal
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_U.py
|
{
"start": 1090,
"end": 2587
}
|
class ____(Benchmark):
r"""
Ursem 3 objective function.
This class defines the Ursem 3 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Ursem03}}(x) = - \sin(2.2 \pi x_1 + 0.5 \pi)
\frac{2 - \lvert x_1 \rvert}{2}
\frac{3 - \lvert x_1 \rvert}{2}
- \sin(2.2 \pi x_2 + 0.5 \pi)
\frac{2 - \lvert x_2 \rvert}{2}
\frac{3 - \lvert x_2 \rvert}{2}
with :math:`x_1 \in [-2, 2]`, :math:`x_2 \in [-1.5, 1.5]`.
*Global optimum*: :math:`f(x) = -3` for :math:`x = [0, 0]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO Gavana and Jamil #157 disagree on the formulae here. Jamil squares the
x[1] term in the sine expression. Gavana doesn't. Go with Gavana here.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = -3.0
def fun(self, x, *args):
self.nfev += 1
u = -(sin(2.2 * pi * x[0] + 0.5 * pi)
* ((2.0 - abs(x[0])) / 2.0) * ((3.0 - abs(x[0])) / 2))
v = -(sin(2.2 * pi * x[1] + 0.5 * pi)
* ((2.0 - abs(x[1])) / 2) * ((3.0 - abs(x[1])) / 2))
return u + v
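# --- Hedged check (illustrative; assumes the surrounding benchmark module context,
# i.e. the Benchmark base class, its nfev counter, and the module-level numpy
# imports). Evaluating the stated global optimum x = [0, 0] reproduces fglob = -3.
import numpy as np

bench = Ursem03()                            # the benchmark class defined above
print(bench.fun(np.asarray([0.0, 0.0])))     # prints -3.0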
|
Ursem03
|
python
|
spack__spack
|
lib/spack/spack/repo.py
|
{
"start": 70061,
"end": 70741
}
|
class ____(RepoDescriptor):
"""A descriptor for a broken repository, used to indicate errors in the configuration that
aren't fatal until the repository is used."""
def __init__(self, name: Optional[str], error: str) -> None:
super().__init__(name)
self.error = error
def initialize(
self, fetch: bool = True, git: Optional[spack.util.executable.Executable] = None
) -> None:
pass
def construct(
self, cache: spack.util.file_cache.FileCache, overrides: Optional[Dict[str, Any]] = None
) -> Dict[str, Union[Repo, Exception]]:
return {self.name or "<unknown>": Exception(self.error)}
|
BrokenRepoDescriptor
|
python
|
pytorch__pytorch
|
torch/ao/nn/quantized/reference/modules/conv.py
|
{
"start": 3480,
"end": 5176
}
|
class ____(_ConvNd, nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
device=None,
dtype=None,
weight_qparams: dict[str, Any] | None = None,
):
nn.Conv2d.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
# pyrefly: ignore [bad-argument-type]
padding_mode,
device,
dtype,
)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.conv2d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.conv2d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv2d
"""
weight_quant_dequant = self.get_weight()
result = F.conv2d(
x,
weight_quant_dequant,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
return result
def _get_name(self):
return "QuantizedConv2d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams): # type: ignore[override]
return _ConvNd.from_float(cls, float_conv, weight_qparams)
|
Conv2d
|
python
|
getsentry__sentry
|
src/sentry/mail/forms/assigned_to.py
|
{
"start": 161,
"end": 405
}
|
class ____(MemberTeamForm[AssigneeTargetType]):
targetType = forms.ChoiceField(choices=ASSIGNEE_CHOICES)
teamValue = AssigneeTargetType.TEAM
memberValue = AssigneeTargetType.MEMBER
targetTypeEnum = AssigneeTargetType
|
AssignedToForm
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_associationproxy.py
|
{
"start": 43352,
"end": 47493
}
|
class ____(fixtures.MappedTest):
run_setup_mappers = "each"
run_setup_classes = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"parents",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30)),
)
Table(
"children",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer, ForeignKey("parents.id")),
Column("name", String(30)),
)
@classmethod
def insert_data(cls, connection):
parents = cls.tables.parents
connection.execute(parents.insert(), dict(name="p1"))
@classmethod
def setup_classes(cls):
Parent.kids = association_proxy("children", "name")
def test_weak_identity_map(self):
self.mapper_registry.map_imperatively(
Parent,
self.tables.parents,
properties=dict(children=relationship(Child)),
)
self.mapper_registry.map_imperatively(Child, self.tables.children)
session = fixture_session()
def add_child(parent_name, child_name):
parent = session.query(Parent).filter_by(name=parent_name).one()
parent.kids.append(child_name)
add_child("p1", "c1")
gc_collect()
add_child("p1", "c2")
session.flush()
p = session.query(Parent).filter_by(name="p1").one()
assert set(p.kids) == {"c1", "c2"}, p.kids
def test_copy(self):
self.mapper_registry.map_imperatively(
Parent,
self.tables.parents,
properties=dict(children=relationship(Child)),
)
self.mapper_registry.map_imperatively(Child, self.tables.children)
p = Parent("p1")
p.kids.extend(["c1", "c2"])
p_copy = copy.copy(p)
del p
gc_collect()
assert set(p_copy.kids) == {"c1", "c2"}, p_copy.kids
def test_pickle_list(self):
self.mapper_registry.map_imperatively(
Parent,
self.tables.parents,
properties=dict(children=relationship(Child)),
)
self.mapper_registry.map_imperatively(Child, self.tables.children)
p = Parent("p1")
p.kids.extend(["c1", "c2"])
r1 = pickle.loads(pickle.dumps(p))
assert r1.kids == ["c1", "c2"]
# can't do this without parent having a cycle
# r2 = pickle.loads(pickle.dumps(p.kids))
# assert r2 == ['c1', 'c2']
def test_pickle_set(self):
self.mapper_registry.map_imperatively(
Parent,
self.tables.parents,
properties=dict(
children=relationship(Child, collection_class=set)
),
)
self.mapper_registry.map_imperatively(Child, self.tables.children)
p = Parent("p1")
p.kids.update(["c1", "c2"])
r1 = pickle.loads(pickle.dumps(p))
assert r1.kids == {"c1", "c2"}
# can't do this without parent having a cycle
# r2 = pickle.loads(pickle.dumps(p.kids))
# assert r2 == set(['c1', 'c2'])
def test_pickle_dict(self):
self.mapper_registry.map_imperatively(
Parent,
self.tables.parents,
properties=dict(
children=relationship(
KVChild,
collection_class=collections.keyfunc_mapping(
PickleKeyFunc("name")
),
)
),
)
self.mapper_registry.map_imperatively(KVChild, self.tables.children)
p = Parent("p1")
p.kids.update({"c1": "v1", "c2": "v2"})
assert p.kids == {"c1": "c1", "c2": "c2"}
r1 = pickle.loads(pickle.dumps(p))
assert r1.kids == {"c1": "c1", "c2": "c2"}
# can't do this without parent having a cycle
# r2 = pickle.loads(pickle.dumps(p.kids))
# assert r2 == {'c1': 'c1', 'c2': 'c2'}
|
ReconstitutionTest
|
python
|
django__django
|
tests/cache/closeable_cache.py
|
{
"start": 60,
"end": 162
}
|
class ____:
closed = False
def close(self, **kwargs):
self.closed = True
|
CloseHookMixin
|
python
|
pypa__warehouse
|
warehouse/accounts/models.py
|
{
"start": 15120,
"end": 16536
}
|
class ____(db.Model):
__tablename__ = "user_unique_logins"
__table_args__ = (
UniqueConstraint(
"user_id", "ip_address", name="_user_unique_logins_user_id_ip_address_uc"
),
Index(
"user_unique_logins_user_id_ip_address_idx",
"user_id",
"ip_address",
unique=True,
),
)
user_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
index=True,
)
user: Mapped[User] = orm.relationship(back_populates="unique_logins")
ip_address: Mapped[str] = mapped_column(String, nullable=False)
created: Mapped[datetime_now]
last_used: Mapped[datetime_now]
device_information: Mapped[dict | None] = mapped_column(JSONB, nullable=True)
status: Mapped[UniqueLoginStatus] = mapped_column(
Enum(UniqueLoginStatus, values_callable=lambda x: [e.value for e in x]),
nullable=False,
default=UniqueLoginStatus.PENDING,
server_default=UniqueLoginStatus.PENDING.value,
)
expires: Mapped[datetime.datetime | None] = mapped_column(TZDateTime)
def __repr__(self):
return (
f"<UserUniqueLogin(user={self.user.username!r}, "
f"ip_address={self.ip_address!r}, "
f"status={self.status!r})>"
)
|
UserUniqueLogin
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_inspect.py
|
{
"start": 315,
"end": 1428
}
|
class ____(fixtures.TestBase):
def test_table(self):
t = Table("t", MetaData(), Column("x", Integer))
is_(inspect(t), t)
assert t.is_selectable
is_(t.selectable, t)
def test_select(self):
t = Table("t", MetaData(), Column("x", Integer))
s = t.select()
is_(inspect(s), s)
assert s.is_selectable
is_(s.selectable, s)
def test_column_expr(self):
c = Column("x", Integer)
is_(inspect(c), c)
assert not c.is_selectable
assert not hasattr(c, "selectable")
def test_no_clause_element_on_clauseelement(self):
# re [ticket:3802], there are in the wild examples
# of looping over __clause_element__, therefore the
# absence of __clause_element__ as a test for "this is the clause
# element" must be maintained
class Foo(ClauseElement):
pass
assert not hasattr(Foo(), "__clause_element__")
def test_col_now_has_a_clauseelement(self):
x = Column("foo", Integer)
assert hasattr(x, "__clause_element__")
|
TestCoreInspection
|
python
|
explosion__spaCy
|
spacy/lang/bn/__init__.py
|
{
"start": 325,
"end": 540
}
|
class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
suffixes = TOKENIZER_SUFFIXES
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
|
BengaliDefaults
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/return_in_init.py
|
{
"start": 187,
"end": 245
}
|
class ____:
def __init__(self):
return 1
|
MyClass
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-critical-and-pseudo-critical-edges-in-minimum-spanning-tree.py
|
{
"start": 538,
"end": 1673
}
|
class ____(object):
def findCriticalAndPseudoCriticalEdges(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[List[int]]
"""
def MST(n, edges, unused=None, used=None):
union_find = UnionFind(n)
weight = 0
if used is not None:
u, v, w, _ = edges[used]
if union_find.union_set(u, v):
weight += w
for i, (u, v, w, _) in enumerate(edges):
if i == unused:
continue
if union_find.union_set(u, v):
weight += w
return weight if union_find.count == 1 else float("inf")
for i, edge in enumerate(edges):
edge.append(i)
edges.sort(key=lambda x: x[2])
mst = MST(n, edges)
result = [[], []]
for i, edge in enumerate(edges):
if mst < MST(n, edges, unused=i):
result[0].append(edge[3])
elif mst == MST(n, edges, used=i):
result[1].append(edge[3])
return result
|
Solution
|
python
|
apache__airflow
|
providers/redis/src/airflow/providers/redis/hooks/redis.py
|
{
"start": 1060,
"end": 5348
}
|
class ____(BaseHook):
"""
Wrapper for connection to interact with Redis in-memory data structure store.
You can set your db in the extra field of your connection as ``{"db": 3}``.
You can also set ssl parameters as:
``{"ssl": true, "ssl_cert_reqs": "required", "ssl_certfile": "/path/to/cert.pem", etc}``.
"""
conn_name_attr = "redis_conn_id"
default_conn_name = "redis_default"
conn_type = "redis"
hook_name = "Redis"
def __init__(self, redis_conn_id: str = default_conn_name, **kwargs) -> None:
"""
Prepare hook to connect to a Redis database.
:param redis_conn_id: the name of the connection that has the parameters
we need to connect to Redis.
"""
super().__init__()
self.redis_conn_id = redis_conn_id
self.redis = None
self.host = kwargs.get("host", None)
self.port = kwargs.get("port", None)
self.username = kwargs.get("username", None)
self.password = kwargs.get("password", None)
self.db = kwargs.get("db", None)
def get_conn(self):
"""Return a Redis connection."""
conn = self.get_connection(self.redis_conn_id)
self.host = conn.host
self.port = conn.port
self.username = conn.login
self.password = None if str(conn.password).lower() in ["none", "false", ""] else conn.password
self.db = conn.extra_dejson.get("db")
# check for ssl parameters in conn.extra
ssl_arg_names = [
"ssl",
"ssl_cert_reqs",
"ssl_ca_certs",
"ssl_keyfile",
"ssl_certfile",
"ssl_check_hostname",
]
ssl_args = {name: val for name, val in conn.extra_dejson.items() if name in ssl_arg_names}
if not self.redis:
self.log.debug(
'Initializing redis object for conn_id "%s" on %s:%s:%s',
self.redis_conn_id,
self.host,
self.port,
self.db,
)
self.redis = Redis(
host=self.host,
port=self.port,
username=self.username,
password=self.password,
db=self.db,
**ssl_args,
)
return self.redis
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for Redis connection."""
return {
"hidden_fields": ["schema", "extra"],
"relabeling": {},
}
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to Redis connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, IntegerField, StringField
from wtforms.validators import Optional, any_of
return {
"db": IntegerField(lazy_gettext("DB"), widget=BS3TextFieldWidget(), default=0),
"ssl": BooleanField(lazy_gettext("Enable SSL"), default=False),
"ssl_cert_reqs": StringField(
lazy_gettext("SSL verify mode"),
validators=[any_of(ALLOWED_SSL_CERT_REQS)],
widget=BS3TextFieldWidget(),
description=f"Must be one of: {', '.join(ALLOWED_SSL_CERT_REQS)}.",
default=DEFAULT_SSL_CERT_REQS,
),
"ssl_ca_certs": StringField(
lazy_gettext("CA certificate path"),
widget=BS3TextFieldWidget(),
validators=[Optional()],
default=None,
),
"ssl_keyfile": StringField(
lazy_gettext("Private key path"),
widget=BS3TextFieldWidget(),
validators=[Optional()],
default=None,
),
"ssl_certfile": StringField(
lazy_gettext("Certificate path"),
widget=BS3TextFieldWidget(),
validators=[Optional()],
default=None,
),
"ssl_check_hostname": BooleanField(lazy_gettext("Enable hostname check"), default=False),
}
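# --- Hedged usage sketch (illustrative, not part of the provider source above) ---
# With a Redis connection whose extra field carries the options described in the
# class docstring (e.g. {"db": 3, "ssl": true}), the hook above (RedisHook) hands
# back a ready redis-py client; the connection id "redis_default" is its own default.
from airflow.providers.redis.hooks.redis import RedisHook

hook = RedisHook(redis_conn_id="redis_default")
client = hook.get_conn()          # redis.Redis instance, per get_conn() above
client.set("greeting", "hello")
print(client.get("greeting"))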
|
RedisHook
|
python
|
walkccc__LeetCode
|
solutions/563. Binary Tree Tilt/563.py
|
{
"start": 0,
"end": 320
}
|
class ____:
def findTilt(self, root: TreeNode | None) -> int:
ans = 0
def summ(root: TreeNode | None) -> int:
nonlocal ans
if not root:
return 0
l = summ(root.left)
r = summ(root.right)
ans += abs(l - r)
return root.val + l + r
summ(root)
return ans
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/1183. Maximum Number of Ones/1183-2.py
|
{
"start": 0,
"end": 459
}
|
class ____:
def maximumNumberOfOnes(
self,
width: int,
height: int,
sideLength: int,
maxOnes: int,
) -> int:
subCount = []
def getCount(length: int, index: int) -> int:
return (length - index - 1) // sideLength + 1
for i in range(sideLength):
for j in range(sideLength):
subCount.append(getCount(width, i) * getCount(height, j))
return sum(sorted(subCount, reverse=True)[:maxOnes])
|
Solution
|
python
|
cherrypy__cherrypy
|
cherrypy/lib/covercp.py
|
{
"start": 8207,
"end": 12287
}
|
class ____(object):
"""HTTP handler for the coverage stats."""
def __init__(self, coverage, root=None):
"""Initialize the coverage stats application."""
self.coverage = coverage
if root is None:
# Guess initial depth. Files outside this path will not be
# reachable from the web interface.
root = os.path.dirname(cherrypy.__file__)
self.root = root
@cherrypy.expose
def index(self):
"""Render the coverage stats index page."""
return TEMPLATE_FRAMESET % self.root.lower()
@cherrypy.expose
def menu(
self,
base='/',
pct='50',
showpct='',
exclude=r'python\d\.\d|test|tut\d|tutorial',
):
"""Render HTML menu web page."""
# The coverage module uses all-lower-case names.
base = base.lower().rstrip(os.sep)
yield TEMPLATE_MENU
yield TEMPLATE_FORM % locals()
# Start by showing links for parent paths
yield "<div id='crumbs'>"
path = ''
atoms = base.split(os.sep)
atoms.pop()
for atom in atoms:
path += atom + os.sep
yield (
"<a href='menu?base=%s&exclude=%s'>%s</a> %s"
% (path, urllib.parse.quote_plus(exclude), atom, os.sep)
)
yield '</div>'
yield "<div id='tree'>"
# Then display the tree
tree = get_tree(base, exclude, self.coverage)
if not tree:
yield '<p>No modules covered.</p>'
else:
for chunk in _show_branch(
tree,
base,
'/',
pct,
showpct == 'checked',
exclude,
coverage=self.coverage,
):
yield chunk
yield '</div>'
yield '</body></html>'
def annotated_file(self, filename, statements, excluded, missing):
"""Annotate given file with coverage information."""
with open(filename, 'r') as source:
lines = source.readlines()
buffer = []
for lineno, line in enumerate(lines):
lineno += 1
line = line.strip('\n\r')
empty_the_buffer = True
if lineno in excluded:
template = TEMPLATE_LOC_EXCLUDED
elif lineno in missing:
template = TEMPLATE_LOC_NOT_COVERED
elif lineno in statements:
template = TEMPLATE_LOC_COVERED
else:
empty_the_buffer = False
buffer.append((lineno, line))
if empty_the_buffer:
for lno, pastline in buffer:
yield template % (lno, html.escape(pastline))
buffer = []
yield template % (lineno, html.escape(line))
@cherrypy.expose
def report(self, name):
"""Render coverage stats as HTML."""
filename, statements, excluded, missing, _ = self.coverage.analysis2(
name,
)
pc = _percent(statements, missing)
yield TEMPLATE_COVERAGE % dict(
name=os.path.basename(name),
fullpath=name,
pc=pc,
)
yield '<table>\n'
for line in self.annotated_file(
filename,
statements,
excluded,
missing,
):
yield line
yield '</table>'
yield '</body>'
yield '</html>'
def serve(path=localFile, port=8080, root=None):
"""Serve the coverage app over HTTP."""
if coverage is None:
raise ImportError('The coverage module could not be imported.')
from coverage import coverage
cov = coverage(data_file=path)
cov.load()
cherrypy.config.update(
{
'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': 'production',
},
)
cherrypy.quickstart(CoverStats(cov, root))
if __name__ == '__main__':
serve(*tuple(sys.argv[1:]))
|
CoverStats
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/_ast.py
|
{
"start": 148100,
"end": 150450
}
|
class ____(ASTBase):
def __init__(
self, concept: ASTNestedName, params: list[ASTTemplateIntroductionParameter]
) -> None:
assert len(params) > 0
self.concept = concept
self.params = params
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTemplateIntroduction):
return NotImplemented
return self.concept == other.concept and self.params == other.params
def __hash__(self) -> int:
return hash((self.concept, self.params))
def get_id(self, version: int) -> str:
assert version >= 2
return ''.join([
# first do the same as a normal template parameter list
'I',
*(param.get_id(version) for param in self.params),
'E',
# let's use X expr E, which is otherwise for constant template args
'X',
self.concept.get_id(version),
'I',
*(param.get_id_as_arg(version) for param in self.params),
'E',
'E',
])
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
res.extend((
transform(self.concept),
'{',
', '.join(transform(param) for param in self.params),
'} ',
))
return ''.join(res)
def describe_signature_as_introducer(
self,
parentNode: desc_signature,
mode: str,
env: BuildEnvironment,
symbol: Symbol,
lineSpec: bool,
) -> None:
# Note: 'lineSpec' has no effect on template introductions.
signode = addnodes.desc_signature_line()
parentNode += signode
signode.sphinx_line_type = 'templateIntroduction'
self.concept.describe_signature(signode, 'markType', env, symbol)
signode += addnodes.desc_sig_punctuation('{', '{')
first = True
for param in self.params:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
first = False
param.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation('}', '}')
################################################################################
|
ASTTemplateIntroduction
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_moe/modular_qwen3_moe.py
|
{
"start": 2820,
"end": 3094
}
|
class ____(MixtralPreTrainedModel):
_can_record_outputs = {
"router_logits": OutputRecorder(Qwen3MoeTopKRouter, layer_name="mlp.router", index=0),
"hidden_states": Qwen3MoeDecoderLayer,
"attentions": Qwen3MoeAttention,
}
|
Qwen3MoePreTrainedModel
|
python
|
kamyu104__LeetCode-Solutions
|
Python/split-concatenated-strings.py
|
{
"start": 31,
"end": 658
}
|
class ____(object):
def splitLoopedString(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
tmp = []
for s in strs:
tmp += max(s, s[::-1])
s = "".join(tmp)
result, st = "a", 0
for i in xrange(len(strs)):
body = "".join([s[st + len(strs[i]):], s[0:st]])
for p in strs[i], strs[i][::-1]:
for j in xrange(len(strs[i])):
if p[j] >= result[0]:
result = max(result, "".join([p[j:], body, p[:j]]))
st += len(strs[i])
return result
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/status_checks/base.py
|
{
"start": 1831,
"end": 2022
}
|
class ____:
def check(self) -> list[Problem]:
"""
Perform required checks and return a list of ``Problem`` instances.
"""
raise NotImplementedError
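# --- Hedged sketch (illustrative) --- a minimal concrete subclass following the
# contract documented above: check() returns a list of Problem instances, empty
# when everything is healthy. StatusCheck is the base class defined above.
class AlwaysHealthyCheck(StatusCheck):
    def check(self) -> list[Problem]:
        return []   # no problems detected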
|
StatusCheck
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/vertex_ai/test_experiment_service.py
|
{
"start": 4839,
"end": 5681
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("ExperimentRunHook"))
def test_execute(self, mock_hook):
op = ListExperimentRunsOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.list_experiment_runs.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
)
|
TestVertexAIListExperimentRunsOperator
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-distinct-islands.py
|
{
"start": 37,
"end": 918
}
|
class ____(object):
def numDistinctIslands(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
directions = {'l':[-1, 0], 'r':[ 1, 0], \
'u':[ 0, 1], 'd':[ 0, -1]}
def dfs(i, j, grid, island):
if not (0 <= i < len(grid) and \
0 <= j < len(grid[0]) and \
grid[i][j] > 0):
return False
grid[i][j] *= -1
for k, v in directions.iteritems():
island.append(k)
dfs(i+v[0], j+v[1], grid, island)
return True
islands = set()
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
island = []
if dfs(i, j, grid, island):
islands.add("".join(island))
return len(islands)
|
Solution
|
python
|
coleifer__peewee
|
tests/cockroachdb.py
|
{
"start": 517,
"end": 569
}
|
class ____(TestModel):
data = JSONField()
|
JsonModel
|
python
|
openai__openai-python
|
src/openai/types/vector_store_update_params.py
|
{
"start": 313,
"end": 920
}
|
class ____(TypedDict, total=False):
expires_after: Optional[ExpiresAfter]
"""The expiration policy for a vector store."""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
name: Optional[str]
"""The name of the vector store."""
|
VectorStoreUpdateParams
|
python
|
doocs__leetcode
|
solution/0000-0099/0005.Longest Palindromic Substring/Solution2.py
|
{
"start": 0,
"end": 479
}
|
class ____:
def longestPalindrome(self, s: str) -> str:
def f(l, r):
while l >= 0 and r < n and s[l] == s[r]:
l, r = l - 1, r + 1
return r - l - 1
n = len(s)
start, mx = 0, 1
for i in range(n):
a = f(i, i)
b = f(i, i + 1)
t = max(a, b)
if mx < t:
mx = t
start = i - ((t - 1) >> 1)
return s[start : start + mx]
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/ops/batching.py
|
{
"start": 12914,
"end": 14358
}
|
class ____(dataset_ops.UnaryDataset):
"""A `Dataset` that batches ragged dense elements into `tf.sparse.SparseTensor`s."""
def __init__(self, input_dataset, batch_size, row_shape):
"""See `Dataset.dense_to_sparse_batch()` for more details."""
if not isinstance(
dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType):
raise TypeError("`dense_to_sparse_batch` requires an input dataset whose "
"elements have a single component, but the given dataset "
"has the following component types: "
f"{dataset_ops.get_legacy_output_types(input_dataset)}.")
self._input_dataset = input_dataset
self._batch_size = batch_size
self._row_shape = row_shape
self._element_spec = sparse_tensor.SparseTensorSpec(
tensor_shape.TensorShape([None]).concatenate(self._row_shape),
dataset_ops.get_legacy_output_types(input_dataset))
variant_tensor = ged_ops.dense_to_sparse_batch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._batch_size,
row_shape=convert.partial_shape_to_tensor(self._row_shape),
**self._flat_structure)
super(_DenseToSparseBatchDataset, self).__init__(input_dataset,
variant_tensor)
@property
def element_spec(self):
return self._element_spec
|
_DenseToSparseBatchDataset
|
python
|
scikit-learn__scikit-learn
|
examples/calibration/plot_calibration_curve.py
|
{
"start": 6841,
"end": 11519
}
|
class ____(LinearSVC):
"""LinearSVC with `predict_proba` method that naively scales
`decision_function` output for binary classification."""
def fit(self, X, y):
super().fit(X, y)
df = self.decision_function(X)
self.df_min_ = df.min()
self.df_max_ = df.max()
def predict_proba(self, X):
"""Min-max scale output of `decision_function` to [0, 1]."""
df = self.decision_function(X)
calibrated_df = (df - self.df_min_) / (self.df_max_ - self.df_min_)
proba_pos_class = np.clip(calibrated_df, 0, 1)
proba_neg_class = 1 - proba_pos_class
proba = np.c_[proba_neg_class, proba_pos_class]
return proba
# %%
lr = LogisticRegression(C=1.0)
svc = NaivelyCalibratedLinearSVC(max_iter=10_000)
svc_isotonic = CalibratedClassifierCV(svc, cv=2, method="isotonic")
svc_sigmoid = CalibratedClassifierCV(svc, cv=2, method="sigmoid")
clf_list = [
(lr, "Logistic"),
(svc, "SVC"),
(svc_isotonic, "SVC + Isotonic"),
(svc_sigmoid, "SVC + Sigmoid"),
]
# %%
fig = plt.figure(figsize=(10, 10))
gs = GridSpec(4, 2)
ax_calibration_curve = fig.add_subplot(gs[:2, :2])
calibration_displays = {}
for i, (clf, name) in enumerate(clf_list):
clf.fit(X_train, y_train)
display = CalibrationDisplay.from_estimator(
clf,
X_test,
y_test,
n_bins=10,
name=name,
ax=ax_calibration_curve,
color=colors(i),
)
calibration_displays[name] = display
ax_calibration_curve.grid()
ax_calibration_curve.set_title("Calibration plots (SVC)")
# Add histogram
grid_positions = [(2, 0), (2, 1), (3, 0), (3, 1)]
for i, (_, name) in enumerate(clf_list):
row, col = grid_positions[i]
ax = fig.add_subplot(gs[row, col])
ax.hist(
calibration_displays[name].y_prob,
range=(0, 1),
bins=10,
label=name,
color=colors(i),
)
ax.set(title=name, xlabel="Mean predicted probability", ylabel="Count")
plt.tight_layout()
plt.show()
# %%
# :class:`~sklearn.svm.LinearSVC` shows the opposite
# behavior to :class:`~sklearn.naive_bayes.GaussianNB`; the calibration
# curve has a sigmoid shape, which is typical for an under-confident
# classifier. In the case of :class:`~sklearn.svm.LinearSVC`, this is caused
# by the margin property of the hinge loss, which focuses on samples that are
# close to the decision boundary (support vectors). Samples that are far
# away from the decision boundary do not impact the hinge loss. It thus makes
# sense that :class:`~sklearn.svm.LinearSVC` does not try to separate samples
# in the high confidence region regions. This leads to flatter calibration
# curves near 0 and 1 and is empirically shown with a variety of datasets
# in Niculescu-Mizil & Caruana [1]_.
#
# Both kinds of calibration (sigmoid and isotonic) can fix this issue and
# yield similar results.
#
# As before, we show the :ref:`brier_score_loss`, :ref:`log_loss`,
# :ref:`precision, recall, F1 score <precision_recall_f_measure_metrics>` and
# :ref:`ROC AUC <roc_metrics>`.
scores = defaultdict(list)
for i, (clf, name) in enumerate(clf_list):
clf.fit(X_train, y_train)
y_prob = clf.predict_proba(X_test)
y_pred = clf.predict(X_test)
scores["Classifier"].append(name)
for metric in [brier_score_loss, log_loss, roc_auc_score]:
score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
scores[score_name].append(metric(y_test, y_prob[:, 1]))
for metric in [precision_score, recall_score, f1_score]:
score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
scores[score_name].append(metric(y_test, y_pred))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df.round(decimals=3)
score_df
# %%
# As with :class:`~sklearn.naive_bayes.GaussianNB` above, calibration improves
# both :ref:`brier_score_loss` and :ref:`log_loss` but does not alter the
# prediction accuracy measures (precision, recall and F1 score) much.
#
# Summary
# -------
#
# Parametric sigmoid calibration can deal with situations where the calibration
# curve of the base classifier is sigmoid (e.g., for
# :class:`~sklearn.svm.LinearSVC`) but not where it is transposed-sigmoid
# (e.g., :class:`~sklearn.naive_bayes.GaussianNB`). Non-parametric
# isotonic calibration can deal with both situations but may require more
# data to produce good results.
#
# References
# ----------
#
# .. [1] `Predicting Good Probabilities with Supervised Learning
# <https://dl.acm.org/doi/pdf/10.1145/1102351.1102430>`_,
# A. Niculescu-Mizil & R. Caruana, ICML 2005
|
NaivelyCalibratedLinearSVC
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_external_account_bank_accounts.py
|
{
"start": 8069,
"end": 14457
}
|
class ____(TestCase):
@HttpMocker()
def test_given_no_state_when_read_then_use_external_accounts_endpoint(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_object(_OBJECT).with_limit(100).build(),
_external_bank_accounts_response().with_record(_an_external_bank_account()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE), _NO_STATE)
most_recent_state = output.most_recent_state
assert most_recent_state.stream_descriptor == StreamDescriptor(name=_STREAM_NAME)
assert int(most_recent_state.stream_state.updated) == int(_NOW.timestamp())
@HttpMocker()
def test_given_state_when_read_then_query_events_using_types_and_state_value_plus_1(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=40)
state_datetime = _NOW - timedelta(days=5)
cursor_value = int(state_datetime.timestamp()) + 1
http_mocker.get(
_events_request().with_created_gte(state_datetime).with_created_lte(_NOW).with_limit(100).with_types(_EVENT_TYPES).build(),
_events_response()
.with_record(_an_event().with_cursor(cursor_value).with_field(_DATA_FIELD, _an_external_bank_account().build()))
.build(),
)
output = self._read(
_config().with_start_date(start_date),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
most_recent_state = output.most_recent_state
assert most_recent_state.stream_descriptor == StreamDescriptor(name=_STREAM_NAME)
assert most_recent_state.stream_state.updated == str(cursor_value)
@HttpMocker()
def test_given_object_is_not_back_account_when_read_then_filter_out(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=40)
state_datetime = _NOW - timedelta(days=5)
http_mocker.get(
StripeRequestBuilder.events_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_events_response().with_record(_an_event().with_field(_DATA_FIELD, {"object": "not a bank account"})).build(),
)
output = self._read(
_config().with_start_date(start_date),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
assert len(output.records) == 0
@HttpMocker()
def test_given_state_and_pagination_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
state_datetime = _NOW - timedelta(days=5)
http_mocker.get(
_events_request().with_created_gte(state_datetime).with_created_lte(_NOW).with_limit(100).with_types(_EVENT_TYPES).build(),
_events_response()
.with_pagination()
.with_record(_an_event().with_id("last_record_id_from_first_page").with_field(_DATA_FIELD, _an_external_bank_account().build()))
.build(),
)
http_mocker.get(
_events_request()
.with_starting_after("last_record_id_from_first_page")
.with_created_gte(state_datetime)
.with_created_lte(_NOW)
.with_limit(100)
.with_types(_EVENT_TYPES)
.build(),
_events_response().with_record(self._an_external_account_event()).build(),
)
output = self._read(
_config(),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
assert len(output.records) == 2
@HttpMocker()
def test_given_state_and_small_slice_range_when_read_then_perform_multiple_queries(self, http_mocker: HttpMocker) -> None:
state_datetime = _NOW - timedelta(days=5)
slice_range = timedelta(days=3)
slice_datetime = state_datetime + slice_range
http_mocker.get(
_events_request()
.with_created_gte(state_datetime)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.with_types(_EVENT_TYPES)
.build(),
_events_response().with_record(self._an_external_account_event()).build(),
)
http_mocker.get(
_events_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).with_types(_EVENT_TYPES).build(),
_events_response().with_record(self._an_external_account_event()).with_record(self._an_external_account_event()).build(),
)
output = self._read(
_config().with_start_date(_NOW - timedelta(days=30)).with_slice_range_in_days(slice_range.days),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_datetime.timestamp())}).build(),
)
assert len(output.records) == 3
@HttpMocker()
def test_given_state_earlier_than_30_days_when_read_then_query_events_using_types_and_event_lower_boundary(
self, http_mocker: HttpMocker
) -> None:
# this seems odd as we would miss some data between start_date and events_lower_boundary. In that case, we should hit the
# external_accounts endpoint
start_date = _NOW - timedelta(days=40)
state_value = _NOW - timedelta(days=39)
events_lower_boundary = _NOW - timedelta(days=30)
http_mocker.get(
_events_request()
.with_created_gte(events_lower_boundary)
.with_created_lte(_NOW)
.with_limit(100)
.with_types(_EVENT_TYPES)
.build(),
_events_response().with_record(self._an_external_account_event()).build(),
)
self._read(
_config().with_start_date(start_date),
StateBuilder().with_stream_state(_STREAM_NAME, {"updated": int(state_value.timestamp())}).build(),
)
# request matched http_mocker
def _an_external_account_event(self) -> RecordBuilder:
return _an_event().with_field(_DATA_FIELD, _an_external_bank_account().build())
def _read(self, config: ConfigBuilder, state: Optional[Dict[str, Any]], expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.incremental, state, expecting_exception)
|
IncrementalTest
|
python
|
doocs__leetcode
|
solution/1700-1799/1761.Minimum Degree of a Connected Trio in a Graph/Solution.py
|
{
"start": 63,
"end": 676
}
|
class ____:
def minTrioDegree(self, n: int, edges: List[List[int]]) -> int:
g = [[False] * n for _ in range(n)]
deg = [0] * n
for u, v in edges:
u, v = u - 1, v - 1
g[u][v] = g[v][u] = True
deg[u] += 1
deg[v] += 1
ans = inf
for i in range(n):
for j in range(i + 1, n):
if g[i][j]:
for k in range(j + 1, n):
if g[i][k] and g[j][k]:
ans = min(ans, deg[i] + deg[j] + deg[k] - 6)
return -1 if ans == inf else ans
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/workspace.py
|
{
"start": 495,
"end": 629
}
|
class ____:
commit_hash: Optional[str] = None
url: Optional[str] = None
@whitelist_for_serdes
@record(kw_only=False)
|
GitMetadata
|
python
|
joblib__joblib
|
joblib/externals/loky/initializers.py
|
{
"start": 961,
"end": 2567
}
|
class ____:
"""Compound worker initializer
This is meant to be used in conjunction with _chain_initializers to
produce the necessary chained_args list to be passed to __call__.
"""
def __init__(self, initializers):
self._initializers = initializers
def __call__(self, *chained_args):
for initializer, args in zip(self._initializers, chained_args):
initializer(*args)
def _chain_initializers(initializer_and_args):
"""Convenience helper to combine a sequence of initializers.
If some initializers are None, they are filtered out.
"""
filtered_initializers = []
filtered_initargs = []
for initializer, initargs in initializer_and_args:
if initializer is not None:
filtered_initializers.append(initializer)
filtered_initargs.append(initargs)
if not filtered_initializers:
return None, ()
elif len(filtered_initializers) == 1:
return filtered_initializers[0], filtered_initargs[0]
else:
return _ChainedInitializer(filtered_initializers), filtered_initargs
def _prepare_initializer(initializer, initargs):
if initializer is not None and not callable(initializer):
raise TypeError(
f"initializer must be a callable, got: {initializer!r}"
)
# Introspect runtime to determine if we need to propagate the viztracer
# profiler information to the workers:
return _chain_initializers(
[
(initializer, initargs),
_make_viztracer_initializer_and_initargs(),
]
)
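# --- Hedged usage sketch (illustrative; relies only on the helpers defined above) ---
# _chain_initializers drops the None entries and, with two or more survivors,
# wraps them in the chained initializer class so each one receives its own args tuple.
import os

def _set_env(key, value):
    os.environ[key] = value

init, initargs = _chain_initializers([
    (_set_env, ("WORKER_ROLE", "io")),
    (None, ()),                      # filtered out
    (print, ("worker ready",)),
])
init(*initargs)   # calls _set_env("WORKER_ROLE", "io") then print("worker ready")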
|
_ChainedInitializer
|
python
|
neetcode-gh__leetcode
|
python/0210-course-schedule-ii.py
|
{
"start": 0,
"end": 789
}
|
class ____:
def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
prereq = {c: [] for c in range(numCourses)}
for crs, pre in prerequisites:
prereq[crs].append(pre)
output = []
visit, cycle = set(), set()
def dfs(crs):
if crs in cycle:
return False
if crs in visit:
return True
cycle.add(crs)
for pre in prereq[crs]:
if dfs(pre) == False:
return False
cycle.remove(crs)
visit.add(crs)
output.append(crs)
return True
for c in range(numCourses):
if dfs(c) == False:
return []
return output
|
Solution
|
python
|
pydata__xarray
|
xarray/plot/facetgrid.py
|
{
"start": 1556,
"end": 37980
}
|
class ____(Generic[T_DataArrayOrSet]):
"""
Initialize the Matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links an xarray DataArray to
a Matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
axes, where each axes shows the same relationship conditioned on
different levels of some dimension. It's possible to condition on up to
two variables by assigning variables to the rows and columns of the
grid.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the DataArray and the variable names that are used to structure the grid.
Then plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.
Attributes
----------
axs : ndarray of matplotlib.axes.Axes
Array containing axes in corresponding position, as returned from
:py:func:`matplotlib.pyplot.subplots`.
col_labels : list of matplotlib.text.Annotation
Column titles.
row_labels : list of matplotlib.text.Annotation
Row titles.
fig : matplotlib.figure.Figure
The figure containing all the axes.
name_dicts : ndarray of dict
Array containing dictionaries mapping coordinate names to values. ``None`` is
used as a sentinel value for axes that should remain empty, i.e.,
sometimes the rightmost grid positions in the bottom row.
"""
data: T_DataArrayOrSet
name_dicts: np.ndarray
fig: Figure
axs: np.ndarray
row_names: list[np.ndarray]
col_names: list[np.ndarray]
figlegend: Legend | None
quiverkey: QuiverKey | None
cbar: Colorbar | None
_single_group: bool | Hashable
_nrow: int
_row_var: Hashable | None
_ncol: int
_col_var: Hashable | None
_col_wrap: int | None
row_labels: list[Annotation | None]
col_labels: list[Annotation | None]
_x_var: None
_y_var: None
_hue_var: DataArray | None
_cmap_extend: Any | None
_mappables: list[ScalarMappable]
_finalized: bool
def __init__(
self,
data: T_DataArrayOrSet,
col: Hashable | None = None,
row: Hashable | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
figsize: Iterable[float] | None = None,
aspect: float = 1,
size: float = 3,
subplot_kws: dict[str, Any] | None = None,
) -> None:
"""
Parameters
----------
data : DataArray or Dataset
DataArray or Dataset to be plotted.
row, col : str
Dimension names that define subsets of the data, which will be drawn
on separate facets in the grid.
col_wrap : int, optional
"Wrap" the grid the for the column variable after this number of columns,
adding rows if ``col_wrap`` is less than the number of facets.
sharex : bool, optional
If true, the facets will share *x* axes.
sharey : bool, optional
If true, the facets will share *y* axes.
figsize : Iterable of float or None, optional
A tuple (width, height) of the figure in inches.
If set, overrides ``size`` and ``aspect``.
aspect : scalar, default: 1
Aspect ratio of each facet, so that ``aspect * size`` gives the
width of each facet in inches.
size : scalar, default: 3
Height (in inches) of each facet. See also: ``aspect``.
subplot_kws : dict, optional
Dictionary of keyword arguments for Matplotlib subplots
(:py:func:`matplotlib.pyplot.subplots`).
"""
import matplotlib.pyplot as plt
# Handle corner case of nonunique coordinates
rep_col = col is not None and not data[col].to_index().is_unique
rep_row = row is not None and not data[row].to_index().is_unique
if rep_col or rep_row:
raise ValueError(
"Coordinates used for faceting cannot "
"contain repeated (nonunique) values."
)
# single_group is the grouping variable, if there is exactly one
single_group: bool | Hashable
if col and row:
single_group = False
nrow = len(data[row])
ncol = len(data[col])
nfacet = nrow * ncol
if col_wrap is not None:
warnings.warn(
"Ignoring col_wrap since both col and row were passed", stacklevel=2
)
elif row and not col:
single_group = row
elif not row and col:
single_group = col
else:
raise ValueError("Pass a coordinate name as an argument for row or col")
# Compute grid shape
if single_group:
nfacet = len(data[single_group])
if col:
# idea - could add heuristic for nice shapes like 3x4
ncol = nfacet
if row:
ncol = 1
if col_wrap is not None:
# Overrides previous settings
ncol = col_wrap
nrow = int(np.ceil(nfacet / ncol))
# Set the subplot kwargs
subplot_kws = {} if subplot_kws is None else subplot_kws
if figsize is None:
# Calculate the base figure size with extra horizontal space for a
# colorbar
cbar_space = 1
figsize = (ncol * size * aspect + cbar_space, nrow * size)
fig, axs = plt.subplots(
nrow,
ncol,
sharex=sharex,
sharey=sharey,
squeeze=False,
figsize=figsize,
subplot_kw=subplot_kws,
)
# Set up the lists of names for the row and column facet variables
col_names = list(data[col].to_numpy()) if col else []
row_names = list(data[row].to_numpy()) if row else []
if single_group:
full: list[dict[Hashable, Any] | None] = [
{single_group: x} for x in data[single_group].to_numpy()
]
empty: list[dict[Hashable, Any] | None] = [
None for x in range(nrow * ncol - len(full))
]
name_dict_list = full + empty
else:
rowcols = itertools.product(row_names, col_names)
name_dict_list = [{row: r, col: c} for r, c in rowcols]
name_dicts = np.array(name_dict_list).reshape(nrow, ncol)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.name_dicts = name_dicts
self.fig = fig
self.axs = axs
self.row_names = row_names
self.col_names = col_names
# guides
self.figlegend = None
self.quiverkey = None
self.cbar = None
# Next the private variables
self._single_group = single_group
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._col_wrap = col_wrap
self.row_labels = [None] * nrow
self.col_labels = [None] * ncol
self._x_var = None
self._y_var = None
self._hue_var = None
self._cmap_extend = None
self._mappables = []
self._finalized = False
@property
def axes(self) -> np.ndarray:
warnings.warn(
(
"self.axes is deprecated since 2022.11 in order to align with "
"matplotlibs plt.subplots, use self.axs instead."
),
DeprecationWarning,
stacklevel=2,
)
return self.axs
@axes.setter
def axes(self, axs: np.ndarray) -> None:
warnings.warn(
(
"self.axes is deprecated since 2022.11 in order to align with "
"matplotlibs plt.subplots, use self.axs instead."
),
DeprecationWarning,
stacklevel=2,
)
self.axs = axs
@property
def _left_axes(self) -> np.ndarray:
return self.axs[:, 0]
@property
def _bottom_axes(self) -> np.ndarray:
return self.axs[-1, :]
def map_dataarray(
self: T_FacetGrid,
func: Callable,
x: Hashable | None,
y: Hashable | None,
**kwargs: Any,
) -> T_FacetGrid:
"""
Apply a plotting function to a 2d facet's subset of the data.
This is more convenient and less general than ``FacetGrid.map``
Parameters
----------
func : callable
A plotting function with the same signature as a 2d xarray
plotting method such as `xarray.plot.imshow`
x, y : string
Names of the coordinates to plot on x, y axes
**kwargs
additional keyword arguments to func
Returns
-------
self : FacetGrid object
"""
if kwargs.get("cbar_ax") is not None:
raise ValueError("cbar_ax not supported by FacetGrid.")
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, self.data.to_numpy(), **kwargs
)
self._cmap_extend = cmap_params.get("extend")
# Order is important
func_kwargs = {
k: v
for k, v in kwargs.items()
if k not in {"cmap", "colors", "cbar_kwargs", "levels"}
}
func_kwargs.update(cmap_params)
# to avoid redundant calling, colorbar and labelling is instead handled
# by `_finalize_grid` at the end
func_kwargs["add_colorbar"] = False
if func.__name__ != "surface":
func_kwargs["add_labels"] = False
# Get x, y labels for the first subplot
x, y = _infer_xy_labels(
darray=self.data.loc[self.name_dicts.flat[0]],
x=x,
y=y,
imshow=func.__name__ == "imshow",
rgb=kwargs.get("rgb"),
)
for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(
subset, x=x, y=y, ax=ax, **func_kwargs, _is_facetgrid=True
)
self._mappables.append(mappable)
xlabel = label_from_attrs(self.data[x])
ylabel = label_from_attrs(self.data[y])
self._finalize_grid(xlabel, ylabel)
if kwargs.get("add_colorbar", True):
self.add_colorbar(**cbar_kwargs)
return self
def map_plot1d(
self: T_FacetGrid,
func: Callable,
x: Hashable | None,
y: Hashable | None,
*,
z: Hashable | None = None,
hue: Hashable | None = None,
markersize: Hashable | None = None,
linewidth: Hashable | None = None,
**kwargs: Any,
) -> T_FacetGrid:
"""
Apply a plotting function to a 1d facet's subset of the data.
This is more convenient and less general than ``FacetGrid.map``
Parameters
----------
func :
A plotting function with the same signature as a 1d xarray
plotting method such as `xarray.plot.scatter`
x, y :
Names of the coordinates to plot on x, y axes
**kwargs
additional keyword arguments to func
Returns
-------
self : FacetGrid object
"""
# Copy data to allow converting categoricals to integers and storing
# them in self.data. It is not possible to copy in the init
# unfortunately as there are tests that relies on self.data being
# mutable (test_names_appear_somewhere()). Maybe something to deprecate
# not sure how much that is used outside these tests.
self.data = self.data.copy()
if kwargs.get("cbar_ax") is not None:
raise ValueError("cbar_ax not supported by FacetGrid.")
if func.__name__ == "scatter":
size_ = kwargs.pop("_size", markersize)
size_r = _MARKERSIZE_RANGE
else:
size_ = kwargs.pop("_size", linewidth)
size_r = _LINEWIDTH_RANGE
# Guess what coords to use if some of the values in coords_to_plot are None:
coords_to_plot: MutableMapping[str, Hashable | None] = dict(
x=x, z=z, hue=hue, size=size_
)
coords_to_plot = _guess_coords_to_plot(self.data, coords_to_plot, kwargs)
# Handle hues:
hue = coords_to_plot["hue"]
hueplt = self.data.coords[hue] if hue else None # TODO: _infer_line_data2 ?
hueplt_norm = _Normalize(hueplt)
self._hue_var = hueplt
cbar_kwargs = kwargs.pop("cbar_kwargs", {})
if hueplt_norm.data is not None:
if not hueplt_norm.data_is_numeric:
# TODO: Ticks seems a little too hardcoded, since it will always
# show all the values. But maybe it's ok, since plotting hundreds
# of categorical data isn't that meaningful anyway.
cbar_kwargs.update(format=hueplt_norm.format, ticks=hueplt_norm.ticks)
kwargs.update(levels=hueplt_norm.levels)
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func,
cast("DataArray", hueplt_norm.values).data,
cbar_kwargs=cbar_kwargs,
**kwargs,
)
self._cmap_extend = cmap_params.get("extend")
else:
cmap_params = {}
# Handle sizes:
size_ = coords_to_plot["size"]
sizeplt = self.data.coords[size_] if size_ else None
sizeplt_norm = _Normalize(data=sizeplt, width=size_r)
if sizeplt_norm.data is not None:
self.data[size_] = sizeplt_norm.values
# Add kwargs that are sent to the plotting function, # order is important ???
func_kwargs = {
k: v
for k, v in kwargs.items()
if k not in {"cmap", "colors", "cbar_kwargs", "levels"}
}
func_kwargs.update(cmap_params)
# Annotations will be handled later, skip those parts in the plotfunc:
func_kwargs["add_colorbar"] = False
func_kwargs["add_legend"] = False
func_kwargs["add_title"] = False
add_labels_ = np.zeros(self.axs.shape + (3,), dtype=bool)
if kwargs.get("z") is not None:
# 3d plots looks better with all labels. 3d plots can't sharex either so it
# is easy to get lost while rotating the plots:
add_labels_[:] = True
else:
# Subplots should have labels on the left and bottom edges only:
add_labels_[-1, :, 0] = True # x
add_labels_[:, 0, 1] = True # y
# add_labels_[:, :, 2] = True # z
# Set up the lists of names for the row and column facet variables:
if self._single_group:
full = tuple(
{self._single_group: x}
for x in range(self.data[self._single_group].size)
)
empty = tuple(None for x in range(self._nrow * self._ncol - len(full)))
name_d = full + empty
else:
rowcols = itertools.product(
range(self.data[self._row_var].size),
range(self.data[self._col_var].size),
)
name_d = tuple({self._row_var: r, self._col_var: c} for r, c in rowcols)
name_dicts = np.array(name_d).reshape(self._nrow, self._ncol)
# Plot the data for each subplot:
for add_lbls, d, ax in zip(
add_labels_.reshape((self.axs.size, -1)),
name_dicts.flat,
self.axs.flat,
strict=True,
):
func_kwargs["add_labels"] = add_lbls
# None is the sentinel value
if d is not None:
subset = self.data.isel(d)
mappable = func(
subset,
x=x,
y=y,
ax=ax,
hue=hue,
_size=size_,
**func_kwargs,
_is_facetgrid=True,
)
self._mappables.append(mappable)
# Add titles and some touch ups:
self._finalize_grid()
self._set_lims()
add_colorbar, add_legend = _determine_guide(
hueplt_norm,
sizeplt_norm,
kwargs.get("add_colorbar"),
kwargs.get("add_legend"),
# kwargs.get("add_guide", None),
# kwargs.get("hue_style", None),
)
if add_legend:
use_legend_elements = func.__name__ != "hist"
if use_legend_elements:
self.add_legend(
use_legend_elements=use_legend_elements,
hueplt_norm=hueplt_norm if not add_colorbar else _Normalize(None),
sizeplt_norm=sizeplt_norm,
primitive=self._mappables,
legend_ax=self.fig,
plotfunc=func.__name__,
)
else:
self.add_legend(use_legend_elements=use_legend_elements)
if add_colorbar:
# Colorbar is after legend so it correctly fits the plot:
if "label" not in cbar_kwargs:
cbar_kwargs["label"] = label_from_attrs(hueplt_norm.data)
self.add_colorbar(**cbar_kwargs)
return self
def map_dataarray_line(
self: T_FacetGrid,
func: Callable,
x: Hashable | None,
y: Hashable | None,
hue: Hashable | None,
add_legend: bool = True,
_labels=None,
**kwargs: Any,
) -> T_FacetGrid:
from xarray.plot.dataarray_plot import _infer_line_data
for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(
subset,
x=x,
y=y,
ax=ax,
hue=hue,
add_legend=False,
_labels=False,
**kwargs,
)
self._mappables.append(mappable)
xplt, yplt, hueplt, huelabel = _infer_line_data(
darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue
)
xlabel = label_from_attrs(xplt)
ylabel = label_from_attrs(yplt)
self._hue_var = hueplt
self._finalize_grid(xlabel, ylabel)
if add_legend and hueplt is not None and huelabel is not None:
self.add_legend(label=huelabel)
return self
def map_dataset(
self: T_FacetGrid,
func: Callable,
x: Hashable | None = None,
y: Hashable | None = None,
hue: Hashable | None = None,
hue_style: HueStyleOptions = None,
add_guide: bool | None = None,
**kwargs: Any,
) -> T_FacetGrid:
from xarray.plot.dataset_plot import _infer_meta_data
kwargs["add_guide"] = False
if kwargs.get("markersize"):
kwargs["size_mapping"] = _parse_size(
self.data[kwargs["markersize"]], kwargs.pop("size_norm", None)
)
meta_data = _infer_meta_data(
self.data, x, y, hue, hue_style, add_guide, funcname=func.__name__
)
kwargs["meta_data"] = meta_data
if hue and meta_data["hue_style"] == "continuous":
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, self.data[hue].to_numpy(), **kwargs
)
kwargs["meta_data"]["cmap_params"] = cmap_params
kwargs["meta_data"]["cbar_kwargs"] = cbar_kwargs
kwargs["_is_facetgrid"] = True
if func.__name__ == "quiver" and "scale" not in kwargs:
raise ValueError("Please provide scale.")
# TODO: come up with an algorithm for reasonable scale choice
for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
maybe_mappable = func(
ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs
)
# TODO: this is needed to get legends to work.
# but maybe_mappable is a list in that case :/
self._mappables.append(maybe_mappable)
self._finalize_grid(meta_data["xlabel"], meta_data["ylabel"])
if hue:
hue_label = meta_data.pop("hue_label", None)
self._hue_label = hue_label
if meta_data["add_legend"]:
self._hue_var = meta_data["hue"]
self.add_legend(label=hue_label)
elif meta_data["add_colorbar"]:
self.add_colorbar(label=hue_label, **cbar_kwargs)
if meta_data["add_quiverkey"]:
self.add_quiverkey(kwargs["u"], kwargs["v"])
return self
def _finalize_grid(self, *axlabels: Hashable) -> None:
"""Finalize the annotations and layout."""
if not self._finalized:
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True):
if namedict is None:
ax.set_visible(False)
self._finalized = True
def _adjust_fig_for_guide(self, guide) -> None:
# Draw the plot to set the bounding boxes correctly
if hasattr(self.fig.canvas, "get_renderer"):
renderer = self.fig.canvas.get_renderer()
else:
raise RuntimeError("MPL backend has no renderer")
self.fig.draw(renderer)
# Calculate and set the new width of the figure so the legend fits
guide_width = guide.get_window_extent(renderer).width / self.fig.dpi
figure_width = self.fig.get_figwidth()
total_width = figure_width + guide_width
self.fig.set_figwidth(total_width)
# Draw the plot again to get the new transformations
self.fig.draw(renderer)
# Now calculate how much space we need on the right side
guide_width = guide.get_window_extent(renderer).width / self.fig.dpi
space_needed = guide_width / total_width + 0.02
# margin = .01
# _space_needed = margin + space_needed
right = 1 - space_needed
# Place the subplot axes to give space for the legend
self.fig.subplots_adjust(right=right)
def add_legend(
self,
*,
label: str | None = None,
use_legend_elements: bool = False,
**kwargs: Any,
) -> None:
if use_legend_elements:
self.figlegend = _add_legend(**kwargs)
else:
assert self._hue_var is not None
self.figlegend = self.fig.legend(
handles=self._mappables[-1],
labels=list(self._hue_var.to_numpy()),
title=label if label is not None else label_from_attrs(self._hue_var),
loc=kwargs.pop("loc", "center right"),
**kwargs,
)
self._adjust_fig_for_guide(self.figlegend)
def add_colorbar(self, **kwargs: Any) -> None:
"""Draw a colorbar."""
kwargs = kwargs.copy()
if self._cmap_extend is not None:
kwargs.setdefault("extend", self._cmap_extend)
# dont pass extend as kwarg if it is in the mappable
if hasattr(self._mappables[-1], "extend"):
kwargs.pop("extend", None)
if "label" not in kwargs:
from xarray import DataArray
assert isinstance(self.data, DataArray)
kwargs.setdefault("label", label_from_attrs(self.data))
self.cbar = self.fig.colorbar(
self._mappables[-1], ax=list(self.axs.flat), **kwargs
)
def add_quiverkey(self, u: Hashable, v: Hashable, **kwargs: Any) -> None:
kwargs = kwargs.copy()
magnitude = _get_nice_quiver_magnitude(self.data[u], self.data[v])
units = self.data[u].attrs.get("units", "")
self.quiverkey = self.axs.flat[-1].quiverkey(
self._mappables[-1],
X=0.8,
Y=0.9,
U=magnitude,
label=f"{magnitude}\n{units}",
labelpos="E",
coordinates="figure",
)
# TODO: does not work because self.quiverkey.get_window_extent(renderer) = 0
# https://github.com/matplotlib/matplotlib/issues/18530
# self._adjust_fig_for_guide(self.quiverkey.text)
def _get_largest_lims(self) -> dict[str, tuple[float, float]]:
"""
Get largest limits in the facetgrid.
Returns
-------
lims_largest : dict[str, tuple[float, float]]
Dictionary with the largest limits along each axis.
Examples
--------
>>> ds = xr.tutorial.scatter_example_dataset(seed=42)
>>> fg = ds.plot.scatter(x="A", y="B", hue="y", row="x", col="w")
>>> round(fg._get_largest_lims()["x"][0], 3)
np.float64(-0.334)
"""
lims_largest: dict[str, tuple[float, float]] = dict(
x=(np.inf, -np.inf), y=(np.inf, -np.inf), z=(np.inf, -np.inf)
)
for axis in ("x", "y", "z"):
# Find the plot with the largest xlim values:
lower, upper = lims_largest[axis]
for ax in self.axs.flat:
get_lim: Callable[[], tuple[float, float]] | None = getattr(
ax, f"get_{axis}lim", None
)
if get_lim:
lower_new, upper_new = get_lim()
lower, upper = (min(lower, lower_new), max(upper, upper_new))
lims_largest[axis] = (lower, upper)
return lims_largest
def _set_lims(
self,
x: tuple[float, float] | None = None,
y: tuple[float, float] | None = None,
z: tuple[float, float] | None = None,
) -> None:
"""
Set the same limits for all the subplots in the facetgrid.
Parameters
----------
x : tuple[float, float] or None, optional
x axis limits.
y : tuple[float, float] or None, optional
y axis limits.
z : tuple[float, float] or None, optional
z axis limits.
Examples
--------
>>> ds = xr.tutorial.scatter_example_dataset(seed=42)
>>> fg = ds.plot.scatter(x="A", y="B", hue="y", row="x", col="w")
>>> fg._set_lims(x=(-0.3, 0.3), y=(0, 2), z=(0, 4))
>>> fg.axs[0, 0].get_xlim(), fg.axs[0, 0].get_ylim()
((np.float64(-0.3), np.float64(0.3)), (np.float64(0.0), np.float64(2.0)))
"""
lims_largest = self._get_largest_lims()
# Set limits:
for ax in self.axs.flat:
for (axis, data_limit), parameter_limit in zip(
lims_largest.items(), (x, y, z), strict=True
):
set_lim = getattr(ax, f"set_{axis}lim", None)
if set_lim:
set_lim(data_limit if parameter_limit is None else parameter_limit)
def set_axis_labels(self, *axlabels: Hashable) -> None:
"""Set axis labels on the left column and bottom row of the grid."""
from xarray.core.dataarray import DataArray
for var, axis in zip(axlabels, ["x", "y", "z"], strict=False):
if var is not None:
if isinstance(var, DataArray):
getattr(self, f"set_{axis}labels")(label_from_attrs(var))
else:
getattr(self, f"set_{axis}labels")(str(var))
def _set_labels(
self, axis: str, axes: Iterable, label: str | None = None, **kwargs
) -> None:
if label is None:
label = label_from_attrs(self.data[getattr(self, f"_{axis}_var")])
for ax in axes:
getattr(ax, f"set_{axis}label")(label, **kwargs)
def set_xlabels(self, label: str | None = None, **kwargs: Any) -> None:
"""Label the x axis on the bottom row of the grid."""
self._set_labels("x", self._bottom_axes, label, **kwargs)
def set_ylabels(self, label: str | None = None, **kwargs: Any) -> None:
"""Label the y axis on the left column of the grid."""
self._set_labels("y", self._left_axes, label, **kwargs)
def set_zlabels(self, label: str | None = None, **kwargs: Any) -> None:
"""Label the z axis."""
self._set_labels("z", self._left_axes, label, **kwargs)
def set_titles(
self,
template: str = "{coord} = {value}",
maxchar: int = 30,
size=None,
**kwargs,
) -> None:
"""
Draw titles either above each facet or on the grid margins.
Parameters
----------
template : str, default: "{coord} = {value}"
Template for plot titles containing {coord} and {value}
maxchar : int, default: 30
Truncate titles at maxchar
**kwargs : keyword args
additional arguments to matplotlib.text
Returns
-------
self: FacetGrid object
"""
import matplotlib as mpl
if size is None:
size = mpl.rcParams["axes.labelsize"]
nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template)
if self._single_group:
for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True):
# Only label the ones with data
if d is not None:
coord, value = list(d.items()).pop()
title = nicetitle(coord, value)
ax.set_title(title, size=size, **kwargs)
else:
# The row titles on the right edge of the grid
for index, (ax, row_name, handle) in enumerate(
zip(self.axs[:, -1], self.row_names, self.row_labels, strict=True)
):
title = nicetitle(coord=self._row_var, value=row_name)
if not handle:
self.row_labels[index] = ax.annotate(
title,
xy=(1.02, 0.5),
xycoords="axes fraction",
rotation=270,
ha="left",
va="center",
**kwargs,
)
else:
handle.set_text(title)
handle.update(kwargs)
# The column titles on the top row
for index, (ax, col_name, handle) in enumerate(
zip(self.axs[0, :], self.col_names, self.col_labels, strict=True)
):
title = nicetitle(coord=self._col_var, value=col_name)
if not handle:
self.col_labels[index] = ax.set_title(title, size=size, **kwargs)
else:
handle.set_text(title)
handle.update(kwargs)
def set_ticks(
self,
max_xticks: int = _NTICKS,
max_yticks: int = _NTICKS,
fontsize: str | int = _FONTSIZE,
) -> None:
"""
Set and control tick behavior.
Parameters
----------
max_xticks, max_yticks : int, optional
Maximum number of labeled ticks to plot on x, y axes
fontsize : string or int
Font size as used by matplotlib text
Returns
-------
self : FacetGrid object
"""
from matplotlib.ticker import MaxNLocator
# Both are necessary
x_major_locator = MaxNLocator(nbins=max_xticks)
y_major_locator = MaxNLocator(nbins=max_yticks)
for ax in self.axs.flat:
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
for tick in itertools.chain(
ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks()
):
tick.label1.set_fontsize(fontsize)
def map(
self: T_FacetGrid, func: Callable, *args: Hashable, **kwargs: Any
) -> T_FacetGrid:
"""
Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
*args : Hashable
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
**kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : FacetGrid object
"""
import matplotlib.pyplot as plt
for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True):
if namedict is not None:
data = self.data.loc[namedict]
plt.sca(ax)
innerargs = [data[a].to_numpy() for a in args]
maybe_mappable = func(*innerargs, **kwargs)
# TODO: better way to verify that an artist is mappable?
# https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522
if maybe_mappable and hasattr(maybe_mappable, "autoscale_None"):
self._mappables.append(maybe_mappable)
self._finalize_grid(*args[:2])
return self
def _easy_facetgrid(
data: T_DataArrayOrSet,
plotfunc: Callable,
kind: Literal["line", "dataarray", "dataset", "plot1d"],
x: Hashable | None = None,
y: Hashable | None = None,
row: Hashable | None = None,
col: Hashable | None = None,
col_wrap: int | None = None,
sharex: bool = True,
sharey: bool = True,
aspect: float | None = None,
size: float | None = None,
subplot_kws: dict[str, Any] | None = None,
ax: Axes | None = None,
figsize: Iterable[float] | None = None,
**kwargs: Any,
) -> FacetGrid[T_DataArrayOrSet]:
"""
Convenience method to call xarray.plot.FacetGrid from 2d plotting methods
kwargs are the arguments to 2d plotting method
"""
if ax is not None:
raise ValueError("Can't use axes when making faceted plots.")
if aspect is None:
aspect = 1
if size is None:
size = 3
elif figsize is not None:
raise ValueError("cannot provide both `figsize` and `size` arguments")
if kwargs.get("z") is not None:
# 3d plots doesn't support sharex, sharey, reset to mpl defaults:
sharex = False
sharey = False
g = FacetGrid(
data=data,
col=col,
row=row,
col_wrap=col_wrap,
sharex=sharex,
sharey=sharey,
figsize=figsize,
aspect=aspect,
size=size,
subplot_kws=subplot_kws,
)
if kind == "line":
return g.map_dataarray_line(plotfunc, x, y, **kwargs)
if kind == "dataarray":
return g.map_dataarray(plotfunc, x, y, **kwargs)
if kind == "plot1d":
return g.map_plot1d(plotfunc, x, y, **kwargs)
if kind == "dataset":
return g.map_dataset(plotfunc, x, y, **kwargs)
raise ValueError(
f"kind must be one of `line`, `dataarray`, `dataset` or `plot1d`, got {kind}"
)
|
FacetGrid
|
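A hedged sketch of how a FacetGrid is usually obtained through xarray's faceting keywords rather than constructed directly; it assumes numpy, xarray and matplotlib are installed, and the array name and dimensions below are made up for illustration.
import numpy as np
import xarray as xr
da = xr.DataArray(
    np.random.rand(4, 10, 10),
    dims=("time", "y", "x"),
    name="temperature",
)
fg = da.plot.imshow(col="time", col_wrap=2)  # faceting kwargs return a FacetGrid
fg.set_titles("{coord}: {value}")            # same template mechanism as set_titles above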
python
|
huggingface__transformers
|
tests/models/chinese_clip/test_modeling_chinese_clip.py
|
{
"start": 14334,
"end": 17120
}
|
class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CHINESE_CLIP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (ChineseCLIPVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = ChineseCLIPVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=ChineseCLIPVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="CHINESE_CLIP does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
model = ChineseCLIPVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
|
ChineseCLIPVisionModelTest
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/strategies/_internal/misc.py
|
{
"start": 985,
"end": 3187
}
|
class ____(SampledFromStrategy[Ex]):
"""A strategy which always returns a single fixed value.
It's implemented as a length-one SampledFromStrategy so that all our
special-case logic for filtering and sets applies also to just(x).
The important difference from a SampledFromStrategy with only one
element to choose is that JustStrategy *never* touches the underlying
choice sequence, i.e. drawing neither reads from nor writes to `data`.
This is a reasonably important optimisation (or semantic distinction!)
for both JustStrategy and SampledFromStrategy.
"""
@property
def value(self) -> Ex:
return self.elements[0]
def __repr__(self) -> str:
suffix = "".join(
f".{name}({get_pretty_function_description(f)})"
for name, f in self._transformations
)
if self.value is None:
return "none()" + suffix
return f"just({get_pretty_function_description(self.value)}){suffix}"
def calc_is_cacheable(self, recur: RecurT) -> bool:
return is_hashable(self.value)
def do_filtered_draw(self, data: ConjectureData) -> Ex | UniqueIdentifier:
# The parent class's `do_draw` implementation delegates directly to
# `do_filtered_draw`, which we can greatly simplify in this case since
# we have exactly one value. (This also avoids drawing any data.)
return self._transform(self.value)
@defines_strategy(eager=True)
def just(value: T) -> SearchStrategy[T]:
"""Return a strategy which only generates ``value``.
Note: ``value`` is not copied. Be wary of using mutable values.
If ``value`` is the result of a callable, you can use
:func:`builds(callable) <hypothesis.strategies.builds>` instead
of ``just(callable())`` to get a fresh value each time.
Examples from this strategy do not shrink (because there is only one).
"""
return JustStrategy([value])
@defines_strategy(force_reusable_values=True)
def none() -> SearchStrategy[None]:
"""Return a strategy which only generates None.
Examples from this strategy do not shrink (because there is only
one).
"""
return just(None)
|
JustStrategy
|
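A small usage sketch, assuming the hypothesis package is installed, showing how just() and none() from the snippet above are typically combined in a test.
from hypothesis import given, strategies as st
@given(st.just(42) | st.none())  # "|" builds a one_of() of the two strategies
def test_accepts_fixed_or_none(value):
    assert value in (42, None)
test_accepts_fixed_or_none()  # the decorated function runs the body on drawn examples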
python
|
huggingface__transformers
|
src/transformers/training_args.py
|
{
"start": 143292,
"end": 143554
}
|
class ____(Enum):
NOT_PARALLEL = "not_parallel"
NOT_DISTRIBUTED = "not_distributed"
DISTRIBUTED = "distributed"
SAGEMAKER_MODEL_PARALLEL = "sagemaker_model_parallel"
SAGEMAKER_DATA_PARALLEL = "sagemaker_data_parallel"
TPU = "tpu"
|
ParallelMode
|
python
|
pytorch__pytorch
|
torch/_subclasses/fake_tensor.py
|
{
"start": 2960,
"end": 3188
}
|
class ____:
def __init__(self) -> None:
global RECURSION_COUNT
RECURSION_COUNT += 1
def __del__(self) -> None:
global RECURSION_COUNT
RECURSION_COUNT -= 1
@dataclass
|
IncrementRecursionCount
|
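An alternative sketch of the same depth-tracking idea, expressed as a context manager rather than PyTorch's __init__/__del__ pair, which makes the decrement explicit instead of relying on garbage collection.
from contextlib import contextmanager
RECURSION_COUNT = 0
@contextmanager
def increment_recursion_count():
    global RECURSION_COUNT
    RECURSION_COUNT += 1
    try:
        yield RECURSION_COUNT
    finally:
        RECURSION_COUNT -= 1
with increment_recursion_count() as depth:
    print(depth)        # 1 inside the block
print(RECURSION_COUNT)  # back to 0 afterwards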
python
|
numpy__numpy
|
numpy/_core/tests/test_dtype.py
|
{
"start": 37600,
"end": 39273
}
|
class ____:
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
b = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
@pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size")
def test_list_recursion(self):
l = []
l.append(('f', l))
with pytest.raises(RecursionError):
np.dtype(l)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
@pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size")
def test_tuple_recursion(self):
d = np.int32
for i in range(100000):
d = (d, (1,))
with pytest.raises(RecursionError):
np.dtype(d)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
@pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size")
def test_dict_recursion(self):
d = {"names": ['self'], "formats": [None], "offsets": [0]}
d['formats'][0] = d
with pytest.raises(RecursionError):
np.dtype(d)
|
TestMonsterType
|
python
|
aimacode__aima-python
|
deep_learning4e.py
|
{
"start": 5973,
"end": 12998
}
|
class ____(Layer):
"""Batch normalization layer."""
def __init__(self, size, eps=0.001):
super().__init__(size)
self.eps = eps
# self.weights = [beta, gamma]
self.weights = [0, 0]
self.inputs = None
def forward(self, inputs):
# mean value of inputs
mu = sum(inputs) / len(inputs)
# standard error of inputs
stderr = statistics.stdev(inputs)
self.inputs = inputs
res = []
# get normalized value of each input
for i in range(len(self.nodes)):
val = [(inputs[i] - mu) * self.weights[0] / np.sqrt(self.eps + stderr ** 2) + self.weights[1]]
res.append(val)
self.nodes[i].value = val
return res
def init_examples(examples, idx_i, idx_t, o_units):
"""Init examples from dataset.examples."""
inputs, targets = {}, {}
for i, e in enumerate(examples):
# input values of e
inputs[i] = [e[i] for i in idx_i]
if o_units > 1:
# one-hot representation of e's target
t = [0 for i in range(o_units)]
t[e[idx_t]] = 1
targets[i] = t
else:
# target value of e
targets[i] = [e[idx_t]]
return inputs, targets
def stochastic_gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1, verbose=False):
"""
Gradient descent algorithm to update the learnable parameters of a network.
:return: the updated network
"""
examples = dataset.examples # init data
for e in range(epochs):
total_loss = 0
random.shuffle(examples)
weights = [[node.weights for node in layer.nodes] for layer in net]
for batch in get_batch(examples, batch_size):
inputs, targets = init_examples(batch, dataset.inputs, dataset.target, len(net[-1].nodes))
# compute gradients of weights
gs, batch_loss = BackPropagation(inputs, targets, weights, net, loss)
# update weights with gradient descent
weights = [x + y for x, y in zip(weights, [np.array(tg) * -l_rate for tg in gs])]
total_loss += batch_loss
# update the weights of network each batch
for i in range(len(net)):
if weights[i].size != 0:
for j in range(len(weights[i])):
net[i].nodes[j].weights = weights[i][j]
if verbose:
print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
return net
def adam(dataset, net, loss, epochs=1000, rho=(0.9, 0.999), delta=1 / 10 ** 8,
l_rate=0.001, batch_size=1, verbose=False):
"""
[Figure 19.6]
Adam optimizer to update the learnable parameters of a network.
Required parameters are similar to gradient descent.
:return the updated network
"""
examples = dataset.examples
# init s,r and t
s = [[[0] * len(node.weights) for node in layer.nodes] for layer in net]
r = [[[0] * len(node.weights) for node in layer.nodes] for layer in net]
t = 0
# repeat util converge
for e in range(epochs):
# total loss of each epoch
total_loss = 0
random.shuffle(examples)
weights = [[node.weights for node in layer.nodes] for layer in net]
for batch in get_batch(examples, batch_size):
t += 1
inputs, targets = init_examples(batch, dataset.inputs, dataset.target, len(net[-1].nodes))
# compute gradients of weights
gs, batch_loss = BackPropagation(inputs, targets, weights, net, loss)
# update s,r,s_hat and r_gat
s = vector_add(scalar_vector_product(rho[0], s),
scalar_vector_product((1 - rho[0]), gs))
r = vector_add(scalar_vector_product(rho[1], r),
scalar_vector_product((1 - rho[1]), element_wise_product(gs, gs)))
s_hat = scalar_vector_product(1 / (1 - rho[0] ** t), s)
r_hat = scalar_vector_product(1 / (1 - rho[1] ** t), r)
# rescale r_hat
r_hat = map_vector(lambda x: 1 / (np.sqrt(x) + delta), r_hat)
# delta weights
delta_theta = scalar_vector_product(-l_rate, element_wise_product(s_hat, r_hat))
weights = vector_add(weights, delta_theta)
total_loss += batch_loss
# update the weights of network each batch
for i in range(len(net)):
if weights[i]:
for j in range(len(weights[i])):
net[i].nodes[j].weights = weights[i][j]
if verbose:
print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
return net
def BackPropagation(inputs, targets, theta, net, loss):
"""
The back-propagation algorithm for multilayer networks in only one epoch, to calculate gradients of theta.
:param inputs: a batch of inputs in an array. Each input is an iterable object
:param targets: a batch of targets in an array. Each target is an iterable object
:param theta: parameters to be updated
:param net: a list of predefined layer objects representing their linear sequence
:param loss: a predefined loss function taking array of inputs and targets
:return: gradients of theta, loss of the input batch
"""
assert len(inputs) == len(targets)
o_units = len(net[-1].nodes)
n_layers = len(net)
batch_size = len(inputs)
gradients = [[[] for _ in layer.nodes] for layer in net]
total_gradients = [[[0] * len(node.weights) for node in layer.nodes] for layer in net]
batch_loss = 0
# iterate over each example in batch
for e in range(batch_size):
i_val = inputs[e]
t_val = targets[e]
# forward pass and compute batch loss
for i in range(1, n_layers):
layer_out = net[i].forward(i_val)
i_val = layer_out
batch_loss += loss(t_val, layer_out)
# initialize delta
delta = [[] for _ in range(n_layers)]
previous = np.array([layer_out[i] - t_val[i] for i in range(o_units)])
h_layers = n_layers - 1
# backward pass
for i in range(h_layers, 0, -1):
layer = net[i]
derivative = np.array([layer.activation.derivative(node.value) for node in layer.nodes])
delta[i] = previous * derivative
# pass to layer i-1 in the next iteration
previous = np.matmul([delta[i]], theta[i])[0]
# compute gradient of layer i
gradients[i] = [scalar_vector_product(d, net[i].inputs) for d in delta[i]]
# add gradient of current example to batch gradient
total_gradients = vector_add(total_gradients, gradients)
return total_gradients, batch_loss
def get_batch(examples, batch_size=1):
"""Split examples into multiple batches"""
for i in range(0, len(examples), batch_size):
yield examples[i: i + batch_size]
|
BatchNormalizationLayer
|
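A toy illustration, not the aima-python network itself, of the per-batch update rule that stochastic_gradient_descent above applies, shown on a one-parameter objective f(w) = (w - 3)^2 so the "weights += -l_rate * gradient" step is easy to follow.
w, l_rate = 0.0, 0.1
for _ in range(100):
    grad = 2 * (w - 3)   # analytic gradient of (w - 3)**2
    w += -l_rate * grad  # same sign convention as the snippet above
print(round(w, 4))       # converges to ~3.0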
python
|
huggingface__transformers
|
tests/models/glm4v/test_processor_glm4v.py
|
{
"start": 1038,
"end": 10848
}
|
class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Glm4vProcessor
model_id = "THUDM/GLM-4.1V-9B-Thinking"
@classmethod
def _setup_test_attributes(cls, processor):
cls.image_token = processor.image_token
@classmethod
def _setup_from_pretrained(cls, model_id, **kwargs):
return super()._setup_from_pretrained(
model_id,
do_sample_frames=False,
patch_size=4,
size={"shortest_edge": 12 * 12, "longest_edge": 18 * 18},
**kwargs,
)
@require_torch
@require_av
def _test_apply_chat_template(
self,
modality: str,
batch_size: int,
return_tensors: str,
input_name: str,
processor_name: str,
input_data: list[str],
):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
if processor_name not in self.processor_class.get_attributes():
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
batch_messages = [
[
{
"role": "user",
"content": [{"type": "text", "text": "Describe this."}],
},
]
] * batch_size
# Test that jinja can be applied
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), batch_size)
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
formatted_prompt_tokenized = processor.apply_chat_template(
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
)
add_special_tokens = True
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
add_special_tokens = False
tok_output = processor.tokenizer(
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
)
expected_output = tok_output.input_ids
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
# Test that kwargs passed to processor's `__call__` are actually used
tokenized_prompt_100 = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
truncation=True,
return_tensors=return_tensors,
max_length=100,
)
self.assertEqual(len(tokenized_prompt_100[0]), 100)
# Test that `return_dict=True` returns text related inputs in the dict
out_dict_text = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
)
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
out_dict = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
fps=2
if isinstance(input_data[0], str)
else None, # by default no more than 2 frames per second, otherwise too slow
do_sample_frames=bool(isinstance(input_data[0], str)), # don't sample frames if decoded video is used
)
input_name = getattr(self, input_name)
self.assertTrue(input_name in out_dict)
self.assertEqual(len(out_dict["input_ids"]), batch_size)
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
if modality == "video":
# qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
expected_video_token_count = 0
for thw in out_dict["video_grid_thw"]:
expected_video_token_count += thw[0] * thw[1] * thw[2]
mm_len = expected_video_token_count
else:
mm_len = batch_size * 4
self.assertEqual(len(out_dict[input_name]), mm_len)
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
for k in out_dict:
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
@require_av
def test_apply_chat_template_video_frame_sampling(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
messages = [
[
{
"role": "user",
"content": [
{"type": "video"},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1)
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
self.assertListEqual(expected_output, formatted_prompt_tokenized)
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
# Add video URL for return dict and load with `num_frames` arg
messages[0][0]["content"][0] = {
"type": "video",
"url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
),
}
# Load with `video_fps` arg
video_fps = 10
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=video_fps,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 8)
# Load the whole video
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=False,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 24)
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
# because we assume they come from one video
messages[0][0]["content"][0] = {
"type": "video",
"url": [
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
],
}
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=False,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 4)
# When the inputs are frame URLs/paths we expect that those are already
# sampled and will raise an error is asked to sample again.
with self.assertRaisesRegex(
ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=True,
)
# def test_model_input_names(self):
# processor = self.get_processor()
# text = self.prepare_text_inputs(modalities=["image", "video"])
# image_input = self.prepare_image_inputs()
# video_inputs = self.prepare_video_inputs()
# inputs_dict = {"text": text, "images": image_input, "videos": video_inputs}
# inputs = processor(**inputs_dict, return_tensors="pt", do_sample_frames=False)
# self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
|
Glm4vProcessorTest
|
python
|
huggingface__transformers
|
src/transformers/models/speecht5/number_normalizer.py
|
{
"start": 728,
"end": 6948
}
|
class ____:
def __init__(self):
self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
self.teens = [
"",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
self.tens = ["", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
self.thousands = [
"",
"thousand",
"million",
"billion",
"trillion",
"quadrillion",
"quintillion",
"sextillion",
"septillion",
"octillion",
"nonillion",
"decillion",
]
# Define a dictionary to map currency symbols to their names
# Top most traded currencies according to
# https://en.wikipedia.org/wiki/Template:Most_traded_currencies
self.currency_symbols = {
"$": " dollars",
"€": " euros",
"£": " pounds",
"¢": " cents",
"¥": " japanese yen",
"﷼": " saudi riyal",
"₹": " indian rupees",
"₽": " russian rubles",
"฿": " thai baht",
"₺": " turkish liras",
"₴": " ukrainian hryvnia",
"₣": " swiss francs",
"₡": " costa rican colon",
"₱": " philippine peso",
"₪": " israeli shekels",
"₮": " mongolian tögrög",
"₩": " south korean won",
"₦": " nigerian naira",
"₫": " vietnamese Đồng",
}
def spell_number(self, num):
if num == 0:
return "zero"
parts = []
for i in range(0, len(self.thousands)):
if num % 1000 != 0:
part = ""
hundreds = num % 1000 // 100
tens_units = num % 100
if hundreds > 0:
part += self.ones[hundreds] + " hundred"
if tens_units > 0:
part += " and "
if tens_units > 10 and tens_units < 20:
part += self.teens[tens_units - 10]
else:
tens_digit = self.tens[tens_units // 10]
ones_digit = self.ones[tens_units % 10]
if tens_digit:
part += tens_digit
if ones_digit:
if tens_digit:
part += " "
part += ones_digit
parts.append(part)
num //= 1000
return " ".join(reversed(parts))
def convert(self, number):
"""
Converts an individual number passed in string form to spelt-out form
"""
if "." in number:
integer_part, decimal_part = number.split(".")
else:
integer_part, decimal_part = number, "00"
# Extract currency symbol if present
currency_symbol = ""
for symbol, name in self.currency_symbols.items():
if integer_part.startswith(symbol):
currency_symbol = name
integer_part = integer_part[len(symbol) :]
break
if integer_part.startswith("-"):
if integer_part[1:].startswith(symbol):
currency_symbol = name
integer_part = "-" + integer_part[len(symbol) + 1 :]
break
# Extract 'minus' prefix for negative numbers
minus_prefix = ""
if integer_part.startswith("-"):
minus_prefix = "minus "
integer_part = integer_part[1:]
elif integer_part.startswith("minus"):
minus_prefix = "minus "
integer_part = integer_part[len("minus") :]
percent_suffix = ""
if "%" in integer_part or "%" in decimal_part:
percent_suffix = " percent"
integer_part = integer_part.replace("%", "")
decimal_part = decimal_part.replace("%", "")
integer_part = integer_part.zfill(3 * ((len(integer_part) - 1) // 3 + 1))
parts = []
for i in range(0, len(integer_part), 3):
chunk = int(integer_part[i : i + 3])
if chunk > 0:
part = self.spell_number(chunk)
unit = self.thousands[len(integer_part[i:]) // 3 - 1]
if unit:
part += " " + unit
parts.append(part)
spelled_integer = " ".join(parts)
# Format the spelt-out number based on conditions, such as:
# If it has decimal parts, currency symbol, minus prefix, etc
if decimal_part == "00":
return (
f"{minus_prefix}{spelled_integer}{percent_suffix}{currency_symbol}"
if minus_prefix or currency_symbol
else f"{spelled_integer}{percent_suffix}"
)
else:
spelled_decimal = " ".join([self.spell_number(int(digit)) for digit in decimal_part])
return (
f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}{currency_symbol}"
if minus_prefix or currency_symbol
else f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}"
)
def __call__(self, text):
"""
Convert numbers / number-like quantities in a string to their spelt-out counterparts
"""
# Form part of the pattern for all currency symbols
pattern = r"(?<!\w)(-?\$?\€?\£?\¢?\¥?\₹?\₽?\฿?\₺?\₴?\₣?\₡?\₱?\₪?\₮?\₩?\₦?\₫?\﷼?\d+(?:\.\d{1,2})?%?)(?!\w)"
# Find and replace commas in numbers (15,000 -> 15000, etc)
text = re.sub(r"(\d+,\d+)", lambda match: match.group(1).replace(",", ""), text)
# Use regex to find and replace numbers in the text
converted_text = re.sub(pattern, lambda match: self.convert(match.group(1)), text)
converted_text = re.sub(" +", " ", converted_text)
return converted_text
|
EnglishNumberNormalizer
|
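A usage sketch for the normaliser above; it is not self-contained and assumes the class is defined in a module with `import re` at the top, as the original source file has.
normalizer = EnglishNumberNormalizer()
print(normalizer("The bill was $15,000."))
# -> "The bill was fifteen thousand dollars."
print(normalizer("It fell by -3.5%"))
# -> "It fell by minus three point five percent"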
python
|
apache__airflow
|
airflow-core/src/airflow/executors/workloads.py
|
{
"start": 3370,
"end": 4603
}
|
class ____(BaseDagBundleWorkload):
"""Execute the given Task."""
ti: TaskInstance
sentry_integration: str = ""
type: Literal["ExecuteTask"] = Field(init=False, default="ExecuteTask")
@classmethod
def make(
cls,
ti: TIModel,
dag_rel_path: Path | None = None,
generator: JWTGenerator | None = None,
bundle_info: BundleInfo | None = None,
sentry_integration: str = "",
) -> ExecuteTask:
from airflow.utils.helpers import log_filename_template_renderer
ser_ti = TaskInstance.model_validate(ti, from_attributes=True)
ser_ti.parent_context_carrier = ti.dag_run.context_carrier
if not bundle_info:
bundle_info = BundleInfo(
name=ti.dag_model.bundle_name,
version=ti.dag_run.bundle_version,
)
fname = log_filename_template_renderer()(ti=ti)
return cls(
ti=ser_ti,
dag_rel_path=dag_rel_path or Path(ti.dag_model.relative_fileloc or ""),
token=cls.generate_token(str(ti.id), generator),
log_path=fname,
bundle_info=bundle_info,
sentry_integration=sentry_integration,
)
|
ExecuteTask
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/wrapper.py
|
{
"start": 21876,
"end": 22660
}
|
class ____(WrapperLine):
wrapper: PythonWrapperCodegen
def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine:
"""First pass to find reuse"""
return self
def codegen(self, code: IndentedBuffer) -> None:
"""Second pass to output code"""
def __str__(self) -> str:
"""
Emits a string representation that fits on one line.
"""
args: list[str] = []
for field in dataclasses.fields(self):
if field.name == "wrapper":
continue
val = getattr(self, field.name)
args.append(
f"{field.name}={val.get_name() if field.type is ir.Buffer else val}"
)
return f"{type(self).__name__}({', '.join(args)})"
|
MemoryPlanningLine
|