| language (string, 1 class) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python
|
pennersr__django-allauth
|
allauth/headless/account/views.py
|
{
"start": 10423,
"end": 12525
}
|
class ____(APIView):
    input_class = ResetPasswordInput
    def handle_invalid_input(self, input: ResetPasswordInput):
        if self.process and "key" in input.errors:
            self.process.record_invalid_attempt()
        return super().handle_invalid_input(input)
    def handle(self, request, *args, **kwargs):
        self.process = None
        if account_settings.PASSWORD_RESET_BY_CODE_ENABLED:
            self.process = (
                password_reset_by_code.PasswordResetVerificationProcess.resume(
                    self.request
                )
            )
            if not self.process:
                return ConflictResponse(request)
        return super().handle(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        key = request.headers.get("X-Password-Reset-Key", "")
        if self.process:
            input = ResetPasswordKeyInput({"key": key}, code=self.process.code)
            if not input.is_valid():
                self.process.record_invalid_attempt()
                return ErrorResponse(request, input=input)
            self.process.confirm_code()
            return response.PasswordResetKeyResponse(request, self.process.user)
        else:
            input = ResetPasswordKeyInput({"key": key})
            if not input.is_valid():
                return ErrorResponse(request, input=input)
            return response.PasswordResetKeyResponse(request, input.user)
    def get_input_kwargs(self):
        ret = {}
        if self.process:
            ret.update({"code": self.process.code, "user": self.process.user})
        return ret
    def post(self, request, *args, **kwargs):
        user = self.input.user
        flows.password_reset.reset_password(user, self.input.cleaned_data["password"])
        if self.process:
            self.process.confirm_code()
            self.process.finish()
        else:
            password_reset.finalize_password_reset(request, user)
        return AuthenticationResponse(self.request)
@method_decorator(rate_limit(action="change_password"), name="handle")
|
ResetPasswordView
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/triggers/vertex_ai.py
|
{
"start": 6904,
"end": 7899
}
|
class ____(BaseVertexAIJobTrigger):
    """
    Make async calls to Vertex AI to check the state of a running custom training job.
    Return the job when it enters a completed state.
    """
    job_type_verbose_name = "Custom Training Job"
    job_serializer_class = types.TrainingPipeline
    statuses_success = {
        PipelineState.PIPELINE_STATE_PAUSED,
        PipelineState.PIPELINE_STATE_SUCCEEDED,
    }
    @cached_property
    def async_hook(self) -> CustomJobAsyncHook:
        return CustomJobAsyncHook(
            gcp_conn_id=self.conn_id,
            impersonation_chain=self.impersonation_chain,
        )
    async def _wait_job(self) -> types.TrainingPipeline:
        pipeline: types.TrainingPipeline = await self.async_hook.wait_for_training_pipeline(
            project_id=self.project_id,
            location=self.location,
            pipeline_id=self.job_id,
            poll_interval=self.poll_interval,
        )
        return pipeline
|
CustomTrainingJobTrigger
|
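A minimal sketch (not part of the dataset row) of the pattern the trigger above exercises: build an async client once via `cached_property`, then poll it until a terminal state. `FakeAsyncClient` and `PollingTrigger` are hypothetical stand-ins, not Airflow APIs.

```python
import asyncio
from functools import cached_property

class FakeAsyncClient:
    """Toy stand-in for an async hook; reports success after a few polls."""
    def __init__(self) -> None:
        self._calls = 0
    async def get_state(self, job_id: str) -> str:
        self._calls += 1
        return "SUCCEEDED" if self._calls >= 3 else "RUNNING"

class PollingTrigger:
    """Build the client lazily, exactly once, then await completion."""
    def __init__(self, job_id: str, poll_interval: float = 0.01) -> None:
        self.job_id = job_id
        self.poll_interval = poll_interval
    @cached_property
    def client(self) -> FakeAsyncClient:
        return FakeAsyncClient()
    async def wait(self) -> str:
        while True:
            state = await self.client.get_state(self.job_id)
            if state in {"SUCCEEDED", "PAUSED"}:
                return state
            await asyncio.sleep(self.poll_interval)

print(asyncio.run(PollingTrigger("job-1").wait()))  # SUCCEEDED
```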
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/signal/fft_ops_test.py
|
{
"start": 1498,
"end": 5666
}
|
class ____(test.TestCase):
    def _Compare_fftn(
        self,
        x,
        fft_length=None,
        axes=None,
        norm=None,
        use_placeholder=False,
        rtol=1e-4,
        atol=1e-4,
    ):
        self._CompareForward_fftn(
            x, fft_length, axes, norm, use_placeholder, rtol, atol
        )
        self._CompareBackward_fftn(
            x, fft_length, axes, norm, use_placeholder, rtol, atol
        )
    def _CompareForward_fftn(
        self,
        x,
        fft_length=None,
        axes=None,
        norm=None,
        use_placeholder=False,
        rtol=1e-4,
        atol=1e-4,
    ):
        x_np = self._np_fftn(x, fft_length, axes, norm)
        if use_placeholder:
            x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
            x_tf = self._tf_fftn(x_ph, fft_length, axes, norm, feed_dict={x_ph: x})
        else:
            x_tf = self._tf_fftn(x, fft_length, axes, norm)
        self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
    def _CompareBackward_fftn(
        self,
        x,
        fft_length=None,
        axes=None,
        norm=None,
        use_placeholder=False,
        rtol=1e-4,
        atol=1e-4,
    ):
        x_np = self._np_ifftn(x, fft_length, axes, norm)
        if use_placeholder:
            x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
            x_tf = self._tf_ifftn(x_ph, fft_length, axes, norm, feed_dict={x_ph: x})
        else:
            x_tf = self._tf_ifftn(x, fft_length, axes, norm)
        self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
    def _compare(self, x, rank, fft_length=None, use_placeholder=False,
                 rtol=1e-4, atol=1e-4):
        self._compare_forward(x, rank, fft_length, use_placeholder, rtol, atol)
        self._compare_backward(x, rank, fft_length, use_placeholder, rtol, atol)
    def _compare_forward(self, x, rank, fft_length=None, use_placeholder=False,
                         rtol=1e-4, atol=1e-4):
        x_np = self._np_fft(x, rank, fft_length)
        if use_placeholder:
            x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
            x_tf = self._tf_fft(x_ph, rank, fft_length, feed_dict={x_ph: x})
        else:
            x_tf = self._tf_fft(x, rank, fft_length)
        self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
    def _compare_backward(self, x, rank, fft_length=None, use_placeholder=False,
                          rtol=1e-4, atol=1e-4):
        x_np = self._np_ifft(x, rank, fft_length)
        if use_placeholder:
            x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
            x_tf = self._tf_ifft(x_ph, rank, fft_length, feed_dict={x_ph: x})
        else:
            x_tf = self._tf_ifft(x, rank, fft_length)
        self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
    def _check_memory_fail(self, x, rank):
        config = config_pb2.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 1e-2
        with self.cached_session(config=config, force_gpu=True):
            self._tf_fft(x, rank, fft_length=None)
    def _check_grad_complex(self, func, x, y, result_is_complex=True,
                            rtol=1e-2, atol=1e-2):
        with self.cached_session():
            def f(inx, iny):
                inx.set_shape(x.shape)
                iny.set_shape(y.shape)
                # func is a forward or inverse, real or complex, batched or unbatched
                # FFT function with a complex input.
                z = func(math_ops.complex(inx, iny))
                # loss = sum(|z|^2)
                loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
                return loss
            ((x_jacob_t, y_jacob_t), (x_jacob_n, y_jacob_n)) = (
                gradient_checker_v2.compute_gradient(f, [x, y], delta=1e-2))
            self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
            self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)
    def _check_grad_real(self, func, x, rtol=1e-2, atol=1e-2):
        def f(inx):
            inx.set_shape(x.shape)
            # func is a forward RFFT function (batched or unbatched).
            z = func(inx)
            # loss = sum(|z|^2)
            loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
            return loss
        (x_jacob_t,), (x_jacob_n,) = gradient_checker_v2.compute_gradient(
            f, [x], delta=1e-2)
        self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
@test_util.run_all_in_graph_and_eager_modes
|
BaseFFTOpsTest
|
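The helpers above compare TensorFlow FFTs against a NumPy oracle with rtol/atol of 1e-4. A NumPy-only sketch of the same oracle-style check (no TF dependency, not from the dataset row):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 8)) + 1j * rng.standard_normal((4, 8))

# Forward/backward round trip against the NumPy reference, mirroring the
# rtol/atol=1e-4 tolerances used in the test class above.
np.testing.assert_allclose(np.fft.ifftn(np.fft.fftn(x)), x, rtol=1e-4, atol=1e-4)

# Normalization modes must agree pairwise: "ortho" makes fftn/ifftn unitary.
f = np.fft.fftn(x, norm="ortho")
np.testing.assert_allclose(np.fft.ifftn(f, norm="ortho"), x, rtol=1e-4, atol=1e-4)
```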
python
|
pypa__pip
|
src/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
|
{
"start": 2201,
"end": 10093
}
|
class ____:
    """Implements the MetaPathFinder interface to locate modules in ``backend-path``.
    Since the environment provided by the frontend can contain all sorts of
    MetaPathFinders, the only way to ensure the backend is loaded from the
    right place is to prepend our own.
    """
    def __init__(self, backend_path, backend_module):
        self.backend_path = backend_path
        self.backend_module = backend_module
        self.backend_parent, _, _ = backend_module.partition(".")
    def find_spec(self, fullname, _path, _target=None):
        if "." in fullname:
            # Rely on importlib to find nested modules based on parent's path
            return None
        # Ignore other items in _path or sys.path and use backend_path instead:
        spec = PathFinder.find_spec(fullname, path=self.backend_path)
        if spec is None and fullname == self.backend_parent:
            # According to the spec, the backend MUST be loaded from backend-path.
            # Therefore, we can halt the import machinery and raise a clean error.
            msg = f"Cannot find module {self.backend_module!r} in {self.backend_path!r}"
            raise BackendUnavailable(msg)
        return spec
    if sys.version_info >= (3, 8):
        def find_distributions(self, context=None):
            # Delayed import: Python 3.7 does not contain importlib.metadata
            from importlib.metadata import DistributionFinder, MetadataPathFinder
            context = DistributionFinder.Context(path=self.backend_path)
            return MetadataPathFinder.find_distributions(context=context)
def _supported_features():
    """Return the list of options features supported by the backend.
    Returns a list of strings.
    The only possible value is 'build_editable'.
    """
    backend = _build_backend()
    features = []
    if hasattr(backend, "build_editable"):
        features.append("build_editable")
    return features
def get_requires_for_build_wheel(config_settings):
    """Invoke the optional get_requires_for_build_wheel hook
    Returns [] if the hook is not defined.
    """
    backend = _build_backend()
    try:
        hook = backend.get_requires_for_build_wheel
    except AttributeError:
        return []
    else:
        return hook(config_settings)
def get_requires_for_build_editable(config_settings):
    """Invoke the optional get_requires_for_build_editable hook
    Returns [] if the hook is not defined.
    """
    backend = _build_backend()
    try:
        hook = backend.get_requires_for_build_editable
    except AttributeError:
        return []
    else:
        return hook(config_settings)
def prepare_metadata_for_build_wheel(
    metadata_directory, config_settings, _allow_fallback
):
    """Invoke optional prepare_metadata_for_build_wheel
    Implements a fallback by building a wheel if the hook isn't defined,
    unless _allow_fallback is False in which case HookMissing is raised.
    """
    backend = _build_backend()
    try:
        hook = backend.prepare_metadata_for_build_wheel
    except AttributeError:
        if not _allow_fallback:
            raise HookMissing()
    else:
        return hook(metadata_directory, config_settings)
    # fallback to build_wheel outside the try block to avoid exception chaining
    # which can be confusing to users and is not relevant
    whl_basename = backend.build_wheel(metadata_directory, config_settings)
    return _get_wheel_metadata_from_wheel(
        whl_basename, metadata_directory, config_settings
    )
def prepare_metadata_for_build_editable(
    metadata_directory, config_settings, _allow_fallback
):
    """Invoke optional prepare_metadata_for_build_editable
    Implements a fallback by building an editable wheel if the hook isn't
    defined, unless _allow_fallback is False in which case HookMissing is
    raised.
    """
    backend = _build_backend()
    try:
        hook = backend.prepare_metadata_for_build_editable
    except AttributeError:
        if not _allow_fallback:
            raise HookMissing()
        try:
            build_hook = backend.build_editable
        except AttributeError:
            raise HookMissing(hook_name="build_editable")
        else:
            whl_basename = build_hook(metadata_directory, config_settings)
            return _get_wheel_metadata_from_wheel(
                whl_basename, metadata_directory, config_settings
            )
    else:
        return hook(metadata_directory, config_settings)
WHEEL_BUILT_MARKER = "PYPROJECT_HOOKS_ALREADY_BUILT_WHEEL"
def _dist_info_files(whl_zip):
    """Identify the .dist-info folder inside a wheel ZipFile."""
    res = []
    for path in whl_zip.namelist():
        m = re.match(r"[^/\\]+-[^/\\]+\.dist-info/", path)
        if m:
            res.append(path)
    if res:
        return res
    raise Exception("No .dist-info folder found in wheel")
def _get_wheel_metadata_from_wheel(whl_basename, metadata_directory, config_settings):
    """Extract the metadata from a wheel.
    Fallback for when the build backend does not
    define the 'get_wheel_metadata' hook.
    """
    from zipfile import ZipFile
    with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), "wb"):
        pass  # Touch marker file
    whl_file = os.path.join(metadata_directory, whl_basename)
    with ZipFile(whl_file) as zipf:
        dist_info = _dist_info_files(zipf)
        zipf.extractall(path=metadata_directory, members=dist_info)
    return dist_info[0].split("/")[0]
def _find_already_built_wheel(metadata_directory):
    """Check for a wheel already built during the get_wheel_metadata hook."""
    if not metadata_directory:
        return None
    metadata_parent = os.path.dirname(metadata_directory)
    if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)):
        return None
    whl_files = glob(os.path.join(metadata_parent, "*.whl"))
    if not whl_files:
        print("Found wheel built marker, but no .whl files")
        return None
    if len(whl_files) > 1:
        print(
            "Found multiple .whl files; unspecified behaviour. "
            "Will call build_wheel."
        )
        return None
    # Exactly one .whl file
    return whl_files[0]
def build_wheel(wheel_directory, config_settings, metadata_directory=None):
    """Invoke the mandatory build_wheel hook.
    If a wheel was already built in the
    prepare_metadata_for_build_wheel fallback, this
    will copy it rather than rebuilding the wheel.
    """
    prebuilt_whl = _find_already_built_wheel(metadata_directory)
    if prebuilt_whl:
        shutil.copy2(prebuilt_whl, wheel_directory)
        return os.path.basename(prebuilt_whl)
    return _build_backend().build_wheel(
        wheel_directory, config_settings, metadata_directory
    )
def build_editable(wheel_directory, config_settings, metadata_directory=None):
    """Invoke the optional build_editable hook.
    If a wheel was already built in the
    prepare_metadata_for_build_editable fallback, this
    will copy it rather than rebuilding the wheel.
    """
    backend = _build_backend()
    try:
        hook = backend.build_editable
    except AttributeError:
        raise HookMissing()
    else:
        prebuilt_whl = _find_already_built_wheel(metadata_directory)
        if prebuilt_whl:
            shutil.copy2(prebuilt_whl, wheel_directory)
            return os.path.basename(prebuilt_whl)
        return hook(wheel_directory, config_settings, metadata_directory)
def get_requires_for_build_sdist(config_settings):
    """Invoke the optional get_requires_for_build_wheel hook
    Returns [] if the hook is not defined.
    """
    backend = _build_backend()
    try:
        hook = backend.get_requires_for_build_sdist
    except AttributeError:
        return []
    else:
        return hook(config_settings)
|
_BackendPathFinder
|
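The docstring above explains the core trick: prepending a MetaPathFinder so one package can only be resolved from a pinned path. A hedged, self-contained sketch of that mechanism (`PinnedPathFinder`, `my_backend`, and the path are hypothetical; `PathFinder` is the real `importlib.machinery.PathFinder`):

```python
import sys
from importlib.machinery import PathFinder

class PinnedPathFinder:
    """Resolve one top-level package only from a pinned directory list,
    ignoring the rest of sys.path."""
    def __init__(self, package: str, search_path: list[str]) -> None:
        self.package = package
        self.search_path = search_path

    def find_spec(self, fullname, path=None, target=None):
        if fullname != self.package:
            return None  # let the normal finders handle everything else
        return PathFinder.find_spec(fullname, path=self.search_path)

# Prepending wins: finders are consulted in sys.meta_path order.
sys.meta_path.insert(0, PinnedPathFinder("my_backend", ["/tmp/backend"]))
```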
python
|
getsentry__sentry-python
|
tests/test_tracing_utils.py
|
{
"start": 307,
"end": 5067
}
|
class ____:
    id: str
    is_sentry_sdk_frame: bool
    namespace: Optional[str] = None
    in_app_include: Optional[List[str]] = None
    in_app_exclude: Optional[List[str]] = None
    abs_path: Optional[str] = None
    project_root: Optional[str] = None
@pytest.mark.parametrize(
    "test_case, expected",
    [
        (
            ShouldBeIncludedTestCase(
                id="Frame from Sentry SDK",
                is_sentry_sdk_frame=True,
            ),
            False,
        ),
        (
            ShouldBeIncludedTestCase(
                id="Frame from Django installed in virtualenv inside project root",
                is_sentry_sdk_frame=False,
                abs_path="/home/username/some_project/.venv/lib/python3.12/site-packages/django/db/models/sql/compiler",
                project_root="/home/username/some_project",
                namespace="django.db.models.sql.compiler",
                in_app_include=["django"],
            ),
            True,
        ),
        (
            ShouldBeIncludedTestCase(
                id="Frame from project",
                is_sentry_sdk_frame=False,
                abs_path="/home/username/some_project/some_project/__init__.py",
                project_root="/home/username/some_project",
                namespace="some_project",
            ),
            True,
        ),
        (
            ShouldBeIncludedTestCase(
                id="Frame from project module in `in_app_exclude`",
                is_sentry_sdk_frame=False,
                abs_path="/home/username/some_project/some_project/exclude_me/some_module.py",
                project_root="/home/username/some_project",
                namespace="some_project.exclude_me.some_module",
                in_app_exclude=["some_project.exclude_me"],
            ),
            False,
        ),
        (
            ShouldBeIncludedTestCase(
                id="Frame from system-wide installed Django",
                is_sentry_sdk_frame=False,
                abs_path="/usr/lib/python3.12/site-packages/django/db/models/sql/compiler",
                project_root="/home/username/some_project",
                namespace="django.db.models.sql.compiler",
            ),
            False,
        ),
        (
            ShouldBeIncludedTestCase(
                id="Frame from system-wide installed Django with `django` in `in_app_include`",
                is_sentry_sdk_frame=False,
                abs_path="/usr/lib/python3.12/site-packages/django/db/models/sql/compiler",
                project_root="/home/username/some_project",
                namespace="django.db.models.sql.compiler",
                in_app_include=["django"],
            ),
            True,
        ),
    ],
    ids=id_function,
)
def test_should_be_included(test_case, expected):
    # type: (ShouldBeIncludedTestCase, bool) -> None
    """Checking logic, see: https://github.com/getsentry/sentry-python/issues/3312"""
    kwargs = asdict(test_case)
    kwargs.pop("id")
    assert _should_be_included(**kwargs) == expected
@pytest.mark.parametrize(
    ("header", "expected"),
    (
        ("", ""),
        ("foo=bar", "foo=bar"),
        (" foo=bar, baz = qux ", " foo=bar, baz = qux "),
        ("sentry-trace_id=123", ""),
        (" sentry-trace_id = 123 ", ""),
        ("sentry-trace_id=123,sentry-public_key=456", ""),
        ("foo=bar,sentry-trace_id=123", "foo=bar"),
        ("foo=bar,sentry-trace_id=123,baz=qux", "foo=bar,baz=qux"),
        (
            "foo=bar,sentry-trace_id=123,baz=qux,sentry-public_key=456",
            "foo=bar,baz=qux",
        ),
    ),
)
def test_strip_sentry_baggage(header, expected):
    assert Baggage.strip_sentry_baggage(header) == expected
@pytest.mark.parametrize(
    ("baggage", "expected_repr"),
    (
        (Baggage(sentry_items={}), '<Baggage "", mutable=True>'),
        (Baggage(sentry_items={}, mutable=False), '<Baggage "", mutable=False>'),
        (
            Baggage(sentry_items={"foo": "bar"}),
            '<Baggage "sentry-foo=bar,", mutable=True>',
        ),
        (
            Baggage(sentry_items={"foo": "bar"}, mutable=False),
            '<Baggage "sentry-foo=bar,", mutable=False>',
        ),
        (
            Baggage(sentry_items={"foo": "bar"}, third_party_items="asdf=1234,"),
            '<Baggage "sentry-foo=bar,asdf=1234,", mutable=True>',
        ),
        (
            Baggage(
                sentry_items={"foo": "bar"},
                third_party_items="asdf=1234,",
                mutable=False,
            ),
            '<Baggage "sentry-foo=bar,asdf=1234,", mutable=False>',
        ),
    ),
)
def test_baggage_repr(baggage, expected_repr):
    assert repr(baggage) == expected_repr
|
ShouldBeIncludedTestCase
|
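The tests above drive `pytest.mark.parametrize` from dataclass instances whose `id` field doubles as the test ID. A generic, self-contained sketch of that pattern (names are hypothetical):

```python
from dataclasses import dataclass, asdict

import pytest

@dataclass
class Case:
    id: str
    value: int
    expected: bool

@pytest.mark.parametrize(
    "case",
    [
        Case(id="zero is falsy", value=0, expected=False),
        Case(id="one is truthy", value=1, expected=True),
    ],
    ids=lambda c: c.id,
)
def test_truthiness(case):
    kwargs = asdict(case)
    kwargs.pop("id")  # the id is metadata, not an argument under test
    assert bool(kwargs["value"]) == kwargs["expected"]
```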
python
|
realpython__materials
|
gemini-cli/todolist/src/todolist/database.py
|
{
"start": 335,
"end": 474
}
|
class ____(Model):
    name = TextField(null=False, unique=True)
    class Meta:
        database = db
        table_name = "lists"
|
TaskList
|
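A hedged usage sketch for a peewee model like the one above, assuming an in-memory SQLite database (the `TaskList` name comes from the row's target; everything else is illustrative):

```python
from peewee import Model, SqliteDatabase, TextField

db = SqliteDatabase(":memory:")

class TaskList(Model):
    name = TextField(null=False, unique=True)

    class Meta:
        database = db
        table_name = "lists"

db.connect()
db.create_tables([TaskList])
TaskList.create(name="groceries")
print([t.name for t in TaskList.select()])  # ['groceries']
```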
python
|
dask__distributed
|
distributed/pytest_resourceleaks.py
|
{
"start": 8723,
"end": 10414
}
|
class ____(ResourceChecker, name="tracemalloc"):
    # Report a leak if the traced memory increased by at least this many bytes
    LEAK_THRESHOLD = 2**20
    # Report at most this many leaks
    NDIFF = 5
    # Report less than NDIFF leaks if they amount to less than this many bytes
    MIN_SIZE_DIFF = 200 * 1024
    def on_start_test(self) -> None:
        tracemalloc.start(1)
    def on_stop_test(self) -> None:
        tracemalloc.stop()
    def measure(self) -> tuple[int, tracemalloc.Snapshot]:
        current, _ = tracemalloc.get_traced_memory()
        snap = tracemalloc.take_snapshot()
        return current, snap
    def has_leak(
        self,
        before: tuple[int, tracemalloc.Snapshot],
        after: tuple[int, tracemalloc.Snapshot],
    ) -> bool:
        return after[0] > before[0] + self.LEAK_THRESHOLD
    def format(
        self,
        before: tuple[int, tracemalloc.Snapshot],
        after: tuple[int, tracemalloc.Snapshot],
    ) -> str:
        bytes_before, snap_before = before
        bytes_after, snap_after = after
        diff = snap_after.compare_to(snap_before, "traceback")
        lines = [
            f"leaked {(bytes_after - bytes_before) / 2 ** 20:.1f} MiB "
            "of traced Python memory"
        ]
        for stat in diff[: self.NDIFF]:
            size_diff = stat.size_diff or stat.size
            if size_diff < self.MIN_SIZE_DIFF:
                break
            count = stat.count_diff or stat.count
            lines += [f" - leaked {size_diff / 2**20:.1f} MiB in {count} calls at:"]
            lines += [" " + line for line in stat.traceback.format()]
        return "\n".join(lines)
|
TracemallocMemoryChecker
|
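The checker above is built entirely from the stdlib `tracemalloc` API: start tracing, snapshot before and after, diff by traceback. A standalone sketch of the same measurement loop, capped at five entries like `NDIFF`:

```python
import tracemalloc

tracemalloc.start(1)  # keep 1 frame per allocation, as above

before = tracemalloc.take_snapshot()
leak = [bytes(1024) for _ in range(1000)]  # deliberately retained allocation
after = tracemalloc.take_snapshot()

for stat in after.compare_to(before, "traceback")[:5]:
    print(f"+{stat.size_diff / 2**20:.2f} MiB in {stat.count_diff} blocks")
    for line in stat.traceback.format():
        print("   ", line)
tracemalloc.stop()
```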
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 5475,
"end": 5546
}
|
class ____(BookConfig, generic.YearArchiveView):
    pass
|
BookYearArchive
|
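The row above composes a config mixin with Django's `YearArchiveView`. A hedged sketch of what such a mixin typically supplies (the `BookConfig` body shown here is illustrative, not the repo's; it assumes a `Book` model with a `pubdate` DateField):

```python
from django.views import generic

class BookConfig:
    model = Book           # hypothetical model; not defined in this sketch
    date_field = "pubdate"
    allow_future = True    # let the archive list future-dated entries

class BookYearArchive(BookConfig, generic.YearArchiveView):
    pass
    # A URLconf would route e.g. "archive/<int:year>/" to BookYearArchive.as_view()
```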
python
|
numba__numba
|
numba/tests/doc_examples/test_llvm_pass_timings.py
|
{
"start": 160,
"end": 953
}
|
class ____(unittest.TestCase):
    def test_pass_timings(self):
        with override_config('LLVM_PASS_TIMINGS', True):
            with captured_stdout() as stdout:
                # magictoken.ex_llvm_pass_timings.begin
                import numba

                @numba.njit
                def foo(n):
                    c = 0
                    for i in range(n):
                        for j in range(i):
                            c += j
                    return c

                foo(10)
                md = foo.get_metadata(foo.signatures[0])
                print(md['llvm_pass_timings'])
                # magictoken.ex_llvm_pass_timings.end
        self.assertIn("Finalize object", stdout.getvalue())
if __name__ == '__main__':
    unittest.main()
|
DocsLLVMPassTimings
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 18610,
"end": 18932
}
|
class ____(graphene.ObjectType):
    class Meta:
        interfaces = (
            GrapheneMessageEvent,
            GrapheneDisplayableEvent,
            GrapheneStepEvent,
            GrapheneMarkerEvent,
            GrapheneErrorEvent,
        )
        name = "ResourceInitFailureEvent"
|
GrapheneResourceInitFailureEvent
|
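The row above is a graphene `ObjectType` configured entirely through its `Meta` class. A minimal runnable sketch of the same library, without Dagster's interfaces (names are illustrative):

```python
import graphene

class Query(graphene.ObjectType):
    status = graphene.String()

    def resolve_status(root, info):
        return "ok"

schema = graphene.Schema(query=Query)
print(schema.execute("{ status }").data)  # {'status': 'ok'}
```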
python
|
pypa__pip
|
src/pip/_internal/cli/parser.py
|
{
"start": 484,
"end": 3417
}
|
class ____(optparse.IndentedHelpFormatter):
    """A prettier/less verbose help formatter for optparse."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # help position must be aligned with __init__.parseopts.description
        kwargs["max_help_position"] = 30
        kwargs["indent_increment"] = 1
        kwargs["width"] = shutil.get_terminal_size()[0] - 2
        super().__init__(*args, **kwargs)
    def format_option_strings(self, option: optparse.Option) -> str:
        return self._format_option_strings(option)
    def _format_option_strings(
        self, option: optparse.Option, mvarfmt: str = " <{}>", optsep: str = ", "
    ) -> str:
        """
        Return a comma-separated list of option strings and metavars.
        :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
        :param mvarfmt: metavar format string
        :param optsep: separator
        """
        opts = []
        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, optsep)
        if option.takes_value():
            assert option.dest is not None
            metavar = option.metavar or option.dest.lower()
            opts.append(mvarfmt.format(metavar.lower()))
        return "".join(opts)
    def format_heading(self, heading: str) -> str:
        if heading == "Options":
            return ""
        return heading + ":\n"
    def format_usage(self, usage: str) -> str:
        """
        Ensure there is only one newline between usage and the first heading
        if there is no description.
        """
        msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " "))
        return msg
    def format_description(self, description: str | None) -> str:
        # leave full control over description to us
        if description:
            if hasattr(self.parser, "main"):
                label = "Commands"
            else:
                label = "Description"
            # some doc strings have initial newlines, some don't
            description = description.lstrip("\n")
            # some doc strings have final newlines and spaces, some don't
            description = description.rstrip()
            # dedent, then reindent
            description = self.indent_lines(textwrap.dedent(description), " ")
            description = f"{label}:\n{description}\n"
            return description
        else:
            return ""
    def format_epilog(self, epilog: str | None) -> str:
        # leave full control over epilog to us
        if epilog:
            return epilog
        else:
            return ""
    def indent_lines(self, text: str, indent: str) -> str:
        new_lines = [indent + line for line in text.split("\n")]
        return "\n".join(new_lines)
|
PrettyHelpFormatter
|
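Custom optparse formatters like the one above are plugged in via the parser's `formatter` argument. A minimal sketch reproducing just the "suppress the default Options heading" behavior (class and option names are illustrative):

```python
import optparse

class QuietFormatter(optparse.IndentedHelpFormatter):
    """Drop the stock 'Options' heading, as in the formatter above."""
    def format_heading(self, heading: str) -> str:
        return "" if heading == "Options" else heading + ":\n"

parser = optparse.OptionParser(formatter=QuietFormatter())
parser.add_option("-f", "--format", dest="format", help="output format")
print(parser.format_help())
```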
python
|
getsentry__sentry
|
src/sentry/integrations/bitbucket_server/webhook.py
|
{
"start": 2056,
"end": 6136
}
|
class ____(BitbucketServerWebhook):
    @property
    def event_type(self) -> IntegrationWebhookEventType:
        return IntegrationWebhookEventType.PUSH
    def __call__(self, event: Mapping[str, Any], **kwargs) -> None:
        authors = {}
        if not (
            (organization := kwargs.get("organization"))
            and (integration_id := kwargs.get("integration_id"))
        ):
            raise ValueError("Organization and integration_id must be provided")
        with IntegrationWebhookEvent(
            interaction_type=self.event_type,
            domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT,
            provider_key=self.provider,
        ).capture() as lifecycle:
            try:
                repo = Repository.objects.get(
                    organization_id=organization.id,
                    provider=PROVIDER_NAME,
                    external_id=str(event["repository"]["id"]),
                )
            except Repository.DoesNotExist as e:
                lifecycle.record_halt(halt_reason=e)
                raise Http404()
            provider = repo.get_provider()
            try:
                installation = provider.get_installation(integration_id, organization.id)
            except Integration.DoesNotExist as e:
                lifecycle.record_halt(halt_reason=e)
                raise Http404()
            try:
                client = installation.get_client()
            except IntegrationError as e:
                lifecycle.record_halt(halt_reason=e)
                raise BadRequest()
            # while we're here, make sure repo data is up to date
            self.update_repo_data(repo, event)
            [project_name, repo_name] = repo.name.split("/")
            for change in event["changes"]:
                from_hash = None if change.get("fromHash") == "0" * 40 else change.get("fromHash")
                try:
                    commits = client.get_commits(
                        project_name, repo_name, from_hash, change.get("toHash")
                    )
                except ApiHostError as e:
                    lifecycle.record_halt(halt_reason=e)
                    raise BadRequest(detail="Unable to reach host")
                except ApiUnauthorized as e:
                    lifecycle.record_halt(halt_reason=e)
                    raise BadRequest()
                except Exception as e:
                    sentry_sdk.capture_exception(e)
                    raise
                for commit in commits:
                    if IntegrationRepositoryProvider.should_ignore_commit(commit["message"]):
                        continue
                    author_email = commit["author"]["emailAddress"]
                    # its optional, lets just throw it out for now
                    if author_email is None or len(author_email) > 75:
                        author = None
                    elif author_email not in authors:
                        authors[author_email] = author = CommitAuthor.objects.get_or_create(
                            organization_id=organization.id,
                            email=author_email,
                            defaults={"name": commit["author"]["name"]},
                        )[0]
                    else:
                        author = authors[author_email]
                    try:
                        with transaction.atomic(router.db_for_write(Commit)):
                            Commit.objects.create(
                                repository_id=repo.id,
                                organization_id=organization.id,
                                key=commit["id"],
                                message=commit["message"],
                                author=author,
                                date_added=datetime.fromtimestamp(
                                    commit["authorTimestamp"] / 1000, timezone.utc
                                ),
                            )
                    except IntegrityError:
                        pass
@region_silo_view
|
PushEventWebhook
|
python
|
sphinx-doc__sphinx
|
sphinx/environment/collectors/toctree.py
|
{
"start": 797,
"end": 17603
}
|
class ____(EnvironmentCollector):
    def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:
        env.tocs.pop(docname, None)
        env.toc_secnumbers.pop(docname, None)
        env.toc_fignumbers.pop(docname, None)
        env.toc_num_entries.pop(docname, None)
        env.toctree_includes.pop(docname, None)
        env.glob_toctrees.discard(docname)
        env.numbered_toctrees.discard(docname)
        for subfn, fnset in list(env.files_to_rebuild.items()):
            fnset.discard(docname)
            if not fnset:
                del env.files_to_rebuild[subfn]
    def merge_other(
        self,
        app: Sphinx,
        env: BuildEnvironment,
        docnames: Set[str],
        other: BuildEnvironment,
    ) -> None:
        for docname in docnames:
            env.tocs[docname] = other.tocs[docname]
            env.toc_num_entries[docname] = other.toc_num_entries[docname]
            if docname in other.toctree_includes:
                env.toctree_includes[docname] = other.toctree_includes[docname]
            if docname in other.glob_toctrees:
                env.glob_toctrees.add(docname)
            if docname in other.numbered_toctrees:
                env.numbered_toctrees.add(docname)
        for subfn, fnset in other.files_to_rebuild.items():
            env.files_to_rebuild.setdefault(subfn, set()).update(fnset & set(docnames))
    def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
        """Build a TOC from the doctree and store it in the inventory."""
        docname = app.env.current_document.docname
        numentries = [0]  # nonlocal again...

        def build_toc(
            node: Element | Sequence[Element],
            depth: int = 1,
        ) -> nodes.bullet_list | None:
            # list of table of contents entries
            entries: list[Element] = []
            for sectionnode in node:
                # find all toctree nodes in this section and add them
                # to the toc (just copying the toctree node which is then
                # resolved in self.get_and_resolve_doctree)
                if isinstance(sectionnode, nodes.section):
                    title = sectionnode[0]
                    # copy the contents of the section title, but without references
                    # and unnecessary stuff
                    visitor = SphinxContentsFilter(doctree)
                    title.walkabout(visitor)
                    nodetext = visitor.get_entry_text()  # type: ignore[no-untyped-call]
                    anchorname = _make_anchor_name(sectionnode['ids'], numentries)
                    # make these nodes:
                    # list_item -> compact_paragraph -> reference
                    reference = nodes.reference(
                        '',
                        '',
                        internal=True,
                        refuri=docname,
                        anchorname=anchorname,
                        *nodetext,
                    )
                    para = addnodes.compact_paragraph('', '', reference)
                    item: Element = nodes.list_item('', para)
                    sub_item = build_toc(sectionnode, depth + 1)
                    if sub_item:
                        item += sub_item
                    entries.append(item)
                # Wrap items under an ``.. only::`` directive in a node for
                # post-processing
                elif isinstance(sectionnode, addnodes.only):
                    onlynode = addnodes.only(expr=sectionnode['expr'])
                    blist = build_toc(sectionnode, depth)
                    if blist:
                        onlynode += blist.children
                    entries.append(onlynode)
                # check within the section for other node types
                elif isinstance(sectionnode, nodes.Element):
                    # cache of parent node -> list item
                    memo_parents: dict[nodes.Element, nodes.list_item] = {}
                    toctreenode: nodes.Node
                    for toctreenode in sectionnode.findall():
                        if isinstance(toctreenode, nodes.section):
                            continue
                        if isinstance(toctreenode, addnodes.toctree):
                            item = toctreenode.copy()
                            entries.append(item)
                            # important: do the inventory stuff
                            note_toctree(app.env, docname, toctreenode)
                        # add object signatures within a section to the ToC
                        elif isinstance(toctreenode, addnodes.desc):
                            # The desc has one or more nested desc_signature,
                            # and then a desc_content, which again may have desc nodes.
                            # Thus, desc is the one we can bubble up to through parents.
                            entry: nodes.list_item | None = None
                            for sig_node in toctreenode:
                                if not isinstance(sig_node, addnodes.desc_signature):
                                    continue
                                # Skip if no name set
                                if not sig_node.get('_toc_name', ''):
                                    continue
                                # Skip if explicitly disabled
                                if sig_node.parent.get('no-contents-entry'):
                                    continue
                                # Skip entries with no ID (e.g. with :no-index: set)
                                ids = sig_node['ids']
                                if not ids:
                                    continue
                                anchorname = _make_anchor_name(ids, numentries)
                                reference = nodes.reference(
                                    '',
                                    '',
                                    nodes.literal('', sig_node['_toc_name']),
                                    internal=True,
                                    refuri=docname,
                                    anchorname=anchorname,
                                )
                                para = addnodes.compact_paragraph(
                                    '', '', reference, skip_section_number=True
                                )
                                entry = nodes.list_item('', para)
                                # Find parent node
                                parent = sig_node.parent
                                while (
                                    parent not in memo_parents and parent != sectionnode
                                ):
                                    parent = parent.parent
                                # Note, it may both be the limit and in memo_parents,
                                # prefer memo_parents, so we get the nesting.
                                if parent in memo_parents:
                                    root_entry = memo_parents[parent]
                                    if isinstance(root_entry[-1], nodes.bullet_list):
                                        root_entry[-1].append(entry)
                                    else:
                                        root_entry.append(nodes.bullet_list('', entry))
                                else:
                                    assert parent == sectionnode
                                    entries.append(entry)
                            # Save the latest desc_signature as the one we put sub entries in.
                            # If there are multiple signatures, then the latest is used.
                            if entry is not None:
                                # are there any desc nodes without desc_signature nodes?
                                memo_parents[toctreenode] = entry
            if entries:
                return nodes.bullet_list('', *entries)
            return None

        toc = build_toc(doctree)
        if toc:
            app.env.tocs[docname] = toc
        else:
            app.env.tocs[docname] = nodes.bullet_list('')
        app.env.toc_num_entries[docname] = numentries[0]
    def get_updated_docs(self, app: Sphinx, env: BuildEnvironment) -> list[str]:
        return self.assign_section_numbers(env) + self.assign_figure_numbers(env)
    def assign_section_numbers(self, env: BuildEnvironment) -> list[str]:
        """Assign a section number to each heading under a numbered toctree."""
        # a list of all docnames whose section numbers changed
        rewrite_needed = []
        assigned: set[str] = set()
        old_secnumbers = env.toc_secnumbers
        env.toc_secnumbers = {}

        def _walk_toc(
            node: Element,
            secnums: dict[str, tuple[int, ...]],
            depth: int,
            titlenode: nodes.title | None = None,
        ) -> None:
            # titlenode is the title of the document, it will get assigned a
            # secnumber too, so that it shows up in next/prev/parent rellinks
            for subnode in node.children:
                if isinstance(subnode, nodes.bullet_list):
                    numstack.append(0)
                    _walk_toc(subnode, secnums, depth - 1, titlenode)
                    numstack.pop()
                    titlenode = None
                elif isinstance(subnode, nodes.list_item):
                    _walk_toc(subnode, secnums, depth, titlenode)
                    titlenode = None
                elif isinstance(subnode, addnodes.only):
                    # at this stage we don't know yet which sections are going
                    # to be included; just include all of them, even if it leads
                    # to gaps in the numbering
                    _walk_toc(subnode, secnums, depth, titlenode)
                    titlenode = None
                elif isinstance(subnode, addnodes.compact_paragraph):
                    if 'skip_section_number' in subnode:
                        continue
                    numstack[-1] += 1
                    reference = cast('nodes.reference', subnode[0])
                    if depth > 0:
                        number = numstack.copy()
                        secnums[reference['anchorname']] = tuple(numstack)
                    else:
                        number = None
                        secnums[reference['anchorname']] = ()
                    reference['secnumber'] = number
                    if titlenode:
                        titlenode['secnumber'] = number
                        titlenode = None
                elif isinstance(subnode, addnodes.toctree):
                    _walk_toctree(subnode, depth)

        def _walk_toctree(toctreenode: addnodes.toctree, depth: int) -> None:
            if depth == 0:
                return
            for _title, ref in toctreenode['entries']:
                if url_re.match(ref) or ref == 'self':
                    # don't mess with those
                    continue
                if ref in assigned:
                    logger.warning(
                        __(
                            '%s is already assigned section numbers '
                            '(nested numbered toctree?)'
                        ),
                        ref,
                        location=toctreenode,
                        type='toc',
                        subtype='secnum',
                    )
                elif ref in env.tocs:
                    secnums: dict[str, tuple[int, ...]] = {}
                    env.toc_secnumbers[ref] = secnums
                    assigned.add(ref)
                    _walk_toc(env.tocs[ref], secnums, depth, env.titles.get(ref))
                    if secnums != old_secnumbers.get(ref):
                        rewrite_needed.append(ref)

        for docname in env.numbered_toctrees:
            assigned.add(docname)
            doctree = env.get_doctree(docname)
            for toctreenode in doctree.findall(addnodes.toctree):
                depth = toctreenode.get('numbered', 0)
                if depth:
                    # every numbered toctree gets new numbering
                    numstack = [0]
                    _walk_toctree(toctreenode, depth)
        return rewrite_needed
    def assign_figure_numbers(self, env: BuildEnvironment) -> list[str]:
        """Assign a figure number to each figure under a numbered toctree."""
        generated_docnames = frozenset(env.domains.standard_domain._virtual_doc_names)
        rewrite_needed = []
        assigned: set[str] = set()
        old_fignumbers = env.toc_fignumbers
        env.toc_fignumbers = {}
        fignum_counter: dict[str, dict[tuple[int, ...], int]] = {}

        def get_figtype(node: Node) -> str | None:
            for domain in env.domains.sorted():
                figtype = domain.get_enumerable_node_type(node)
                if isinstance(domain, StandardDomain) and not domain.get_numfig_title(
                    node
                ):
                    # Skip if uncaptioned node
                    continue
                if figtype:
                    return figtype
            return None

        def get_section_number(docname: str, section: nodes.section) -> tuple[int, ...]:
            anchorname = '#' + section['ids'][0]
            secnumbers = env.toc_secnumbers.get(docname, {})
            if anchorname in secnumbers:
                secnum = secnumbers.get(anchorname)
            else:
                secnum = secnumbers.get('')
            return secnum or ()

        def get_next_fignumber(
            figtype: str, secnum: tuple[int, ...]
        ) -> tuple[int, ...]:
            counter = fignum_counter.setdefault(figtype, {})
            secnum = secnum[: env.config.numfig_secnum_depth]
            counter[secnum] = counter.get(secnum, 0) + 1
            return (*secnum, counter[secnum])

        def register_fignumber(
            docname: str, secnum: tuple[int, ...], figtype: str, fignode: Element
        ) -> None:
            env.toc_fignumbers.setdefault(docname, {})
            fignumbers = env.toc_fignumbers[docname].setdefault(figtype, {})
            figure_id = fignode['ids'][0]
            fignumbers[figure_id] = get_next_fignumber(figtype, secnum)

        def _walk_doctree(
            docname: str, doctree: Element, secnum: tuple[int, ...]
        ) -> None:
            nonlocal generated_docnames
            for subnode in doctree.children:
                if isinstance(subnode, nodes.section):
                    next_secnum = get_section_number(docname, subnode)
                    if next_secnum:
                        _walk_doctree(docname, subnode, next_secnum)
                    else:
                        _walk_doctree(docname, subnode, secnum)
                elif isinstance(subnode, addnodes.toctree):
                    for _title, subdocname in subnode['entries']:
                        if url_re.match(subdocname) or subdocname == 'self':
                            # don't mess with those
                            continue
                        if subdocname in generated_docnames:
                            # or these
                            continue
                        _walk_doc(subdocname, secnum)
                elif isinstance(subnode, nodes.Element):
                    figtype = get_figtype(subnode)
                    if figtype and subnode['ids']:
                        register_fignumber(docname, secnum, figtype, subnode)
                    _walk_doctree(docname, subnode, secnum)

        def _walk_doc(docname: str, secnum: tuple[int, ...]) -> None:
            if docname not in assigned:
                assigned.add(docname)
                doctree = env.get_doctree(docname)
                _walk_doctree(docname, doctree, secnum)

        if env.config.numfig:
            _walk_doc(env.config.root_doc, ())
            for docname, fignums in env.toc_fignumbers.items():
                if fignums != old_fignumbers.get(docname):
                    rewrite_needed.append(docname)
        return rewrite_needed
def _make_anchor_name(ids: list[str], num_entries: list[int]) -> str:
    if not num_entries[0]:
        # for the very first toc entry, don't add an anchor
        # as it is the file's title anyway
        anchorname = ''
    else:
        anchorname = '#' + ids[0]
    num_entries[0] += 1
    return anchorname
def setup(app: Sphinx) -> ExtensionMetadata:
    app.add_env_collector(TocTreeCollector)
    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
|
TocTreeCollector
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/torch_entities/components/reward_providers/rnd_reward_provider.py
|
{
"start": 616,
"end": 2110
}
|
class ____(BaseRewardProvider):
    """
    Implementation of Random Network Distillation : https://arxiv.org/pdf/1810.12894.pdf
    """
    def __init__(self, specs: BehaviorSpec, settings: RNDSettings) -> None:
        super().__init__(specs, settings)
        self._ignore_done = True
        self._random_network = RNDNetwork(specs, settings)
        self._training_network = RNDNetwork(specs, settings)
        self.optimizer = torch.optim.Adam(
            self._training_network.parameters(), lr=settings.learning_rate
        )
    def evaluate(self, mini_batch: AgentBuffer) -> np.ndarray:
        with torch.no_grad():
            target = self._random_network(mini_batch)
            prediction = self._training_network(mini_batch)
            rewards = torch.sum((prediction - target) ** 2, dim=1)
        return rewards.detach().cpu().numpy()
    def update(self, mini_batch: AgentBuffer) -> Dict[str, np.ndarray]:
        with torch.no_grad():
            target = self._random_network(mini_batch)
        prediction = self._training_network(mini_batch)
        loss = torch.mean(torch.sum((prediction - target) ** 2, dim=1))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return {"Losses/RND Loss": loss.detach().cpu().numpy()}
    def get_modules(self):
        return {
            f"Module:{self.name}-pred": self._training_network,
            f"Module:{self.name}-target": self._random_network,
        }
|
RNDRewardProvider
|
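The docstring above names the technique: Random Network Distillation, where a trained predictor imitates a frozen random network and the prediction error serves as an intrinsic (curiosity) reward. A self-contained torch sketch of that core idea, independent of ML-Agents' `RNDNetwork`/`AgentBuffer` types:

```python
import torch
import torch.nn as nn

obs_dim, feat_dim = 8, 16
target = nn.Linear(obs_dim, feat_dim)      # fixed random network, never trained
for p in target.parameters():
    p.requires_grad_(False)
predictor = nn.Linear(obs_dim, feat_dim)   # trained to imitate the target
opt = torch.optim.Adam(predictor.parameters(), lr=1e-3)

obs = torch.randn(32, obs_dim)
# Intrinsic reward: per-sample squared prediction error; novel states score high
# because the predictor has not yet learned to match the target there.
reward = ((predictor(obs) - target(obs)) ** 2).sum(dim=1)
loss = reward.mean()
opt.zero_grad()
loss.backward()
opt.step()
print(reward.detach().mean().item())
```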
python
|
apache__thrift
|
test/py.twisted/test_suite.py
|
{
"start": 2705,
"end": 5718
}
|
class ____(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        self.handler = TestHandler()
        self.processor = ThriftTest.Processor(self.handler)
        self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        self.server = reactor.listenTCP(
            0, TTwisted.ThriftServerFactory(self.processor, self.pfactory), interface="127.0.0.1")
        self.portNo = self.server.getHost().port
        self.txclient = yield ClientCreator(reactor,
                                            TTwisted.ThriftClientProtocol,
                                            ThriftTest.Client,
                                            self.pfactory).connectTCP("127.0.0.1", self.portNo)
        self.client = self.txclient.client
    @defer.inlineCallbacks
    def tearDown(self):
        yield self.server.stopListening()
        self.txclient.transport.loseConnection()
    @defer.inlineCallbacks
    def testVoid(self):
        self.assertEquals((yield self.client.testVoid()), None)
    @defer.inlineCallbacks
    def testString(self):
        self.assertEquals((yield self.client.testString('Python')), 'Python')
    @defer.inlineCallbacks
    def testByte(self):
        self.assertEquals((yield self.client.testByte(63)), 63)
    @defer.inlineCallbacks
    def testI32(self):
        self.assertEquals((yield self.client.testI32(-1)), -1)
        self.assertEquals((yield self.client.testI32(0)), 0)
    @defer.inlineCallbacks
    def testI64(self):
        self.assertEquals((yield self.client.testI64(-34359738368)), -34359738368)
    @defer.inlineCallbacks
    def testDouble(self):
        self.assertEquals((yield self.client.testDouble(-5.235098235)), -5.235098235)
    # TODO: def testBinary(self) ...
    @defer.inlineCallbacks
    def testStruct(self):
        x = Xtruct()
        x.string_thing = "Zero"
        x.byte_thing = 1
        x.i32_thing = -3
        x.i64_thing = -5
        y = yield self.client.testStruct(x)
        self.assertEquals(y.string_thing, "Zero")
        self.assertEquals(y.byte_thing, 1)
        self.assertEquals(y.i32_thing, -3)
        self.assertEquals(y.i64_thing, -5)
    @defer.inlineCallbacks
    def testException(self):
        try:
            yield self.client.testException('Xception')
            self.fail("should have gotten exception")
        except Xception as x:
            self.assertEquals(x.errorCode, 1001)
            self.assertEquals(x.message, 'Xception')
        try:
            yield self.client.testException("throw_undeclared")
            self.fail("should have gotten exception")
        except TApplicationException:
            pass
        yield self.client.testException('Safe')
    @defer.inlineCallbacks
    def testOneway(self):
        yield self.client.testOneway(1)
        start, end, seconds = yield self.handler.onewaysQueue.get()
        self.assertAlmostEquals(seconds, (end - start), places=1)
        self.assertEquals((yield self.client.testI32(-1)), -1)
|
ThriftTestCase
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/group.py
|
{
"start": 29904,
"end": 30248
}
|
class ____(Protocol):
    def __call__(
        self,
        project_ids: Sequence[int],
        group_id_list: Sequence[int],
        environment_ids: Sequence[int],
        key: str,
        value: str,
        tenant_ids: dict[str, str | int] | None = None,
    ) -> Mapping[int, GroupTagValue]: ...
@register(Group)
|
_EnvironmentSeenStatsFunc
|
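The row above types a callable via a `typing.Protocol` with a `__call__` signature, so any function matching that shape satisfies it structurally. A minimal runnable sketch of the same idiom (all names are illustrative):

```python
from typing import Protocol

class Formatter(Protocol):
    def __call__(self, value: int, *, width: int = 0) -> str: ...

def render(fmt: Formatter, value: int) -> str:
    return fmt(value, width=4)

# Any callable with a matching signature satisfies the protocol structurally;
# no inheritance or registration is required.
def hex_fmt(value: int, *, width: int = 0) -> str:
    return f"{value:#0{width}x}"

print(render(hex_fmt, 255))  # 0xff
```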
python
|
python__mypy
|
test-data/unit/plugins/attrhook2.py
|
{
"start": 164,
"end": 1165
}
|
class ____(Plugin):
    def get_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
        if fullname == "m.Magic.magic_field":
            return magic_field_callback
        if fullname == "m.Magic.nonexistent_field":
            return nonexistent_field_callback
        if fullname == "m.Magic.no_assignment_field":
            return no_assignment_field_callback
        return None
def magic_field_callback(ctx: AttributeContext) -> Type:
    return ctx.api.named_generic_type("builtins.str", [])
def nonexistent_field_callback(ctx: AttributeContext) -> Type:
    ctx.api.fail("Field does not exist", ctx.context)
    return AnyType(TypeOfAny.from_error)
def no_assignment_field_callback(ctx: AttributeContext) -> Type:
    if ctx.is_lvalue:
        ctx.api.fail(f"Cannot assign to field", ctx.context)
        return AnyType(TypeOfAny.from_error)
    return ctx.default_attr_type
def plugin(version: str) -> type[AttrPlugin]:
    return AttrPlugin
|
AttrPlugin
|
python
|
python-excel__xlwt
|
xlwt/antlr.py
|
{
"start": 21406,
"end": 22100
}
|
class ____(CommonToken):
    def __init__(self,*args):
        CommonToken.__init__(self,*args)
        self.hiddenBefore = None
        self.hiddenAfter = None
    def getHiddenAfter(self):
        return self.hiddenAfter
    def getHiddenBefore(self):
        return self.hiddenBefore
    def setHiddenAfter(self,t):
        self.hiddenAfter = t
    def setHiddenBefore(self, t):
        self.hiddenBefore = t
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Queue ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
## Shall be a circular buffer on tokens ..
|
CommonHiddenStreamToken
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/beta_text_editor_code_execution_view_result_block.py
|
{
"start": 247,
"end": 548
}
|
class ____(BaseModel):
    content: str
    file_type: Literal["text", "image", "pdf"]
    num_lines: Optional[int] = None
    start_line: Optional[int] = None
    total_lines: Optional[int] = None
    type: Literal["text_editor_code_execution_view_result"]
|
BetaTextEditorCodeExecutionViewResultBlock
|
python
|
aio-libs__aiohttp
|
tests/test_multipart.py
|
{
"start": 2740,
"end": 4274
}
|
class ____:
    def test_at_eof(self) -> None:
        m_resp = mock.create_autospec(aiohttp.ClientResponse, spec_set=True)
        m_stream = mock.create_autospec(MultipartReader, spec_set=True)
        wrapper = MultipartResponseWrapper(m_resp, m_stream)
        wrapper.at_eof()
        assert m_resp.content.at_eof.called
    async def test_next(self) -> None:
        m_resp = mock.create_autospec(aiohttp.ClientResponse, spec_set=True)
        m_stream = mock.create_autospec(MultipartReader, spec_set=True)
        wrapper = MultipartResponseWrapper(m_resp, m_stream)
        m_stream.next.return_value = b""
        m_stream.at_eof.return_value = False
        await wrapper.next()
        assert m_stream.next.called
    async def test_release(self) -> None:
        m_resp = mock.create_autospec(aiohttp.ClientResponse, spec_set=True)
        m_stream = mock.create_autospec(MultipartReader, spec_set=True)
        wrapper = MultipartResponseWrapper(m_resp, m_stream)
        await wrapper.release()
        assert m_resp.release.called
    async def test_release_when_stream_at_eof(self) -> None:
        m_resp = mock.create_autospec(aiohttp.ClientResponse, spec_set=True)
        m_stream = mock.create_autospec(MultipartReader, spec_set=True)
        wrapper = MultipartResponseWrapper(m_resp, m_stream)
        m_stream.next.return_value = b""
        m_stream.at_eof.return_value = True
        await wrapper.next()
        assert m_stream.next.called
        assert m_resp.release.called
|
TestMultipartResponseWrapper
|
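The tests above rely on `mock.create_autospec(..., spec_set=True)` so the mocks reject anything the real classes do not expose. A tiny standalone sketch of that safety property (the `Resource` class is hypothetical):

```python
from unittest import mock

class Resource:
    def close(self) -> None: ...

# instance=True mocks an *instance* of Resource; spec_set rejects attributes
# the real class does not define, so tests fail loudly on API drift.
m = mock.create_autospec(Resource, spec_set=True, instance=True)
m.close()
assert m.close.called
try:
    m.shutdown()  # not defined on Resource
except AttributeError:
    print("caught API drift")
```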
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 65470,
"end": 66460
}
|
class ____(TypedDict, total=False):
    """
    :class:`altair.BindDirect` ``TypedDict`` wrapper.

    Parameters
    ----------
    element
        An input element that exposes a *value* property and supports the `EventTarget
        <https://developer.mozilla.org/en-US/docs/Web/API/EventTarget>`__ interface, or a
        CSS selector string to such an element. When the element updates and dispatches an
        event, the *value* property will be used as the new, bound signal value. When the
        signal updates independent of the element, the *value* property will be set to the
        signal value and a new event will be dispatched on the element.
    debounce
        If defined, delays event handling until the specified milliseconds have elapsed
        since the last event was fired.
    event
        The event (default ``"input"``) to listen for to track changes on the external
        element.
    """
    element: str
    debounce: float
    event: str
|
BindDirectKwds
|
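The `total=False` on the `TypedDict` above is what makes every key optional. A minimal sketch of the difference (names are illustrative):

```python
from typing import TypedDict

class BindKwds(TypedDict, total=False):
    element: str
    debounce: float
    event: str

# total=False: partial dicts type-check; omitted keys are simply absent.
kwds: BindKwds = {"element": "#slider", "event": "change"}
print(kwds.get("debounce"))  # None - key may legitimately be missing
```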
python
|
django__django
|
tests/forms_tests/tests/test_forms.py
|
{
"start": 208121,
"end": 208277
}
|
class ____(DjangoTemplates):
    form_template_name = "forms_tests/form_snippet.html"
    field_template_name = "forms_tests/custom_field.html"
|
CustomRenderer
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/tests/observers/test_ecs_observer.py
|
{
"start": 33759,
"end": 41424
}
|
class ____:
    @pytest.fixture
    def sample_event(self):
        return {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": 1},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
    @pytest.fixture
    def sample_tags(self):
        return {"prefect.io/flow-run-id": str(uuid.uuid4())}
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_with_non_zero_exit_codes(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a running flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        mock_propose_state.assert_called_once()
        # Verify the proposed state is a Crashed state
        call_args = mock_propose_state.call_args[1]
        proposed_state = call_args["state"]
        assert proposed_state.type == StateType.CRASHED
        assert proposed_state.name == "Crashed"
        assert call_args["flow_run_id"] == flow_run_id
        assert call_args["client"] == mock_client
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_with_all_zero_exit_codes(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": 0},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a running flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should not propose crashed state when all containers have exit code 0
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_with_none_exit_codes(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": None},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a running flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should propose crashed state when exit code is None (undefined exit)
        mock_propose_state.assert_called_once()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    async def test_mark_runs_as_crashed_missing_task_arn(
        self, mock_get_client, sample_tags
    ):
        event = {"detail": {}}
        await mark_runs_as_crashed(event, sample_tags)
        # Should exit early without creating client
        mock_get_client.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    async def test_mark_runs_as_crashed_flow_run_not_found(
        self, mock_get_client, sample_event, sample_tags
    ):
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        mock_client.read_flow_run.side_effect = ObjectNotFound("Flow run not found")
        await mark_runs_as_crashed(sample_event, sample_tags)
        # Should handle the exception gracefully
        mock_client.read_flow_run.assert_called_once()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_skips_final_states(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a completed flow run (final state)
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="COMPLETED", name="Completed"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should not propose state for final states
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_skips_scheduled_states(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a scheduled flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="SCHEDULED", name="Scheduled"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should not propose state for scheduled states
        mock_propose_state.assert_not_called()
|
TestMarkRunsAsCrashed
|
python
|
ray-project__ray
|
python/ray/tune/tests/tutorial.py
|
{
"start": 491,
"end": 6333
}
|
class ____(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        # In this example, we don't change the model architecture
        # due to simplicity.
        self.conv1 = nn.Conv2d(1, 3, kernel_size=3)
        self.fc = nn.Linear(192, 10)
    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 3))
        x = x.view(-1, 192)
        x = self.fc(x)
        return F.log_softmax(x, dim=1)
# __model_def_end__
# fmt: on
# fmt: off
# __train_def_begin__
# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256
def train_func(model, optimizer, train_loader):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # We set this just for the example to run quickly.
        if batch_idx * len(data) > EPOCH_SIZE:
            return
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
def test_func(model, data_loader):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(data_loader):
            # We set this just for the example to run quickly.
            if batch_idx * len(data) > TEST_SIZE:
                break
            data, target = data.to(device), target.to(device)
            outputs = model(data)
            _, predicted = torch.max(outputs.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return correct / total
# __train_def_end__
# __train_func_begin__
import os
import tempfile
from ray.tune import Checkpoint
def train_mnist(config):
    # Data Setup
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    train_loader = DataLoader(
        datasets.MNIST("~/data", train=True, download=True, transform=mnist_transforms),
        batch_size=64,
        shuffle=True)
    test_loader = DataLoader(
        datasets.MNIST("~/data", train=False, transform=mnist_transforms),
        batch_size=64,
        shuffle=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ConvNet()
    model.to(device)
    optimizer = optim.SGD(
        model.parameters(), lr=config["lr"], momentum=config["momentum"])
    for i in range(10):
        train_func(model, optimizer, train_loader)
        acc = test_func(model, test_loader)
        with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
            checkpoint = None
            if (i + 1) % 5 == 0:
                # This saves the model to the trial directory
                torch.save(
                    model.state_dict(),
                    os.path.join(temp_checkpoint_dir, "model.pth")
                )
                checkpoint = Checkpoint.from_directory(temp_checkpoint_dir)
            # Send the current training result back to Tune
            tune.report({"mean_accuracy": acc}, checkpoint=checkpoint)
# __train_func_end__
# fmt: on
# __eval_func_begin__
search_space = {
    "lr": tune.sample_from(lambda spec: 10 ** (-10 * np.random.rand())),
    "momentum": tune.uniform(0.1, 0.9),
}
# Uncomment this to enable distributed execution
# `ray.init(address="auto")`
# Download the dataset first
datasets.MNIST("~/data", train=True, download=True)
tuner = tune.Tuner(
    train_mnist,
    param_space=search_space,
)
results = tuner.fit()
# __eval_func_end__
# __plot_begin__
dfs = {result.path: result.metrics_dataframe for result in results}
[d.mean_accuracy.plot() for d in dfs.values()]
# __plot_end__
# __run_scheduler_begin__
tuner = tune.Tuner(
    train_mnist,
    tune_config=tune.TuneConfig(
        num_samples=20,
        scheduler=ASHAScheduler(metric="mean_accuracy", mode="max"),
    ),
    param_space=search_space,
)
results = tuner.fit()
# Obtain a trial dataframe from all run trials of this `tune.run` call.
dfs = {result.path: result.metrics_dataframe for result in results}
# __run_scheduler_end__
# fmt: off
# __plot_scheduler_begin__
# Plot by epoch
ax = None  # This plots everything on the same plot
for d in dfs.values():
    ax = d.mean_accuracy.plot(ax=ax, legend=False)
# __plot_scheduler_end__
# fmt: on
# __run_searchalg_begin__
from hyperopt import hp
from ray.tune.search.hyperopt import HyperOptSearch
space = {
    "lr": hp.loguniform("lr", -10, -1),
    "momentum": hp.uniform("momentum", 0.1, 0.9),
}
hyperopt_search = HyperOptSearch(space, metric="mean_accuracy", mode="max")
tuner = tune.Tuner(
    train_mnist,
    tune_config=tune.TuneConfig(
        num_samples=10,
        search_alg=hyperopt_search,
    ),
)
results = tuner.fit()
# To enable GPUs, use this instead:
# analysis = tune.run(
#     train_mnist, config=search_space, resources_per_trial={'gpu': 1})
# __run_searchalg_end__
# __run_analysis_begin__
best_result = results.get_best_result("mean_accuracy", mode="max")
with best_result.checkpoint.as_directory() as checkpoint_dir:
    state_dict = torch.load(os.path.join(checkpoint_dir, "model.pth"))
model = ConvNet()
model.load_state_dict(state_dict)
# __run_analysis_end__
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
# __trainable_run_begin__
search_space = {
    "lr": tune.sample_from(lambda spec: 10 ** (-10 * np.random.rand())),
    "momentum": tune.uniform(0.1, 0.9),
}
tuner = tune.Tuner(
    TrainMNIST,
    run_config=tune.RunConfig(stop={"training_iteration": 10}),
    param_space=search_space,
)
results = tuner.fit()
# __trainable_run_end__
|
ConvNet
|
python
|
scrapy__scrapy
|
tests/test_pipelines.py
|
{
"start": 772,
"end": 1009
}
|
class ____:
    def open_spider(self, spider):
        pass
    def close_spider(self, spider):
        pass
    def process_item(self, item, spider):
        item["pipeline_passed"] = True
        return item
|
DeprecatedSpiderArgPipeline
|
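A pipeline like the one above only runs once it is enabled in the project settings; a hedged sketch of the standard Scrapy wiring (the module path is hypothetical):

```python
# settings.py: pipelines execute in ascending order of their integer value.
ITEM_PIPELINES = {
    "myproject.pipelines.DeprecatedSpiderArgPipeline": 300,
}
# Each process_item must return the item (possibly modified) or raise
# scrapy.exceptions.DropItem to stop it from reaching later pipelines.
```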
python
|
encode__django-rest-framework
|
rest_framework/fields.py
|
{
"start": 51578,
"end": 53695
}
|
class ____(Field):
default_error_messages = {
'invalid_choice': _('"{input}" is not a valid choice.')
}
html_cutoff = None
html_cutoff_text = _('More than {count} items...')
def __init__(self, choices, **kwargs):
self.choices = choices
self.html_cutoff = kwargs.pop('html_cutoff', self.html_cutoff)
self.html_cutoff_text = kwargs.pop('html_cutoff_text', self.html_cutoff_text)
self.allow_blank = kwargs.pop('allow_blank', False)
super().__init__(**kwargs)
def to_internal_value(self, data):
if data == '' and self.allow_blank:
return ''
if isinstance(data, Enum) and str(data) != str(data.value):
data = data.value
try:
return self.choice_strings_to_values[str(data)]
except KeyError:
self.fail('invalid_choice', input=data)
def to_representation(self, value):
if value in ('', None):
return value
if isinstance(value, Enum) and str(value) != str(value.value):
value = value.value
return self.choice_strings_to_values.get(str(value), value)
def iter_options(self):
"""
Helper method for use with templates rendering select widgets.
"""
return iter_options(
self.grouped_choices,
cutoff=self.html_cutoff,
cutoff_text=self.html_cutoff_text
)
def _get_choices(self):
return self._choices
def _set_choices(self, choices):
self.grouped_choices = to_choices_dict(choices)
self._choices = flatten_choices_dict(self.grouped_choices)
# Map the string representation of choices to the underlying value.
        # Allows us to deal with e.g. integer choices while supporting either
# integer or string input, but still get the correct datatype out.
self.choice_strings_to_values = {
str(key.value) if isinstance(key, Enum) and str(key) != str(key.value) else str(key): key for key in self.choices
}
choices = property(_get_choices, _set_choices)
|
ChoiceField
|
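To illustrate the string-keyed mapping that _set_choices builds above, here is a small usage sketch (not from the source; the choices are illustrative values):

# Integer choices accept both native ints and their string form, because
# choice_strings_to_values keys every choice by str(key).
from rest_framework import serializers

field = serializers.ChoiceField(choices=[(1, "one"), (2, "two")])
assert field.to_internal_value("1") == 1  # string input mapped to the int choice
assert field.to_internal_value(2) == 2    # native int accepted directly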
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 17788,
"end": 18798
}
|
class ____(object):
"""*
jina gRPC service to trigger a snapshot at the Executor Runtime.
"""
def snapshot(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JinaExecutorSnapshotServicer_to_server(servicer, server):
rpc_method_handlers = {
'snapshot': grpc.unary_unary_rpc_method_handler(
servicer.snapshot,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'jina.JinaExecutorSnapshot', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
|
JinaExecutorSnapshotServicer
|
python
|
falconry__falcon
|
tests/_wsgi_test_app.py
|
{
"start": 108,
"end": 739
}
|
class ____:
def on_post(self, req, resp):
parts = {}
for part in req.media:
# NOTE(vytas): SHA1 is no longer recommended for cryptographic
# purposes, but here we are only using it for integrity checking.
sha1 = hashlib.sha1()
while True:
chunk = part.stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:
break
sha1.update(chunk)
parts[part.name] = {
'filename': part.filename,
'sha1': sha1.hexdigest(),
}
resp.media = parts
|
Forms
|
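The part-reading loop above is the standard chunked-hashing pattern; a standalone sketch of the same idea, applicable to any binary stream:

# Standalone sketch of the chunked-hash pattern used in the handler above.
import hashlib
import io

def sha1_of_stream(stream, bufsize=io.DEFAULT_BUFFER_SIZE):
    digest = hashlib.sha1()  # integrity check only, not for cryptographic use
    while True:
        chunk = stream.read(bufsize)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()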
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/tests/test_filters.py
|
{
"start": 4962,
"end": 10974
}
|
class ____(OrganizationFilterTestCase):
def get_filterset_for_user(self, user, organization, data=None, **kwargs):
self.client.force_login(user)
url = reverse("organization_detail", kwargs={"slug": organization.slug})
resp = self.client.get(url, data=data)
return resp.context_data.get("filter")
@pytest.mark.parametrize(
"user,organization,project",
[
("user_a", "org_a", "project_a"),
("owner_a", "org_a", "project_a"),
("user_b", "org_b", "project_b"),
("owner_b", "org_b", "project_b"),
],
indirect=True,
)
def test_unfiltered_queryset(self, user, organization, project):
"""No active filters returns full queryset."""
filter = self.get_filterset_for_user(user, organization)
assertQuerySetEqual(
filter.qs,
[project],
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,project",
[
("user_a", "org_a", "project_a"),
("owner_a", "org_a", "project_a"),
("user_b", "org_b", "project_b"),
("owner_b", "org_b", "project_b"),
],
indirect=True,
)
def test_filtered_queryset_project_choice(self, user, organization, project):
"""Valid project choice returns expected results."""
filter = self.get_filterset_for_user(
user,
organization,
data={"slug": project.slug},
)
assertQuerySetEqual(
filter.qs,
[project],
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,project",
[
("user_a", "org_a", "project_a"),
("owner_a", "org_a", "project_a"),
("user_b", "org_b", "project_b"),
("owner_b", "org_b", "project_b"),
],
indirect=True,
)
def test_filtered_queryset_project_invalid_choice(
self, user, organization, project
):
"""Invalid project choice returns the original queryset."""
wrong_project = fixture.get(Project)
filter = self.get_filterset_for_user(
user,
organization,
data={"slug": wrong_project.slug},
)
assert not filter.is_valid()
# The full queryset is still returned when a filterset is invalid
assertQuerySetEqual(
filter.qs,
[project],
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,team,projects",
[
("user_a", "org_a", "team_a", ["project_a"]),
("owner_a", "org_a", "team_a", ["project_a"]),
("user_a", "org_a", "team_a_empty", ["project_a"]),
("owner_a", "org_a", "team_a_empty", []),
("user_b", "org_b", "team_b", ["project_b"]),
("owner_b", "org_b", "team_b", ["project_b"]),
],
indirect=["user", "organization", "team"],
)
def test_filtered_queryset_team_choice(
self, user, organization, team, projects, filter_data
):
"""Valid team choice returns expected results."""
filter = self.get_filterset_for_user(
user,
organization,
data={"teams__slug": team.slug},
)
assertQuerySetEqual(
filter.qs,
[filter_data["projects"][key] for key in projects],
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,project",
[
("user_a", "org_a", "project_a"),
("owner_a", "org_a", "project_a"),
("user_b", "org_b", "project_b"),
("owner_b", "org_b", "project_b"),
],
indirect=True,
)
def test_filtered_queryset_team_invalid_choice(self, user, organization, project):
"""Invalid team choice returns the original queryset."""
wrong_team = fixture.get(Team)
filter = self.get_filterset_for_user(
user,
organization,
data={"teams__slug": wrong_team.slug},
)
assert not filter.is_valid()
# By default, invalid filtersets return the original queryset
assertQuerySetEqual(
filter.qs,
[project],
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,project",
[
("user_a", "org_a", "project_a"),
("owner_a", "org_a", "project_a"),
("user_b", "org_b", "project_b"),
("owner_b", "org_b", "project_b"),
],
indirect=True,
)
def test_project_filter_choices(self, user, organization, project):
"""Project filter choices limited to organization projects."""
filter = self.get_filterset_for_user(
user,
organization,
)
assert list(dict(filter.filters["slug"].field.choices).keys()) == [
"",
project.slug,
]
@pytest.mark.parametrize(
"user,organization,teams",
[
("user_a", "org_a", ["team_a"]),
("owner_a", "org_a", ["team_a", "team_a_empty"]),
("user_b", "org_b", ["team_b"]),
("owner_b", "org_b", ["team_b"]),
],
indirect=["user", "organization"],
)
def test_team_filter_choices(self, user, organization, teams, filter_data):
"""Team filter choices limited to organization teams."""
filter = self.get_filterset_for_user(
user,
organization,
)
choices = [filter_data["teams"][key].slug for key in teams]
choices.insert(0, "")
assert list(dict(filter.filters["teams__slug"].field.choices).keys()) == choices
|
TestOrganizationProjectFilterSet
|
python
|
getsentry__sentry
|
src/sentry/integrations/slack/analytics.py
|
{
"start": 513,
"end": 646
}
|
class ____(BaseNotificationSent):
pass
@analytics.eventclass("integrations.slack.identity_linked")
|
SlackIntegrationNotificationSent
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/init_ops_test.py
|
{
"start": 12283,
"end": 14176
}
|
class ____(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
init3 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
init4 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init3, init4))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
init3 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
self.assertFalse(identicaltest(self, init1, init3))
self.assertFalse(identicaltest(self, init2, init3))
@test_util.run_deprecated_v1
def testZeroSize(self):
shape = [0, 2]
with self.cached_session():
x = variable_scope.get_variable(
"x",
shape=shape,
initializer=init_ops.uniform_unit_scaling_initializer())
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(shape, self.evaluate(x).shape)
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.uniform_unit_scaling_initializer()
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.uniform_unit_scaling_initializer,
dtype=dtypes.string)
|
UniformUnitScalingInitializationTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/schedule_definition.py
|
{
"start": 3396,
"end": 6028
}
|
class ____(Enum):
RUNNING = "RUNNING"
STOPPED = "STOPPED"
def get_or_create_schedule_context(
fn: Callable[..., Any], *args: Any, **kwargs: Any
) -> "ScheduleEvaluationContext":
"""Based on the passed resource function and the arguments passed to it, returns the
user-passed ScheduleEvaluationContext or creates one if it is not passed.
Raises an exception if the user passes more than one argument or if the user-provided
function requires a context parameter but none is passed.
"""
from dagster._config.pythonic_config import is_coercible_to_resource
from dagster._core.definitions.sensor_definition import get_context_param_name
context_param_name = get_context_param_name(fn)
kwarg_keys_non_resource = set(kwargs.keys()) - {param.name for param in get_resource_args(fn)}
if len(args) + len(kwarg_keys_non_resource) > 1:
raise DagsterInvalidInvocationError(
"Schedule invocation received multiple non-resource arguments. Only a first "
"positional context parameter should be provided when invoking."
)
if any(is_coercible_to_resource(arg) for arg in args):
raise DagsterInvalidInvocationError(
"If directly invoking a schedule, you may not provide resources as"
" positional arguments, only as keyword arguments."
)
context: Optional[ScheduleEvaluationContext] = None
if len(args) > 0:
context = check.opt_inst(args[0], ScheduleEvaluationContext)
elif len(kwargs) > 0:
if context_param_name and context_param_name not in kwargs:
raise DagsterInvalidInvocationError(
f"Schedule invocation expected argument '{context_param_name}'."
)
context = check.opt_inst(
kwargs.get(context_param_name or "context"), ScheduleEvaluationContext
)
elif context_param_name:
# If the context parameter is present but no value was provided, we error
raise DagsterInvalidInvocationError(
"Schedule evaluation function expected context argument, but no context argument "
"was provided when invoking."
)
context = context or build_schedule_context()
resource_args_from_kwargs = {}
resource_args = {param.name for param in get_resource_args(fn)}
for resource_arg in resource_args:
if resource_arg in kwargs:
resource_args_from_kwargs[resource_arg] = kwargs[resource_arg]
if resource_args_from_kwargs:
return context.merge_resources(resource_args_from_kwargs)
return context
@public
|
DefaultScheduleStatus
|
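Given how get_or_create_schedule_context resolves arguments above, directly invoking a schedule in a test looks roughly like the sketch below; my_schedule is a placeholder for a schedule defined elsewhere with @schedule:

# Hedged sketch: the context is the single allowed positional argument,
# matching the checks in get_or_create_schedule_context above.
from dagster import build_schedule_context

context = build_schedule_context()
result = my_schedule(context)  # yields RunRequest(s) or a SkipReason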
python
|
pytorch__pytorch
|
test/jit/test_module_interface.py
|
{
"start": 960,
"end": 22829
}
|
class ____(JitTestCase):
def test_not_submodule_interface_call(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
class TestNotModuleInterfaceCall(nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.two(input)
with self.assertRaisesRegexWithHighlight(
RuntimeError, "object has no attribute or method", "self.proxy_mod.two"
):
torch.jit.script(TestNotModuleInterfaceCall())
def test_module_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
pass
def two(self, x: Tensor) -> Tensor:
pass
def forward(self, x: Tensor) -> Tensor:
pass
@torch.jit.interface
class OneTwoClass:
def one(self, x: Tensor, y: Tensor) -> Tensor:
pass
def two(self, x: Tensor) -> Tensor:
pass
class FooMod(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
return x + y
def two(self, x: Tensor) -> Tensor:
return 2 * x
def forward(self, x: Tensor) -> Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
return x * y
def two(self, x: Tensor) -> Tensor:
return 2 / x
def forward(self, x: Tensor) -> Tensor:
return self.two(self.one(x, x))
@torch.jit.export
def forward2(self, x: Tensor) -> Tensor:
return self.two(self.one(x, x)) + 1
make_global(OneTwoModule, OneTwoClass)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
def use_class_interface(mod_list: List[OneTwoClass], x: Tensor) -> Tensor:
return mod_list[0].two(x) + mod_list[1].one(x, x)
scripted_foo_mod = torch.jit.script(FooMod())
scripted_bar_mod = torch.jit.script(BarMod())
self.checkScript(
use_module_interface,
(
[scripted_foo_mod, scripted_bar_mod],
torch.rand(3, 4),
),
)
self.checkScript(
use_class_interface,
(
[scripted_foo_mod, scripted_bar_mod],
torch.rand(3, 4),
),
)
def call_module_interface_on_other_method(
mod_interface: OneTwoModule, x: Tensor
) -> Tensor:
return mod_interface.forward2(x)
        # ensure we error out when calling a method on the module that the interface does not specify.
with self.assertRaisesRegexWithHighlight(
RuntimeError, "object has no attribute or method", "mod_interface.forward2"
):
self.checkScript(
call_module_interface_on_other_method,
(
scripted_bar_mod,
torch.rand(3, 4),
),
)
def test_module_doc_string(self):
@torch.jit.interface
class TestInterface(nn.Module):
def one(self, inp1, inp2):
# type: (Tensor, Tensor) -> Tensor
pass
def forward(self, input):
# type: (Tensor) -> Tensor
r"""stuff 1"""
r"""stuff 2"""
pass # noqa: PIE790
r"""stuff 3"""
class TestModule(nn.Module):
proxy_mod: TestInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input):
# type: (Tensor) -> Tensor
return self.proxy_mod.forward(input)
input = torch.randn(3, 4)
self.checkModule(TestModule(), (input,))
def test_module_interface_subtype(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
pass
def two(self, x: Tensor) -> Tensor:
pass
def forward(self, x: Tensor) -> Tensor:
pass
make_global(OneTwoModule)
@torch.jit.script
def as_module_interface(x: OneTwoModule) -> OneTwoModule:
return x
@torch.jit.script
class Foo:
def one(self, x: Tensor, y: Tensor) -> Tensor:
return x + y
def two(self, x: Tensor) -> Tensor:
return 2 * x
def forward(self, x: Tensor) -> Tensor:
return self.one(self.two(x), x)
# check class object is not a subtype of module interface
with self.assertRaisesRegex(
RuntimeError, "ScriptModule class can be subtype of module interface"
):
as_module_interface(Foo())
class WrongMod(nn.Module):
def two(self, x: int) -> int:
return 2 * x
def forward(self, x: Tensor) -> Tensor:
return x + torch.randn(3, self.two(3))
scripted_wrong_mod = torch.jit.script(WrongMod())
# wrong module that is not compatible with module interface
with self.assertRaisesRegex(RuntimeError, "is not compatible with interface"):
as_module_interface(scripted_wrong_mod)
# Check that interface implementations can be contravariant in argument types and covariant in return type.
@torch.jit.interface
class TensorToAny(nn.Module):
def forward(self, input: torch.Tensor) -> Any:
pass
make_global(TensorToAny)
@torch.jit.script
def as_tensor_to_any(x: TensorToAny) -> TensorToAny:
return x
@torch.jit.interface
class AnyToAny(nn.Module):
def forward(self, input: Any) -> Any:
pass
make_global(AnyToAny)
@torch.jit.script
def as_any_to_any(x: AnyToAny) -> AnyToAny:
return x
class TensorToAnyImplA(nn.Module):
def forward(self, input: Any) -> Any:
return input
class TensorToAnyImplB(nn.Module):
def forward(self, input: Any) -> torch.Tensor:
return torch.tensor([1])
class AnyToAnyImpl(nn.Module):
def forward(self, input: Any) -> torch.Tensor:
return torch.tensor([1])
as_tensor_to_any(torch.jit.script(TensorToAnyImplA()))
as_tensor_to_any(torch.jit.script(TensorToAnyImplB()))
as_any_to_any(torch.jit.script(AnyToAnyImpl()))
def test_module_interface_inheritance(self):
with self.assertRaisesRegex(
RuntimeError, "does not support inheritance yet. Please directly"
):
@torch.jit.interface
class InheritMod(nn.ReLU):
def three(self, x: Tensor) -> Tensor:
return 3 * x
def test_module_swap(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class TestModule(nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
scripted_mod = torch.jit.script(TestModule())
input = torch.randn(3, 4)
self.assertEqual(scripted_mod(input), 3 * input + 2)
        # module swap with a module that has the same interface
scripted_mod.proxy_mod = torch.jit.script(NewModule())
self.assertEqual(scripted_mod(input), input * (input + 1) + 1)
        # module swap with a non-scripted module should throw an error
with self.assertRaisesRegex(
RuntimeError, "a ScriptModule with non-scripted module"
):
scripted_mod.proxy_mod = NewModule()
def test_module_swap_wrong_module(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class NewModuleWrong(nn.Module):
def forward(self, input: int) -> int:
return input + 1
class TestModule(nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
scripted_mod = torch.jit.script(TestModule())
        # module swap with an incompatible interface
with self.assertRaisesRegex(RuntimeError, "is not compatible with interface"):
scripted_mod.proxy_mod = torch.jit.script(NewModuleWrong())
def test_module_swap_no_lazy_compile(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class TestModule(nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
class NewModuleMethodNotLazyCompile(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return input + 1
scripted_mod = torch.jit.script(TestModule())
        # module swap with a module that has the same interface, but whose method is not
        # lazily compiled from forward; the user needs to export it explicitly for the swap to work
with self.assertRaisesRegex(RuntimeError, "is not compatible with interface"):
scripted_mod.proxy_mod = torch.jit.script(NewModuleMethodNotLazyCompile())
class NewModuleMethodManualExport(nn.Module):
@torch.jit.export
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return input + 1
scripted_mod.proxy_mod = torch.jit.script(NewModuleMethodManualExport())
input = torch.randn(3, 4)
self.assertEqual(scripted_mod(input), input + 1)
def test_module_swap_no_module_interface(self):
# test module swapping with no module interface
class TestNoModuleInterface(nn.Module):
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod(input)
scripted_no_module_interface = torch.jit.script(TestNoModuleInterface())
        # proxy_mod is swapped with a new ScriptModule that shares the same JIT type; should succeed.
scripted_no_module_interface.proxy_mod = torch.jit.script(OrigModule())
        # proxy_mod is neither a module interface nor shares the same JIT type; should fail
with self.assertRaisesRegex(
RuntimeError,
r"Expected a value of type '__torch__.jit.test_module_interface.OrigModule \(.*\)' "
+ r"for field 'proxy_mod', but found '__torch__.jit.test_module_interface.NewModule \(.*\)'",
):
scripted_no_module_interface.proxy_mod = torch.jit.script(NewModule())
def test_script_module_as_interface_swap(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class OrigScriptModule(torch.jit.ScriptModule):
@torch.jit.script_method
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 + inp2 + 1
@torch.jit.script_method
def forward(self, input: Tensor) -> Tensor:
return input + self.one(input, input) + 1
class NewScriptModule(torch.jit.ScriptModule):
@torch.jit.script_method
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
@torch.jit.script_method
def forward(self, input: Tensor) -> Tensor:
return self.one(input, input + 1)
class TestNNModuleWithScriptModule(nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigScriptModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
input = torch.randn(3, 4)
scripted_mod = torch.jit.script(TestNNModuleWithScriptModule())
self.assertEqual(scripted_mod(input), 3 * input + 2)
scripted_mod.proxy_mod = NewScriptModule()
self.assertEqual(scripted_mod(input), input * (input + 1) + 1)
    # The call to forward of proxy_mod cannot be inlined; make sure
    # freezing throws an error for now.
def test_freeze_module_with_interface(self):
class SubModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = 20
def forward(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 0
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> int:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule() # folded
def forward(self, x):
return self.proxy_mod(x) + self.sub(x)
m = torch.jit.script(TestModule())
m.eval()
mf = torch._C._freeze_module(m._c)
# Assume interface has no aliasing
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
input = torch.tensor([1])
out_s = m.forward(input)
out_f = mf.forward(input)
self.assertEqual(out_s, out_f)
def test_freeze_module_with_setattr_in_interface(self):
class SubModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = 20
def forward(self, x):
self.b += 2
return self.b
@torch.jit.export
def getb(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 0
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> int:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
return self.proxy_mod(x) + self.sub.getb(x)
m = torch.jit.script(TestModule())
m.proxy_mod = m.sub
m.eval()
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_inplace_mutation_in_interface(self):
class SubModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = torch.tensor([1.5])
def forward(self, x):
self.b[0] += 2
return self.b
@torch.jit.export
def getb(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([0.5])
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
y = self.proxy_mod(x)
z = self.sub.getb(x)
return y[0] + z[0]
m = torch.jit.script(TestModule())
m.proxy_mod = m.sub
m.sub.b = m.proxy_mod.b
m.eval()
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_mutated_interface(self):
class SubModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = torch.tensor([1.5])
def forward(self, x):
return self.b
@torch.jit.export
def getb(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([0.5])
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
self.proxy_mod = self.sub
y = self.proxy_mod(x)
z = self.sub.getb(x)
return y[0] + z[0]
m = torch.jit.script(TestModule())
m.eval()
with self.assertRaisesRegex(
RuntimeError, "Freezing does not support SetAttr on an interface type."
):
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_interface_and_fork(self):
class SubModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = torch.tensor([1.5])
def forward(self, x):
self.b[0] += 3.2
return self.b
class OrigMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([0.5])
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
y = self.proxy_mod(x)
z = self.sub(x)
return y + z
class MainModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.test = TestModule()
def forward(self, x):
fut = torch.jit._fork(self.test.forward, x)
y = self.test(x)
z = torch.jit._wait(fut)
return y + z
m = torch.jit.script(MainModule())
m.eval()
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_module_apis_interface(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
class TestModule(nn.Module):
proxy_mod: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigModule()
def forward(self, input):
return input * 2
@torch.jit.export
def method(self, input):
for module in self.modules():
input = module(input)
return input
with self.assertRaisesRegex(Exception, "Could not compile"):
scripted_mod = torch.jit.script(TestModule())
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
TestModuleInterface
|
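A condensed sketch of the interface-typed-attribute pattern these tests exercise, assuming a recent PyTorch with TorchScript; not taken from the source file:

# Minimal sketch mirroring the tests above.
import torch
import torch.nn as nn
from torch import Tensor

@torch.jit.interface
class Doubler(nn.Module):
    def forward(self, x: Tensor) -> Tensor:
        pass

class TimesTwo(nn.Module):
    def forward(self, x: Tensor) -> Tensor:
        return 2 * x

class Holder(nn.Module):
    impl: Doubler  # interface-typed attribute allows swapping implementations
    def __init__(self) -> None:
        super().__init__()
        self.impl = TimesTwo()
    def forward(self, x: Tensor) -> Tensor:
        return self.impl(x)

scripted = torch.jit.script(Holder())
assert torch.equal(scripted(torch.ones(2)), 2 * torch.ones(2))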
python
|
spack__spack
|
lib/spack/spack/vendor/attr/exceptions.py
|
{
"start": 33,
"end": 369
}
|
class ____(AttributeError):
"""
    An attempt was made to modify a frozen/immutable instance or attribute.
It mirrors the behavior of ``namedtuples`` by using the same error message
and subclassing `AttributeError`.
.. versionadded:: 20.1.0
"""
msg = "can't set attribute"
args = [msg]
|
FrozenError
|
python
|
getsentry__sentry
|
tests/sentry/users/models/test_user.py
|
{
"start": 2730,
"end": 7534
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.user_id = self.user.id
# Organization membership determines which regions the deletion will cascade to
self.organization = self.create_organization(region=_TEST_REGIONS[0])
self.create_member(user=self.user, organization=self.organization)
self.create_saved_search(
name="some-search", owner=self.user, organization=self.organization
)
@assume_test_silo_mode(SiloMode.REGION)
def user_tombstone_exists(self, user_id: int) -> bool:
return RegionTombstone.objects.filter(
table_name="auth_user", object_identifier=user_id
).exists()
@assume_test_silo_mode(SiloMode.REGION)
def get_user_saved_search_count(self) -> int:
return SavedSearch.objects.filter(owner_id=self.user_id).count()
def test_simple(self) -> None:
assert not self.user_tombstone_exists(user_id=self.user_id)
with outbox_runner():
self.user.delete()
assert not User.objects.filter(id=self.user_id).exists()
assert self.user_tombstone_exists(user_id=self.user_id)
        # cascade is asynchronous; ensure the related saved search still exists
assert self.get_user_saved_search_count() == 1
with assume_test_silo_mode(SiloMode.REGION), self.tasks():
schedule_hybrid_cloud_foreign_key_jobs()
# Ensure they are all now gone.
assert self.get_user_saved_search_count() == 0
def test_unrelated_saved_search_is_not_deleted(self) -> None:
another_user = self.create_user()
self.create_member(user=another_user, organization=self.organization)
self.create_saved_search(
name="another-search", owner=another_user, organization=self.organization
)
with outbox_runner():
self.user.delete()
with assume_test_silo_mode(SiloMode.REGION), self.tasks():
schedule_hybrid_cloud_foreign_key_jobs()
with assume_test_silo_mode(SiloMode.REGION):
assert SavedSearch.objects.filter(owner_id=another_user.id).exists()
def test_cascades_to_multiple_regions(self) -> None:
eu_org = self.create_organization(region=_TEST_REGIONS[1])
self.create_member(user=self.user, organization=eu_org)
self.create_saved_search(name="eu-search", owner=self.user, organization=eu_org)
with outbox_runner():
self.user.delete()
assert self.get_user_saved_search_count() == 2
with assume_test_silo_mode(SiloMode.REGION), self.tasks():
schedule_hybrid_cloud_foreign_key_jobs()
assert self.get_user_saved_search_count() == 0
def test_deletions_create_tombstones_in_regions_for_user_with_no_orgs(self) -> None:
# Create a user with no org memberships
user_to_delete = self.create_user("foo@example.com")
user_id = user_to_delete.id
with outbox_runner():
user_to_delete.delete()
assert self.user_tombstone_exists(user_id=user_id)
def test_cascades_to_regions_even_if_user_ownership_revoked(self) -> None:
eu_org = self.create_organization(region=_TEST_REGIONS[1])
self.create_member(user=self.user, organization=eu_org)
self.create_saved_search(name="eu-search", owner=self.user, organization=eu_org)
assert self.get_user_saved_search_count() == 2
with outbox_runner(), assume_test_silo_mode_of(OrganizationMember):
for member in OrganizationMember.objects.filter(user_id=self.user.id):
member.delete()
assert find_regions_for_user(self.user.id) == set()
with outbox_runner():
self.user.delete()
assert self.get_user_saved_search_count() == 2
with assume_test_silo_mode(SiloMode.REGION), self.tasks():
schedule_hybrid_cloud_foreign_key_jobs()
assert self.get_user_saved_search_count() == 0
def test_update_purge_region_cache(self) -> None:
user = self.create_user()
na_org = self.create_organization(region=_TEST_REGIONS[0])
self.create_member(user=user, organization=na_org)
with patch.object(caching_module, "region_caching_service") as mock_caching_service:
user.username = "bob2"
user.save()
mock_caching_service.clear_key.assert_any_call(
key=f"user_service.get_many_by_id:{user.id}",
region_name=_TEST_REGIONS[0].name,
)
mock_caching_service.clear_key.assert_any_call(
key=f"user_service.get_user:{user.id}",
region_name=_TEST_REGIONS[0].name,
)
@control_silo_test
|
UserHybridCloudDeletionTest
|
python
|
getsentry__sentry
|
src/sentry/audit_log/events.py
|
{
"start": 12782,
"end": 13202
}
|
class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(
event_id=130, name="INTERNAL_INTEGRATION_ADD", api_name="internal-integration.create"
)
def render(self, audit_log_entry: AuditLogEntry) -> str:
integration_name = audit_log_entry.data.get("name") or ""
return f"created internal integration {integration_name}"
|
InternalIntegrationAddAuditLogEvent
|
python
|
tiangolo__fastapi
|
docs_src/body_nested_models/tutorial007.py
|
{
"start": 384,
"end": 581
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
items: List[Item]
@app.post("/offers/")
async def create_offer(offer: Offer):
return offer
|
Offer
|
python
|
getsentry__sentry
|
src/sentry/backup/findings.py
|
{
"start": 911,
"end": 5044
}
|
class ____(FindingKind):
Unknown = auto()
    # The instances of a particular model did not maintain total ordering of pks (that is, pks did not appear in ascending order, or appeared multiple times).
UnorderedInput = auto()
# Multiple instances of the same custom ordinal signature exist in the input.
DuplicateCustomOrdinal = auto()
    # The number of instances of a particular model differed between the left and
    # right side of the input.
UnequalCounts = auto()
# The JSON of two instances of a model, after certain fields have been scrubbed by all applicable comparators, were not byte-for-byte equivalent.
UnequalJSON = auto()
# Failed to compare an auto suffixed field.
AutoSuffixComparator = auto()
# Failed to compare an auto suffixed field because one of the fields being compared was not
# present or `None`.
AutoSuffixComparatorExistenceCheck = auto()
# Two datetime fields were not equal.
DatetimeEqualityComparator = auto()
# Failed to compare datetimes because one of the fields being compared was not present or
# `None`.
DatetimeEqualityComparatorExistenceCheck = auto()
    # The right side field's datetime value was not greater (i.e., "newer") than the left side's.
DateUpdatedComparator = auto()
# Failed to compare datetimes because one of the fields being compared was not present or
# `None`.
DateUpdatedComparatorExistenceCheck = auto()
# Email equality comparison failed.
EmailObfuscatingComparator = auto()
# Failed to compare emails because one of the fields being compared was not present or
# `None`.
EmailObfuscatingComparatorExistenceCheck = auto()
# The fields were both present but unequal.
EqualOrRemovedComparator = auto()
# The left field does not exist.
EqualOrRemovedComparatorExistenceCheck = auto()
# Hash equality comparison failed.
HashObfuscatingComparator = auto()
# Failed to compare hashes because one of the fields being compared was not present or
# `None`.
HashObfuscatingComparatorExistenceCheck = auto()
# Foreign key field comparison failed.
ForeignKeyComparator = auto()
# Failed to compare foreign key fields because one of the fields being compared was not present
# or `None`.
ForeignKeyComparatorExistenceCheck = auto()
# DataSource.source_id field comparison failed (dynamic foreign key).
DataSourceComparator = auto()
# Failed to compare DataSource.source_id field because one of the fields being compared was not present
# or `None`.
DataSourceComparatorExistenceCheck = auto()
# Failed to compare an ignored field.
IgnoredComparator = auto()
# Secret token fields did not match their regex specification.
SecretHexComparator = auto()
# Failed to compare a secret token field because one of the fields being compared was not
# present or `None`.
SecretHexComparatorExistenceCheck = auto()
# Subscription ID fields did not match their regex specification.
SubscriptionIDComparator = auto()
# Failed to compare a subscription id field because one of the fields being compared was not
# present or `None`.
SubscriptionIDComparatorExistenceCheck = auto()
# Unordered list fields did not match.
UnorderedListComparator = auto()
    # Failed to compare an unordered list field because one of the fields being compared was not
# present or `None`.
UnorderedListComparatorExistenceCheck = auto()
# UUID4 fields did not match their regex specification.
UUID4Comparator = auto()
# Failed to compare a UUID4 field because one of the fields being compared was not present or
# `None`.
UUID4ComparatorExistenceCheck = auto()
# Incorrect user password field.
UserPasswordObfuscatingComparator = auto()
# Failed to compare a user password field because one of the fields being compared was not
# present or `None`.
UserPasswordObfuscatingComparatorExistenceCheck = auto()
# Option values
OptionValueComparator = auto()
@dataclass(frozen=True)
|
ComparatorFindingKind
|
python
|
python__mypy
|
mypy/test/testmodulefinder.py
|
{
"start": 328,
"end": 5627
}
|
class ____(Suite):
def setUp(self) -> None:
self.search_paths = SearchPaths(
python_path=(),
mypy_path=(
os.path.join(data_path, "nsx-pkg1"),
os.path.join(data_path, "nsx-pkg2"),
os.path.join(data_path, "nsx-pkg3"),
os.path.join(data_path, "nsy-pkg1"),
os.path.join(data_path, "nsy-pkg2"),
os.path.join(data_path, "pkg1"),
os.path.join(data_path, "pkg2"),
),
package_path=(),
typeshed_path=(),
)
options = Options()
options.namespace_packages = True
self.fmc_ns = FindModuleCache(self.search_paths, fscache=None, options=options)
options = Options()
options.namespace_packages = False
self.fmc_nons = FindModuleCache(self.search_paths, fscache=None, options=options)
def test__no_namespace_packages__nsx(self) -> None:
"""
If namespace_packages is False, we shouldn't find nsx
"""
found_module = self.fmc_nons.find_module("nsx")
assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
def test__no_namespace_packages__nsx_a(self) -> None:
"""
If namespace_packages is False, we shouldn't find nsx.a.
"""
found_module = self.fmc_nons.find_module("nsx.a")
assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
def test__no_namespace_packages__find_a_in_pkg1(self) -> None:
"""
        Find pkg1/a.py for "a" with namespace_packages False.
"""
found_module = self.fmc_nons.find_module("a")
expected = os.path.abspath(os.path.join(data_path, "pkg1", "a.py"))
assert_equal(expected, found_module)
def test__no_namespace_packages__find_b_in_pkg2(self) -> None:
found_module = self.fmc_ns.find_module("b")
expected = os.path.abspath(os.path.join(data_path, "pkg2", "b", "__init__.py"))
assert_equal(expected, found_module)
def test__find_nsx_as_namespace_pkg_in_pkg1(self) -> None:
"""
There's no __init__.py in any of the nsx dirs, return
the path to the first one found in mypypath.
"""
found_module = self.fmc_ns.find_module("nsx")
expected = os.path.abspath(os.path.join(data_path, "nsx-pkg1", "nsx"))
assert_equal(expected, found_module)
def test__find_nsx_a_init_in_pkg1(self) -> None:
"""
Find nsx-pkg1/nsx/a/__init__.py for "nsx.a" in namespace mode.
"""
found_module = self.fmc_ns.find_module("nsx.a")
expected = os.path.abspath(os.path.join(data_path, "nsx-pkg1", "nsx", "a", "__init__.py"))
assert_equal(expected, found_module)
def test__find_nsx_b_init_in_pkg2(self) -> None:
"""
Find nsx-pkg2/nsx/b/__init__.py for "nsx.b" in namespace mode.
"""
found_module = self.fmc_ns.find_module("nsx.b")
expected = os.path.abspath(os.path.join(data_path, "nsx-pkg2", "nsx", "b", "__init__.py"))
assert_equal(expected, found_module)
def test__find_nsx_c_c_in_pkg3(self) -> None:
"""
Find nsx-pkg3/nsx/c/c.py for "nsx.c.c" in namespace mode.
"""
found_module = self.fmc_ns.find_module("nsx.c.c")
expected = os.path.abspath(os.path.join(data_path, "nsx-pkg3", "nsx", "c", "c.py"))
assert_equal(expected, found_module)
def test__find_nsy_a__init_pyi(self) -> None:
"""
Prefer nsy-pkg1/a/__init__.pyi file over __init__.py.
"""
found_module = self.fmc_ns.find_module("nsy.a")
expected = os.path.abspath(os.path.join(data_path, "nsy-pkg1", "nsy", "a", "__init__.pyi"))
assert_equal(expected, found_module)
def test__find_nsy_b__init_py(self) -> None:
"""
There is a nsy-pkg2/nsy/b.pyi, but also a nsy-pkg2/nsy/b/__init__.py.
We expect to find the latter when looking up "nsy.b" as
a package is preferred over a module.
"""
found_module = self.fmc_ns.find_module("nsy.b")
expected = os.path.abspath(os.path.join(data_path, "nsy-pkg2", "nsy", "b", "__init__.py"))
assert_equal(expected, found_module)
def test__find_nsy_c_pyi(self) -> None:
"""
There is a nsy-pkg2/nsy/c.pyi and nsy-pkg2/nsy/c.py
We expect to find the former when looking up "nsy.b" as
.pyi is preferred over .py.
"""
found_module = self.fmc_ns.find_module("nsy.c")
expected = os.path.abspath(os.path.join(data_path, "nsy-pkg2", "nsy", "c.pyi"))
assert_equal(expected, found_module)
def test__find_a_in_pkg1(self) -> None:
found_module = self.fmc_ns.find_module("a")
expected = os.path.abspath(os.path.join(data_path, "pkg1", "a.py"))
assert_equal(expected, found_module)
def test__find_b_init_in_pkg2(self) -> None:
found_module = self.fmc_ns.find_module("b")
expected = os.path.abspath(os.path.join(data_path, "pkg2", "b", "__init__.py"))
assert_equal(expected, found_module)
def test__find_d_nowhere(self) -> None:
found_module = self.fmc_ns.find_module("d")
assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
|
ModuleFinderSuite
|
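For orientation, the lookup flow the suite exercises reduces to the following hedged sketch, using the same API as setUp above; the mypy_path entry is a placeholder directory:

# Hedged sketch of module lookup with namespace packages enabled.
from mypy.modulefinder import FindModuleCache, SearchPaths
from mypy.options import Options

options = Options()
options.namespace_packages = True
search_paths = SearchPaths(
    python_path=(),
    mypy_path=("some/search/dir",),
    package_path=(),
    typeshed_path=(),
)
finder = FindModuleCache(search_paths, fscache=None, options=options)
result = finder.find_module("pkg.mod")  # a path string or a ModuleNotFoundReason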
python
|
Textualize__textual
|
tests/text_area/test_escape_binding.py
|
{
"start": 129,
"end": 360
}
|
class ____(ModalScreen):
BINDINGS = [("escape", "dismiss")]
def compose(self) -> ComposeResult:
yield TextArea(
tab_behavior="focus", # the default
)
yield Button("Submit")
|
TextAreaDialog
|
python
|
encode__django-rest-framework
|
tests/test_request.py
|
{
"start": 10422,
"end": 13184
}
|
class ____(TestCase):
def test_repr(self):
http_request = factory.get('/path')
request = Request(http_request)
assert repr(request) == "<rest_framework.request.Request: GET '/path'>"
def test_attribute_access_proxy(self):
http_request = factory.get('/')
request = Request(http_request)
inner_sentinel = object()
http_request.inner_property = inner_sentinel
assert request.inner_property is inner_sentinel
outer_sentinel = object()
request.inner_property = outer_sentinel
assert request.inner_property is outer_sentinel
def test_exception_proxy(self):
# ensure the exception message is not for the underlying WSGIRequest
http_request = factory.get('/')
request = Request(http_request)
message = "'Request' object has no attribute 'inner_property'"
with self.assertRaisesMessage(AttributeError, message):
request.inner_property
@override_settings(ROOT_URLCONF='tests.test_request')
def test_duplicate_request_stream_parsing_exception(self):
"""
Check assumption that duplicate stream parsing will result in a
`RawPostDataException` being raised.
"""
response = APIClient().post('/echo/', data={'a': 'b'}, format='json')
request = response._request
# ensure that request stream was consumed by json parser
assert request.content_type.startswith('application/json')
assert response.data == {'a': 'b'}
# pass same HttpRequest to view, stream already consumed
with pytest.raises(RawPostDataException):
EchoView.as_view()(request._request)
@override_settings(ROOT_URLCONF='tests.test_request')
def test_duplicate_request_form_data_access(self):
"""
Form data is copied to the underlying django request for middleware
and file closing reasons. Duplicate processing of a request with form
data is 'safe' in so far as accessing `request.POST` does not trigger
the duplicate stream parse exception.
"""
response = APIClient().post('/echo/', data={'a': 'b'})
request = response._request
# ensure that request stream was consumed by form parser
assert request.content_type.startswith('multipart/form-data')
assert response.data == {'a': ['b']}
# pass same HttpRequest to view, form data set on underlying request
response = EchoView.as_view()(request._request)
request = response._request
# ensure that request stream was consumed by form parser
assert request.content_type.startswith('multipart/form-data')
assert response.data == {'a': ['b']}
|
TestHttpRequest
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2_moe/modeling_lfm2_moe.py
|
{
"start": 27082,
"end": 29217
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: Lfm2MoeConfig, layer_idx: int):
super().__init__()
self.is_attention_layer = config.layer_types[layer_idx] == "full_attention"
if self.is_attention_layer:
self.self_attn = Lfm2MoeAttention(config, layer_idx)
else:
self.conv = Lfm2MoeShortConv(config, layer_idx)
self.feed_forward = (
Lfm2MoeMLP(config, intermediate_size=config.intermediate_size)
if layer_idx < config.num_dense_layers
else Lfm2MoeSparseMoeBlock(config)
)
self.operator_norm = Lfm2MoeRMSNorm(config.hidden_size, eps=config.norm_eps)
self.ffn_norm = Lfm2MoeRMSNorm(config.hidden_size, eps=config.norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Lfm2MoeHybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> torch.Tensor:
residual = hidden_states
if self.is_attention_layer:
hidden_states, _ = self.self_attn(
hidden_states=self.operator_norm(hidden_states),
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
else:
hidden_states = self.conv(
hidden_states=self.operator_norm(hidden_states),
past_key_values=past_key_values,
cache_position=cache_position,
attention_mask=attention_mask,
)
hidden_states = hidden_states + residual
hidden_states = hidden_states + self.feed_forward(self.ffn_norm(hidden_states))
return hidden_states
@auto_docstring
|
Lfm2MoeDecoderLayer
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/event/registry.py
|
{
"start": 6130,
"end": 11144
}
|
class ____(Generic[_ET]):
"""Represent :func:`.listen` arguments."""
__slots__ = (
"target",
"identifier",
"fn",
"fn_key",
"fn_wrap",
"dispatch_target",
)
target: _ET
identifier: str
fn: _ListenerFnType
fn_key: _ListenerFnKeyType
dispatch_target: Any
_fn_wrap: Optional[_ListenerFnType]
def __init__(
self,
target: _ET,
identifier: str,
fn: _ListenerFnType,
dispatch_target: Any,
_fn_wrap: Optional[_ListenerFnType] = None,
):
self.target = target
self.identifier = identifier
self.fn = fn
if isinstance(fn, types.MethodType):
self.fn_key = id(fn.__func__), id(fn.__self__)
else:
self.fn_key = id(fn)
self.fn_wrap = _fn_wrap
self.dispatch_target = dispatch_target
@property
def _key(self) -> _EventKeyTupleType:
return (id(self.target), self.identifier, self.fn_key)
def with_wrapper(self, fn_wrap: _ListenerFnType) -> _EventKey[_ET]:
if fn_wrap is self._listen_fn:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
self.dispatch_target,
_fn_wrap=fn_wrap,
)
def with_dispatch_target(self, dispatch_target: Any) -> _EventKey[_ET]:
if dispatch_target is self.dispatch_target:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
dispatch_target,
_fn_wrap=self.fn_wrap,
)
def listen(self, *args: Any, **kw: Any) -> None:
once = kw.pop("once", False)
once_unless_exception = kw.pop("_once_unless_exception", False)
named = kw.pop("named", False)
target, identifier, fn = (
self.dispatch_target,
self.identifier,
self._listen_fn,
)
dispatch_collection = getattr(target.dispatch, identifier)
adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named)
self = self.with_wrapper(adjusted_fn)
stub_function = getattr(
self.dispatch_target.dispatch._events, self.identifier
)
if hasattr(stub_function, "_sa_warn"):
stub_function._sa_warn()
if once or once_unless_exception:
self.with_wrapper(
util.only_once(
self._listen_fn, retry_on_exception=once_unless_exception
)
).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
def remove(self) -> None:
key = self._key
if key not in _key_to_collection:
raise exc.InvalidRequestError(
"No listeners found for event %s / %r / %s "
% (self.target, self.identifier, self.fn)
)
dispatch_reg = _key_to_collection.pop(key)
for collection_ref, listener_ref in dispatch_reg.items():
collection = collection_ref()
listener_fn = listener_ref()
if collection is not None and listener_fn is not None:
collection.remove(self.with_wrapper(listener_fn))
def contains(self) -> bool:
"""Return True if this event key is registered to listen."""
return self._key in _key_to_collection
def base_listen(
self,
propagate: bool = False,
insert: bool = False,
named: bool = False,
retval: Optional[bool] = None,
asyncio: bool = False,
) -> None:
target, identifier = self.dispatch_target, self.identifier
dispatch_collection = getattr(target.dispatch, identifier)
for_modify = dispatch_collection.for_modify(target.dispatch)
if asyncio:
for_modify._set_asyncio()
if insert:
for_modify.insert(self, propagate)
else:
for_modify.append(self, propagate)
@property
def _listen_fn(self) -> _ListenerFnType:
return self.fn_wrap or self.fn
def append_to_list(
self,
owner: RefCollection[_ET],
list_: Deque[_ListenerFnType],
) -> bool:
if _stored_in_collection(self, owner):
list_.append(self._listen_fn)
return True
else:
return False
def remove_from_list(
self,
owner: RefCollection[_ET],
list_: Deque[_ListenerFnType],
) -> None:
_removed_from_collection(self, owner)
list_.remove(self._listen_fn)
def prepend_to_list(
self,
owner: RefCollection[_ET],
list_: Deque[_ListenerFnType],
) -> bool:
if _stored_in_collection(self, owner):
list_.appendleft(self._listen_fn)
return True
else:
return False
|
_EventKey
|
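_EventKey is the bookkeeping behind SQLAlchemy's public event API; typical use looks like this sketch (engine is assumed to be an Engine created elsewhere):

# Illustrative sketch: event.listen registers a listener and event.remove
# tears it down again via the same key that _EventKey computes.
from sqlalchemy import event

def on_connect(dbapi_connection, connection_record):
    pass  # e.g. configure the freshly created DBAPI connection here

event.listen(engine, "connect", on_connect)
event.remove(engine, "connect", on_connect)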
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/project_user_issue.py
|
{
"start": 5253,
"end": 5568
}
|
class ____(serializers.Serializer):
transaction = serializers.CharField(required=True)
issueType = serializers.ChoiceField(required=True, choices=ISSUE_TYPE_CHOICES)
traceId = serializers.CharField(required=False)
timestamp = serializers.DateTimeField(required=False)
|
ProjectUserIssueRequestSerializer
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/alloy_db.py
|
{
"start": 27934,
"end": 33462
}
|
class ____(AlloyDBWriteBaseOperator):
"""
Update an Alloy DB instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AlloyDBUpdateInstanceOperator`
:param cluster_id: Required. ID of the cluster.
:param instance_id: Required. ID of the instance to update.
:param instance_configuration: Required. Instance to update. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
:param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
Instance resource by the update.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
        completed. The server guarantees this for at least 60 minutes after the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_request: Optional. If set, performs request validation, but does not actually
execute the request.
:param allow_missing: Optional. If set to true, update succeeds even if instance is not found.
In that case, a new instance is created and update_mask is ignored.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"cluster_id", "instance_id", "instance_configuration", "update_mask", "allow_missing"}
| set(AlloyDBWriteBaseOperator.template_fields)
)
operator_extra_links = (AlloyDBClusterLink(),)
def __init__(
self,
cluster_id: str,
instance_id: str,
instance_configuration: alloydb_v1.Instance | dict,
update_mask: FieldMask | dict | None = None,
allow_missing: bool = False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.cluster_id = cluster_id
self.instance_id = instance_id
self.instance_configuration = instance_configuration
self.update_mask = update_mask
self.allow_missing = allow_missing
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location_id": self.location,
"cluster_id": self.cluster_id,
"project_id": self.project_id,
}
def execute(self, context: Context) -> dict | None:
AlloyDBClusterLink.persist(context=context)
if self.validate_request:
self.log.info("Validating an Update AlloyDB instance request.")
else:
self.log.info("Updating an AlloyDB instance.")
try:
operation = self.hook.update_instance(
cluster_id=self.cluster_id,
instance_id=self.instance_id,
project_id=self.project_id,
location=self.location,
instance=self.instance_configuration,
update_mask=self.update_mask,
allow_missing=self.allow_missing,
request_id=self.request_id,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except Exception as ex:
raise AirflowException(ex) from ex
else:
operation_result = self.get_operation_result(operation)
result = alloydb_v1.Instance.to_dict(operation_result) if operation_result else None
if not self.validate_request:
self.log.info("AlloyDB instance %s was successfully updated.", self.cluster_id)
return result
|
AlloyDBUpdateInstanceOperator
|
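A hypothetical task definition using the operator documented above; every ID and the label payload are placeholders, not values from the source:

update_instance = AlloyDBUpdateInstanceOperator(
    task_id="update_alloydb_instance",
    cluster_id="my-cluster",
    instance_id="my-instance",
    instance_configuration={"labels": {"env": "dev"}},
    update_mask={"paths": ["labels"]},
    project_id="my-project",
    location="us-central1",
)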
python
|
neetcode-gh__leetcode
|
python/0100-same-tree.py
|
{
"start": 164,
"end": 479
}
|
class ____:
def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:
if not p and not q:
return True
if p and q and p.val == q.val:
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
else:
return False
|
Solution
|
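A quick check of the recursive comparison above; TreeNode is assumed to be LeetCode's scaffold class with a (val, left, right) constructor:

a = TreeNode(1, TreeNode(2), TreeNode(3))
b = TreeNode(1, TreeNode(2), TreeNode(3))
assert Solution().isSameTree(a, b)
assert not Solution().isSameTree(a, TreeNode(1))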
python
|
pandas-dev__pandas
|
pandas/tests/indexing/test_iloc.py
|
{
"start": 841,
"end": 1606
}
|
class ____:
@pytest.mark.parametrize("key", [2, -1, [0, 1, 2]])
@pytest.mark.parametrize(
"index",
[
Index(list("abcd"), dtype=object),
Index([2, 4, "null", 8], dtype=object),
date_range("20130101", periods=4),
Index(range(0, 8, 2), dtype=np.float64),
Index([]),
],
)
def test_iloc_getitem_int_and_list_int(self, key, frame_or_series, index, request):
obj = frame_or_series(range(len(index)), index=index)
check_indexing_smoketest_or_raises(
obj,
"iloc",
key,
fails=IndexError,
)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
|
TestiLoc
|
python
|
pytorch__pytorch
|
test/functorch/test_control_flow.py
|
{
"start": 335354,
"end": 338948
}
|
class ____(torch.nn.Module):
def forward(self, x):
x: "f32[s6, 3]";
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
sym_size_int_1: "Sym(s6)" = torch.ops.aten.sym_size.int(x, 0)
sin: "f32[s6, 3]" = torch.ops.aten.sin.default(x); x = None
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (sym_size_int_1, 3, 2, 2, 3, sin), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = sym_size_int_1 = sin = None
getitem_6: "Sym(u10)" = while_loop[0]
getitem_7: "Sym(u11)" = while_loop[1]
getitem_8: "Sym(u12)" = while_loop[2]
getitem_9: "Sym(u13)" = while_loop[3]
getitem_10: "Sym(u14)" = while_loop[4]
getitem_5: "f32[s6, 3]" = while_loop[5]; while_loop = None
add: "Sym(u12 + 1)" = getitem_8 + 1
add_1: "Sym(u13 + 1)" = getitem_9 + 1
add_2: "Sym(u14 + 1)" = getitem_10 + 1
add_3: "f32[s6, 3]" = torch.ops.aten.add.Tensor(getitem_5, getitem_8); getitem_8 = None
add_4: "f32[s6, 3]" = torch.ops.aten.add.Tensor(getitem_5, getitem_9); getitem_9 = None
add_5: "f32[s6, 3]" = torch.ops.aten.add.Tensor(getitem_5, getitem_10); getitem_10 = None
return pytree.tree_unflatten((getitem_6, getitem_7, add, add_1, add_2, add_3, add_4, add_5, getitem_5), self._out_spec)
class while_loop_cond_graph_0(torch.nn.Module):
def forward(self, arg0_1: "Sym(u15)", arg1_1: "Sym(u16)", arg2_1: "Sym(u17)", arg3_1: "Sym(u18)", arg4_1: "Sym(u19)", arg5_1: "f32[s6, 3]"):
mul: "Sym(u17*u18)" = arg2_1 * arg3_1; arg2_1 = arg3_1 = None
mul_1: "Sym(u17*u18*u19)" = mul * arg4_1; mul = arg4_1 = None
mul_2: "Sym(u15*u16)" = arg0_1 * arg1_1; arg0_1 = arg1_1 = None
lt: "Sym(u17*u18*u19 < u15*u16)" = mul_1 < mul_2; mul_1 = mul_2 = None
return lt
class while_loop_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "Sym(u15)", arg1_1: "Sym(u16)", arg2_1: "Sym(u17)", arg3_1: "Sym(u18)", arg4_1: "Sym(u19)", arg5_1: "f32[s6, 3]"):
add: "Sym(u15 + 1)" = arg0_1 + 1; arg0_1 = None
add_1: "Sym(u16 + 1)" = arg1_1 + 1; arg1_1 = None
add_2: "Sym(u17 + 1)" = arg2_1 + 1; arg2_1 = None
add_3: "Sym(u18 + 1)" = arg3_1 + 1; arg3_1 = None
add_4: "Sym(u19 + 1)" = arg4_1 + 1; arg4_1 = None
add_5: "f32[s6, 3]" = torch.ops.aten.add.Tensor(arg5_1, 1); arg5_1 = None
return (add, add_1, add_2, add_3, add_4, add_5)
""", # noqa: B950
)
@skipIfTorchDynamo("Graph is not captured correctly when test with dynamo")
@parametrize("dynamic", [True, False])
@parametrize("backend", ["eager", "aot_eager"])
def test_while_loop_op_pytree_int_carry_compile(self, dynamic, backend):
m, args = WHILE_LOOP_TESTS["pytree_int_carry"]
if backend == "eager":
backend = EagerAndRecordGraphs()
self._check_compile(m, args, dynamic=dynamic, backend=backend)
if (
isinstance(backend, EagerAndRecordGraphs)
and dynamic
and not TEST_WITH_CROSSREF
):
self.assertEqual(len(backend.graphs), 1)
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/mysql/test_query.py
|
{
"start": 1198,
"end": 2019
}
|
class ____(fixtures.TestBase):
__only_on__ = "mysql", "mariadb"
__backend__ = True
def test_is_boolean_symbols_despite_no_native(self, connection):
with expect_warnings("Datatype BOOL does not support CAST"):
is_(
connection.scalar(select(cast(true().is_(true()), Boolean))),
True,
)
with expect_warnings("Datatype BOOL does not support CAST"):
is_(
connection.scalar(
select(cast(true().is_not(true()), Boolean))
),
False,
)
with expect_warnings("Datatype BOOL does not support CAST"):
is_(
connection.scalar(select(cast(false().is_(false()), Boolean))),
True,
)
|
IdiosyncrasyTest
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/translator.py
|
{
"start": 969,
"end": 1308
}
|
class ____:
RUNNING = AirbyteJobStatusType.RUNNING
SUCCEEDED = AirbyteJobStatusType.SUCCEEDED
CANCELLED = AirbyteJobStatusType.CANCELLED
PENDING = AirbyteJobStatusType.PENDING
FAILED = AirbyteJobStatusType.FAILED
ERROR = AirbyteJobStatusType.ERROR
INCOMPLETE = AirbyteJobStatusType.INCOMPLETE
@record
|
AirbyteState
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/ruff/RUF049.py
|
{
"start": 539,
"end": 575
}
|
class ____(Enum): ...
@attrs.mutable
|
E
|
python
|
spack__spack
|
lib/spack/spack/repo.py
|
{
"start": 70741,
"end": 77867
}
|
class ____(Mapping[str, RepoDescriptor]):
"""A collection of repository descriptors."""
def __init__(self, descriptors: Dict[str, RepoDescriptor]) -> None:
self.descriptors = descriptors
def __getitem__(self, name: str) -> RepoDescriptor:
return self.descriptors[name]
def __iter__(self):
return iter(self.descriptors.keys())
def __len__(self):
return len(self.descriptors)
def __contains__(self, name) -> bool:
return name in self.descriptors
def __repr__(self):
return f"RepoDescriptors({self.descriptors!r})"
@staticmethod
def from_config(
lock: spack.util.lock.Lock, config: spack.config.Configuration, scope=None
) -> "RepoDescriptors":
return RepoDescriptors(
{
name: parse_config_descriptor(name, cfg, lock)
for name, cfg in config.get_config("repos", scope=scope).items()
}
)
def construct(
self,
cache: spack.util.file_cache.FileCache,
fetch: bool = True,
find_git: Callable[[], MaybeExecutable] = lambda: spack.util.git.git(required=True),
overrides: Optional[Dict[str, Any]] = None,
) -> Tuple[RepoPath, Dict[str, Exception]]:
"""Construct a RepoPath from the descriptors.
If ``fetch`` is True, initialize all remote repositories that have not been fetched yet.
Returns:
A tuple containing a RepoPath instance with all constructed Repos and a dictionary
mapping paths to exceptions that occurred during construction.
"""
repos: List[Repo] = []
errors: Dict[str, Exception] = {}
git: MaybeExecutable = None
for descriptor in self.descriptors.values():
if fetch and isinstance(descriptor, RemoteRepoDescriptor):
git = git or find_git()
descriptor.initialize(fetch=True, git=git)
else:
descriptor.initialize(fetch=False)
for path, result in descriptor.construct(cache=cache, overrides=overrides).items():
if isinstance(result, Repo):
repos.append(result)
else:
errors[path] = result
return RepoPath(*repos), errors
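# Illustrative use of the returned pair (hypothetical ``misc_cache`` object and
# ``tty`` warning helper), reporting per-repository failures instead of raising:
#
#   repo_path, errors = descriptors.construct(cache=misc_cache)
#   for path, err in errors.items():
#       tty.warn(f"skipping package repository at {path}: {err}")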
def parse_config_descriptor(
name: Optional[str], descriptor: Any, lock: spack.util.lock.Lock
) -> RepoDescriptor:
"""Parse a repository descriptor from validated configuration. This does not instantiate Repo
objects, but merely turns the config into a more useful RepoDescriptor instance.
Args:
name: the name of the repository, used for error messages
descriptor: the configuration for the repository, which can be a string (local path),
or a dictionary with ``git`` key containing git URL and other options.
Returns:
A RepoDescriptor instance, either LocalRepoDescriptor or RemoteRepoDescriptor.
Raises:
BadRepoError: if the descriptor is invalid or cannot be parsed.
RuntimeError: if the descriptor is of an unexpected type.
"""
if isinstance(descriptor, str):
return LocalRepoDescriptor(name, spack.util.path.canonicalize_path(descriptor))
# Should be the case due to config validation.
assert isinstance(descriptor, dict), "Repository descriptor must be a string or a dictionary"
# Configuration validation works per scope, and we want to allow overriding e.g. destination
# in user config without the user having to repeat the `git` key and value again. This is a
# hard error, since config validation is a hard error.
if "git" not in descriptor:
raise RuntimeError(
f"Invalid configuration for repository '{name}': {descriptor!r}. A `git` attribute is "
"required for remote repositories."
)
repository = descriptor["git"]
assert isinstance(repository, str), "Package repository git URL must be a string"
destination = descriptor.get("destination", None)
if destination is None: # use a default destination
dir_name = spack.util.hash.b32_hash(repository)[-7:]
destination = os.path.join(spack.paths.package_repos_path, dir_name)
else:
destination = spack.util.path.canonicalize_path(destination)
return RemoteRepoDescriptor(
name=name,
repository=repository,
branch=descriptor.get("branch"),
commit=descriptor.get("commit"),
tag=descriptor.get("tag"),
destination=destination,
relative_paths=descriptor.get("paths"),
lock=lock,
)
def create_or_construct(
root: str,
namespace: Optional[str] = None,
package_api: Tuple[int, int] = spack.package_api_version,
) -> Repo:
"""Create a repository, or just return a Repo if it already exists."""
repo_yaml_dir, _ = get_repo_yaml_dir(root, namespace, package_api)
if not os.path.exists(repo_yaml_dir):
fs.mkdirp(root)
create_repo(root, namespace=namespace, package_api=package_api)
return from_path(repo_yaml_dir)
def create_and_enable(config: spack.config.Configuration) -> RepoPath:
"""Immediately call enable() on the created RepoPath instance."""
repo_path = RepoPath.from_config(config)
repo_path.enable()
return repo_path
#: Global package repository instance.
PATH: RepoPath = spack.llnl.util.lang.Singleton(
lambda: create_and_enable(spack.config.CONFIG)
) # type: ignore[assignment]
# Add the finder to sys.meta_path
REPOS_FINDER = ReposFinder()
sys.meta_path.append(REPOS_FINDER)
def all_package_names(include_virtuals=False):
"""Convenience wrapper around ``spack.repo.all_package_names()``."""
return PATH.all_package_names(include_virtuals)
@contextlib.contextmanager
def use_repositories(
*paths_and_repos: Union[str, Repo], override: bool = True
) -> Generator[RepoPath, None, None]:
"""Use the repositories passed as arguments within the context manager.
Args:
*paths_and_repos: paths to the repositories to be used, or
already constructed Repo objects
override: if True use only the repositories passed as input,
if False add them to the top of the list of current repositories.
Returns:
Corresponding RepoPath object
"""
paths = {getattr(x, "root", x): getattr(x, "root", x) for x in paths_and_repos}
scope_name = f"use-repo-{uuid.uuid4()}"
repos_key = "repos:" if override else "repos"
spack.config.CONFIG.push_scope(
spack.config.InternalConfigScope(name=scope_name, data={repos_key: paths})
)
old_repo, new_repo = PATH, RepoPath.from_config(spack.config.CONFIG)
old_repo.disable()
enable_repo(new_repo)
try:
yield new_repo
finally:
spack.config.CONFIG.remove_scope(scope_name=scope_name)
new_repo.disable()
enable_repo(old_repo)
def enable_repo(repo_path: RepoPath) -> None:
"""Set the global package repository and make them available in module search paths."""
global PATH
PATH = repo_path
PATH.enable()
|
RepoDescriptors
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/text.py
|
{
"start": 59786,
"end": 76107
}
|
class ____(Text, _AnnotationBase):
"""
An `.Annotation` is a `.Text` that can refer to a specific position *xy*.
Optionally an arrow pointing from the text to *xy* can be drawn.
Attributes
----------
xy
The annotated position.
xycoords
The coordinate system for *xy*.
arrow_patch
A `.FancyArrowPatch` to point from *xytext* to *xy*.
"""
def __str__(self):
return f"Annotation({self.xy[0]:g}, {self.xy[1]:g}, {self._text!r})"
def __init__(self, text, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
annotation_clip=None,
**kwargs):
"""
Annotate the point *xy* with text *text*.
In the simplest form, the text is placed at *xy*.
Optionally, the text can be displayed in another position *xytext*.
An arrow pointing from the text to the annotated point *xy* can then
be added by defining *arrowprops*.
Parameters
----------
text : str
The text of the annotation.
xy : (float, float)
The point *(x, y)* to annotate. The coordinate system is determined
by *xycoords*.
xytext : (float, float), default: *xy*
The position *(x, y)* to place the text at. The coordinate system
is determined by *textcoords*.
xycoords : single or two-tuple of str or `.Artist` or `.Transform` or \
callable, default: 'data'
The coordinate system that *xy* is given in. The following types
of values are supported:
- One of the following strings:
==================== ============================================
Value Description
==================== ============================================
'figure points' Points from the lower left of the figure
'figure pixels' Pixels from the lower left of the figure
'figure fraction' Fraction of figure from lower left
'subfigure points' Points from the lower left of the subfigure
'subfigure pixels' Pixels from the lower left of the subfigure
'subfigure fraction' Fraction of subfigure from lower left
'axes points' Points from lower left corner of the Axes
'axes pixels' Pixels from lower left corner of the Axes
'axes fraction' Fraction of Axes from lower left
'data' Use the coordinate system of the object
being annotated (default)
'polar' *(theta, r)* if not native 'data'
coordinates
==================== ============================================
Note that 'subfigure pixels' and 'figure pixels' are the same
for the parent figure, so users who want code that is usable in
a subfigure can use 'subfigure pixels'.
- An `.Artist`: *xy* is interpreted as a fraction of the artist's
`~matplotlib.transforms.Bbox`. E.g. *(0, 0)* would be the lower
left corner of the bounding box and *(0.5, 1)* would be the
center top of the bounding box.
- A `.Transform` to transform *xy* to screen coordinates.
- A function with one of the following signatures::
def transform(renderer) -> Bbox
def transform(renderer) -> Transform
where *renderer* is a `.RendererBase` subclass.
The result of the function is interpreted like the `.Artist` and
`.Transform` cases above.
- A tuple *(xcoords, ycoords)* specifying separate coordinate
systems for *x* and *y*. *xcoords* and *ycoords* must each be
of one of the above described types.
See :ref:`plotting-guide-annotation` for more details.
textcoords : single or two-tuple of str or `.Artist` or `.Transform` \
or callable, default: value of *xycoords*
The coordinate system that *xytext* is given in.
All *xycoords* values are valid as well as the following strings:
================= =================================================
Value Description
================= =================================================
'offset points' Offset, in points, from the *xy* value
'offset pixels' Offset, in pixels, from the *xy* value
'offset fontsize' Offset, relative to fontsize, from the *xy* value
================= =================================================
arrowprops : dict, optional
The properties used to draw a `.FancyArrowPatch` arrow between the
positions *xy* and *xytext*. Defaults to None, i.e. no arrow is
drawn.
For historical reasons there are two different ways to specify
arrows, "simple" and "fancy":
**Simple arrow:**
If *arrowprops* does not contain the key 'arrowstyle' the
allowed keys are:
========== =================================================
Key Description
========== =================================================
width The width of the arrow in points
headwidth The width of the base of the arrow head in points
headlength The length of the arrow head in points
shrink Fraction of total length to shrink from both ends
? Any `.FancyArrowPatch` property
========== =================================================
The arrow is attached to the edge of the text box, the exact
position (corners or centers) depending on where it's pointing to.
**Fancy arrow:**
This is used if 'arrowstyle' is provided in the *arrowprops*.
Valid keys are the following `.FancyArrowPatch` parameters:
=============== ===================================
Key Description
=============== ===================================
arrowstyle The arrow style
connectionstyle The connection style
relpos See below; default is (0.5, 0.5)
patchA Default is bounding box of the text
patchB Default is None
shrinkA In points. Default is 2 points
shrinkB In points. Default is 2 points
mutation_scale Default is text size (in points)
mutation_aspect Default is 1
? Any `.FancyArrowPatch` property
=============== ===================================
The exact starting point position of the arrow is defined by
*relpos*. It's a tuple of relative coordinates of the text box,
where (0, 0) is the lower left corner and (1, 1) is the upper
right corner. Values <0 and >1 are supported and specify points
outside the text box. By default (0.5, 0.5), so the starting point
is centered in the text box.
annotation_clip : bool or None, default: None
Whether to clip (i.e. not draw) the annotation when the annotation
point *xy* is outside the Axes area.
- If *True*, the annotation will be clipped when *xy* is outside
the Axes.
- If *False*, the annotation will always be drawn.
- If *None*, the annotation will be clipped when *xy* is outside
the Axes and *xycoords* is 'data'.
**kwargs
Additional kwargs are passed to `.Text`.
Returns
-------
`.Annotation`
See Also
--------
:ref:`annotations`
"""
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
# warn about wonky input data
if (xytext is None and
textcoords is not None and
textcoords != xycoords):
_api.warn_external("You have used the `textcoords` kwarg, but "
"not the `xytext` kwarg. This can lead to "
"surprising results.")
# clean up textcoords and assign default
if textcoords is None:
textcoords = self.xycoords
self._textcoords = textcoords
# cleanup xytext defaults
if xytext is None:
xytext = self.xy
x, y = xytext
self.arrowprops = arrowprops
if arrowprops is not None:
arrowprops = arrowprops.copy()
if "arrowstyle" in arrowprops:
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
else:
# modified YAArrow API to be used with FancyArrowPatch
for key in ['width', 'headwidth', 'headlength', 'shrink']:
arrowprops.pop(key, None)
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1), **arrowprops)
else:
self.arrow_patch = None
# Must come last, as some kwargs may be propagated to arrow_patch.
Text.__init__(self, x, y, text, **kwargs)
def contains(self, mouseevent):
if self._different_canvas(mouseevent):
return False, {}
contains, tinfo = Text.contains(self, mouseevent)
if self.arrow_patch is not None:
in_patch, _ = self.arrow_patch.contains(mouseevent)
contains = contains or in_patch
return contains, tinfo
@property
def xycoords(self):
return self._xycoords
@xycoords.setter
def xycoords(self, xycoords):
def is_offset(s):
return isinstance(s, str) and s.startswith("offset")
if (isinstance(xycoords, tuple) and any(map(is_offset, xycoords))
or is_offset(xycoords)):
raise ValueError("xycoords cannot be an offset coordinate")
self._xycoords = xycoords
@property
def xyann(self):
"""
The text position.
See also *xytext* in `.Annotation`.
"""
return self.get_position()
@xyann.setter
def xyann(self, xytext):
self.set_position(xytext)
def get_anncoords(self):
"""
Return the coordinate system to use for `.Annotation.xyann`.
See also *xycoords* in `.Annotation`.
"""
return self._textcoords
def set_anncoords(self, coords):
"""
Set the coordinate system to use for `.Annotation.xyann`.
See also *xycoords* in `.Annotation`.
"""
self._textcoords = coords
anncoords = property(get_anncoords, set_anncoords, doc="""
The coordinate system to use for `.Annotation.xyann`.""")
def set_figure(self, fig):
# docstring inherited
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def update_positions(self, renderer):
"""
Update the pixel positions of the annotation text and the arrow patch.
"""
# generate transformation
self.set_transform(self._get_xy_transform(renderer, self.anncoords))
arrowprops = self.arrowprops
if arrowprops is None:
return
bbox = Text.get_window_extent(self, renderer)
arrow_end = x1, y1 = self._get_position_xy(renderer) # Annotated pos.
ms = arrowprops.get("mutation_scale", self.get_size())
self.arrow_patch.set_mutation_scale(ms)
if "arrowstyle" not in arrowprops:
# Approximately simulate the YAArrow.
shrink = arrowprops.get('shrink', 0.0)
width = arrowprops.get('width', 4)
headwidth = arrowprops.get('headwidth', 12)
headlength = arrowprops.get('headlength', 12)
# NB: ms is in pts
stylekw = dict(head_length=headlength / ms,
head_width=headwidth / ms,
tail_width=width / ms)
self.arrow_patch.set_arrowstyle('simple', **stylekw)
# using YAArrow style:
# pick the corner of the text bbox closest to annotated point.
xpos = [(bbox.x0, 0), ((bbox.x0 + bbox.x1) / 2, 0.5), (bbox.x1, 1)]
ypos = [(bbox.y0, 0), ((bbox.y0 + bbox.y1) / 2, 0.5), (bbox.y1, 1)]
x, relposx = min(xpos, key=lambda v: abs(v[0] - x1))
y, relposy = min(ypos, key=lambda v: abs(v[0] - y1))
self._arrow_relpos = (relposx, relposy)
r = np.hypot(y - y1, x - x1)
shrink_pts = shrink * r / renderer.points_to_pixels(1)
self.arrow_patch.shrinkA = self.arrow_patch.shrinkB = shrink_pts
# adjust the starting point of the arrow relative to the textbox.
# TODO : Rotation needs to be accounted.
arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos
# The arrow is drawn from arrow_begin to arrow_end. It will be first
# clipped by patchA and patchB. Then it will be shrunk by shrinkA and
# shrinkB (in points). If patchA is not set, self.bbox_patch is used.
self.arrow_patch.set_positions(arrow_begin, arrow_end)
if "patchA" in arrowprops:
patchA = arrowprops["patchA"]
elif self._bbox_patch:
patchA = self._bbox_patch
elif self.get_text() == "":
patchA = None
else:
pad = renderer.points_to_pixels(4)
patchA = Rectangle(
xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2),
width=bbox.width + pad, height=bbox.height + pad,
transform=IdentityTransform(), clip_on=False)
self.arrow_patch.set_patchA(patchA)
@artist.allow_rasterization
def draw(self, renderer):
# docstring inherited
if renderer is not None:
self._renderer = renderer
if not self.get_visible() or not self._check_xy(renderer):
return
# Update text positions before `Text.draw` would, so that the
# FancyArrowPatch is correctly positioned.
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow_patch is not None: # FancyArrowPatch
if (self.arrow_patch.get_figure(root=False) is None and
(fig := self.get_figure(root=False)) is not None):
self.arrow_patch.set_figure(fig)
self.arrow_patch.draw(renderer)
# Draw text, including FancyBboxPatch, after FancyArrowPatch.
# Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
Text.draw(self, renderer)
def get_window_extent(self, renderer=None):
# docstring inherited
# This block is the same as in Text.get_window_extent, but we need to
# set the renderer before calling update_positions().
if not self.get_visible() or not self._check_xy(renderer):
return Bbox.unit()
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
self._renderer = self.get_figure(root=True)._get_renderer()
if self._renderer is None:
raise RuntimeError('Cannot get window extent without renderer')
self.update_positions(self._renderer)
text_bbox = Text.get_window_extent(self)
bboxes = [text_bbox]
if self.arrow_patch is not None:
bboxes.append(self.arrow_patch.get_window_extent())
return Bbox.union(bboxes)
def get_tightbbox(self, renderer=None):
# docstring inherited
if not self._check_xy(renderer):
return Bbox.null()
return super().get_tightbbox(renderer)
_docstring.interpd.register(Annotation=Annotation.__init__.__doc__)
|
Annotation
|
python
|
google__pytype
|
pytype/typegraph/typegraph_serializer.py
|
{
"start": 1884,
"end": 2358
}
|
class ____:
# Note that cfg_nodes and bindings contain all instances of their respective
# types that are found in the program, while variables only contains the
# Variables that have Bindings. This means lookups of variables should be
# by using `find`, not by direct index access.
cfg_nodes: list[SerializedCFGNode]
variables: list[SerializedVariable]
bindings: list[SerializedBinding]
entrypoint: CFGNodeId
queries: list[SerializedQuery]
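# Illustrative consequence of the note above (the lookup helper named here is
# hypothetical): locate a Variable by id with something like
#   next(v for v in program.variables if v.id == var_id)
# rather than program.variables[var_id].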
|
SerializedProgram
|
python
|
google__jax
|
jax/_src/numpy/indexing.py
|
{
"start": 27835,
"end": 52827
}
|
class ____(NamedTuple):
# The expected shape of the slice output.
slice_shape: Sequence[int]
# The slice shape to pass to lax.gather().
gather_slice_shape: Sequence[int]
# The gather indices to use.
gather_indices: ArrayLike
# A GatherDimensionNumbers object describing the gather to perform.
dnums: slicing.GatherDimensionNumbers
# Are the gather_indices known to be non-overlapping and/or sorted?
# (In practice, these translate to "there no advanced indices", because
# only advanced indices could lead to index repetition.)
unique_indices: bool
indices_are_sorted: bool
# Slice dimensions that have negative strides, and so must be reversed after
# the gather.
reversed_y_dims: Sequence[int]
# Keep track of any axes created by `newaxis`. These must be inserted for
# gathers and eliminated for scatters.
newaxis_dims: Sequence[int]
# Keep track of dimensions with scalar bool indices. These must be inserted
# for gathers before performing other index operations.
scalar_bool_dims: Sequence[int]
# The expected sharding of the slice output.
slice_sharding: NamedSharding | None = None
def split_index_for_jit(idx, shape):
"""Splits indices into necessarily-static and dynamic parts.
Used to pass indices into `jit`-ted function.
"""
# Normalize the index to a tuple; non-tuple sequences (deprecated by NumPy) are rejected.
idx = eliminate_deprecated_list_indexing(idx)
if any(isinstance(i, str) for i in idx):
raise TypeError(f"JAX does not support string indexing; got {idx=}")
# Expand any (concrete) boolean indices. We can then use advanced integer
# indexing logic to handle them.
idx = _expand_bool_indices(idx, shape)
leaves, treedef = tree_flatten(idx)
dynamic = [None] * len(leaves)
static = [None] * len(leaves)
for i, x in enumerate(leaves):
if x is Ellipsis:
static[i] = x
elif isinstance(x, slice):
# slice objects aren't hashable.
static[i] = (x.start, x.stop, x.step)
else:
dynamic[i] = x
return treedef, tuple(static), dynamic
def merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):
"""Recombines indices that were split by split_index_for_jit."""
idx = []
for s, d in zip(static_idx, dynamic_idx):
if d is not None:
idx.append(d)
elif isinstance(s, tuple):
idx.append(slice(s[0], s[1], s[2]))
else:
idx.append(s)
return treedef.unflatten(idx)
def _int(aval):
return not aval.shape and dtypes.issubdtype(aval.dtype, np.integer)
def _aval_or_none(x):
try:
return core.get_aval(x)
except Exception:
return None
def index_to_gather(x_shape: Sequence[int], idx: Sequence[Any],
x_sharding, normalize_indices: bool = True) -> _Indexer:
# Convert sequences to arrays
idx = tuple(lax_numpy.asarray(i, dtype=None if i else int)
if isinstance(i, Sequence) else i for i in idx)
abstract_idx = [_aval_or_none(i) for i in idx]
float_indices = [(i, val, aval) for i, (val, aval) in enumerate(zip(idx, abstract_idx))
if aval is not None and dtypes.issubdtype(aval, np.inexact)]
# Check for float or complex indices:
if float_indices:
i, val, aval = float_indices[0]
msg = ("Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}")
raise TypeError(msg.format(aval.dtype.name, i, val))
# Check whether advanced indices are contiguous. We must do this before
# removing ellipses (https://github.com/jax-ml/jax/issues/25109)
# If advanced indexing axes do not appear contiguously, NumPy semantics
# move the advanced axes to the front.
(is_advanced,) = np.nonzero([
isinstance(e, (int, np.integer, Array, np.ndarray,
literals.TypedNdArray))
or lax_numpy.isscalar(e)
for e in idx
])
advanced_axes_are_contiguous = np.all(np.diff(is_advanced) == 1)
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
x_spec = x_sharding.spec
# Check for scalar boolean indexing: this requires inserting extra dimensions
# before performing the rest of the logic.
scalar_bool_dims: Sequence[int] = [n for n, i in enumerate(idx) if isinstance(i, bool)]
if scalar_bool_dims:
idx = tuple(np.arange(int(i)) if isinstance(i, bool) else i for i in idx)
x_shape = list(x_shape)
x_spec = list(x_spec)
for i in sorted(scalar_bool_dims):
x_shape.insert(i, 1)
x_spec.insert(i, None)
x_shape = tuple(x_shape)
x_spec = tuple(x_spec)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
advanced_indexes: Sequence[Array | np.ndarray] | None = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes: Sequence[int] = []
# The positions of the advanced indexes in x's shape.
# collapsed, after None axes have been removed. See below.
x_advanced_axes: Sequence[int] | None = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(lax_numpy.asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)
if lax_numpy.isscalar(e)
or isinstance(e, (Sequence, Array, np.ndarray,
literals.TypedNdArray)))
if normalize_indices:
advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)
for e, i, j in advanced_pairs)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims: list[int] = []
collapsed_slice_dims: list[int] = []
start_index_map: list[int] = []
index_dtype = lax_utils.int_dtype_for_shape(x_shape, signed=True)
# Gather indices.
# Pairs of (array, start_dim) values. These will be broadcast into
# gather_indices_shape, with the array dimensions aligned to start_dim, and
# then concatenated.
gather_indices: list[tuple[Array, int]] = []
gather_indices_shape: list[int] = []
# We perform three transformations to y before the scatter op, in order:
# First, y is broadcast to slice_shape. In general `y` only needs to be
# broadcastable to the right shape.
slice_shape: list[int] = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims: list[int] = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims: list[int] = []
gather_slice_shape: list[int] = []
slice_spec = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
# * the advanced indices were not contiguous and we are the start.
# * we are at the position of the first advanced index.
if (advanced_indexes is not None and
(advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or
not advanced_axes_are_contiguous and idx_pos == 0)):
advanced_index_arrs = util._broadcast_arrays(*advanced_indexes)
shape = advanced_index_arrs[0].shape
aia_spec = core.typeof(advanced_index_arrs[0]).sharding.spec
ndim = len(shape)
start_dim = len(gather_indices_shape)
gather_indices.extend(
(lax.convert_element_type(a, index_dtype), start_dim)
for a in advanced_index_arrs
)
gather_indices_shape += shape
assert x_advanced_axes is not None
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
slice_spec.extend(aia_spec)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
# Handle basic int indexes.
abstract_i = _aval_or_none(i)
if isinstance(abstract_i, core.ShapedArray) and _int(abstract_i):
if core.definitely_equal(x_shape[x_axis], 0):
# XLA gives error when indexing into an axis of size 0
raise IndexError(f"index is out of bounds for axis {x_axis} with size 0")
i = _normalize_index(i, x_shape[x_axis]) if normalize_indices else i
i_converted = lax.convert_element_type(i, index_dtype)
gather_indices.append((i_converted, len(gather_indices_shape)))
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
slice_spec.append(None)
newaxis_dims.append(y_axis)
y_axis += 1
elif isinstance(i, slice):
# Handle slice index (only static, otherwise an error is raised)
if not all(_is_slice_element_none_or_constant_or_symbolic(elt)
for elt in (i.start, i.stop, i.step)):
msg = ("Array slice indices must have static start/stop/step to be used "
"with NumPy indexing syntax. "
f"Found slice({i.start}, {i.stop}, {i.step}). "
"To index a statically sized "
"array at a dynamic position, try lax.dynamic_slice/"
"dynamic_update_slice (JAX does not support dynamically sized "
"arrays within JIT compiled functions).")
raise IndexError(msg)
start, step, slice_size = core.canonicalize_slice(i, x_shape[x_axis])
slice_shape.append(slice_size)
slice_spec.append(x_spec[x_axis])
if core.definitely_equal(step, 1):
# Avoid generating trivial gather (an optimization)
if not core.definitely_equal(slice_size, x_shape[x_axis]):
gather_indices.append((lax.convert_element_type(start, index_dtype),
len(gather_indices_shape)))
start_index_map.append(x_axis)
gather_slice_shape.append(slice_size)
offset_dims.append(collapsed_y_axis)
else:
indices = (lax_numpy.array(start, dtype=index_dtype) +
lax_numpy.array(step, dtype=index_dtype) * lax.iota(index_dtype, slice_size))
if step < 0:
reversed_y_dims.append(collapsed_y_axis)
indices = lax.rev(indices, dimensions=(0,))
gather_slice_shape.append(1)
gather_indices.append((indices, len(gather_indices_shape)))
start_index_map.append(x_axis)
gather_indices_shape.append(slice_size)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if (abstract_i is not None and
not (dtypes.issubdtype(abstract_i.dtype, np.integer) or dtypes.issubdtype(abstract_i.dtype, np.bool_))):
msg = ("Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}")
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
raise IndexError("Indexing mode not yet supported. Got unsupported indexer "
f"at position {idx_pos}: {i!r}")
if len(gather_indices) == 0:
gather_indices_array: ArrayLike = np.zeros((0,), dtype=index_dtype)
elif len(gather_indices) == 1:
g, _ = gather_indices[0]
gather_indices_array = lax.expand_dims(g, (g.ndim,))
else:
last_dim = len(gather_indices_shape)
gather_indices_shape.append(1)
gather_indices_array = lax.concatenate([
lax.broadcast_in_dim(g, gather_indices_shape, tuple(range(i, i + g.ndim)))
for g, i in gather_indices],
last_dim)
dnums = slicing.GatherDimensionNumbers(
offset_dims = tuple(offset_dims),
collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),
start_index_map = tuple(start_index_map)
)
slice_sharding = x_sharding.update(spec=slice_spec)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices_array,
unique_indices=advanced_indexes is None,
indices_are_sorted=advanced_indexes is None,
scalar_bool_dims=scalar_bool_dims,
slice_sharding=slice_sharding)
def _should_unpack_list_index(x):
"""Helper for eliminate_deprecated_list_indexing."""
return (isinstance(x, (np.ndarray, Array, literals.TypedNdArray))
and np.ndim(x) != 0
or isinstance(x, (Sequence, slice))
or x is Ellipsis or x is None)
def eliminate_deprecated_list_indexing(idx):
# "Basic slicing is initiated if the selection object is a non-array,
# non-tuple sequence containing slice objects, [Ellipses, or newaxis
# objects]". Detects this and raises a TypeError.
if not isinstance(idx, tuple):
if isinstance(idx, Sequence) and not isinstance(
idx, (Array, np.ndarray, literals.TypedNdArray, str)
):
# As of numpy 1.16, some non-tuple sequences of indices result in a warning, while
# others are converted to arrays, based on a set of somewhat convoluted heuristics
# (See https://github.com/numpy/numpy/blob/v1.19.2/numpy/core/src/multiarray/mapping.c#L179-L343)
# In JAX, we raise an informative TypeError for *all* non-tuple sequences.
if any(_should_unpack_list_index(i) for i in idx):
msg = ("Using a non-tuple sequence for multidimensional indexing is not allowed; "
"use `arr[tuple(seq)]` instead of `arr[seq]`. "
"See https://github.com/jax-ml/jax/issues/4564 for more information.")
else:
msg = ("Using a non-tuple sequence for multidimensional indexing is not allowed; "
"use `arr[array(seq)]` instead of `arr[seq]`. "
"See https://github.com/jax-ml/jax/issues/4564 for more information.")
raise TypeError(msg)
else:
idx = (idx,)
return idx
def _is_boolean_index(i):
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
return (isinstance(abstract_i, core.ShapedArray) and dtypes.issubdtype(abstract_i.dtype, np.bool_)
or isinstance(i, list) and i and all(_is_scalar(e)
and dtypes.issubdtype(dtypes.dtype(e), np.bool_) for e in i))
def _expand_bool_indices(idx, shape):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
total_dims = len(shape)
num_ellipsis = sum(e is Ellipsis for e in idx)
if num_ellipsis > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
elif num_ellipsis == 1:
total_dims = sum(np.ndim(e) if _is_boolean_index(e) else 1 for e in idx
if e is not None and e is not Ellipsis)
ellipsis_offset = 0
newaxis_offset = 0
for dim_number, i in enumerate(idx):
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if _is_boolean_index(i):
if isinstance(i, list):
i = lax_numpy.array(i)
abstract_i = core.get_aval(i)
if not core.is_concrete(i):
# TODO(mattjj): improve this error by tracking _why_ the indices are not concrete
raise errors.NonConcreteBooleanIndexError(abstract_i)
elif np.ndim(i) == 0:
out.append(bool(i))
else:
i_shape = np.shape(i)
start = len(out) + ellipsis_offset - newaxis_offset
expected_shape = shape[start: start + np.ndim(i)]
if len(i_shape) != len(expected_shape):
raise IndexError(f"too many boolean indices at index {dim_number}: got mask of shape "
f"{i_shape}, but only {len(expected_shape)} dimensions remain.")
if not all(s1 in (0, s2) for s1, s2 in zip(i_shape, expected_shape)):
raise IndexError("boolean index did not match shape of indexed array in index "
f"{dim_number}: got {i_shape}, expected {expected_shape}")
out.extend(np.where(i))
else:
out.append(i)
if i is Ellipsis:
ellipsis_offset = len(shape) - total_dims - 1
if i is None:
newaxis_offset += 1
return tuple(out)
def _is_slice_element_none_or_constant_or_symbolic(elt):
"""Return True if elt is a constant or None."""
if elt is None: return True
if core.is_symbolic_dim(elt): return True
try:
return core.is_concrete(elt)
except TypeError:
return False
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
"""Returns True if idx should trigger int array indexing, False otherwise."""
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
assert isinstance(idx, tuple)
if all(e is None or e is Ellipsis or isinstance(e, slice)
or _is_scalar(e) and dtypes.issubdtype(dtypes.dtype(e), np.integer) for e in idx):
return False
return all(e is None or e is Ellipsis or isinstance(e, slice)
or _is_int_arraylike(e) for e in idx)
def _is_int_arraylike(x):
"""Returns True if x is array-like with integer dtype, False otherwise."""
return (isinstance(x, int) and not isinstance(x, bool)
or dtypes.issubdtype(getattr(x, "dtype", None), np.integer)
or isinstance(x, (list, tuple)) and all(_is_int_arraylike(e) for e in x))
def _is_scalar(x):
"""Checks if a Python or NumPy scalar."""
return np.isscalar(x) or (
isinstance(x, (np.ndarray, literals.TypedNdArray, Array))
and np.ndim(x) == 0
)
def _canonicalize_tuple_index(arr_ndim, idx):
"""Helper to remove Ellipsis and add in the implicit trailing slice(None)."""
num_dimensions_consumed = sum(not (e is None or e is Ellipsis or isinstance(e, bool)) for e in idx)
if num_dimensions_consumed > arr_ndim:
index_or_indices = "index" if num_dimensions_consumed == 1 else "indices"
raise IndexError(
f"Too many indices: {arr_ndim}-dimensional array indexed "
f"with {num_dimensions_consumed} regular {index_or_indices}.")
ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)
ellipsis_index = next(ellipses, None)
if ellipsis_index is not None:
if next(ellipses, None) is not None:
raise IndexError(
f"Multiple ellipses (...) not supported: {list(map(type, idx))}.")
colons = (slice(None),) * (arr_ndim - num_dimensions_consumed)
idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]
elif num_dimensions_consumed < arr_ndim:
colons = (slice(None),) * (arr_ndim - num_dimensions_consumed)
idx = tuple(idx) + colons
return idx
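# Illustrative behavior (hypothetical inputs): with arr_ndim=3, the index
# (0, Ellipsis) canonicalizes to (0, slice(None), slice(None)), and the short
# index (0,) canonicalizes to the same tuple via the trailing-colon branch.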
@export
def place(arr: ArrayLike, mask: ArrayLike, vals: ArrayLike, *,
inplace: bool = True) -> Array:
"""Update array elements based on a mask.
JAX implementation of :func:`numpy.place`.
The semantics of :func:`numpy.place` are to modify arrays in-place, which
is not possible for JAX's immutable arrays. The JAX version returns a modified
copy of the input, and adds the ``inplace`` parameter which must be set to
``False`` by the user as a reminder of this API difference.
Args:
arr: array into which values will be placed.
mask: boolean mask with the same size as ``arr``.
vals: values to be inserted into ``arr`` at the locations indicated
by mask. If too many values are supplied, they will be truncated.
If not enough values are supplied, they will be repeated.
inplace: must be set to False to indicate that the input is not modified
in-place, but rather a modified copy is returned.
Returns:
A copy of ``arr`` with masked values set to entries from `vals`.
See Also:
- :func:`jax.numpy.put`: put elements into an array at numerical indices.
- :func:`jax.numpy.ndarray.at`: array updates using NumPy-style indexing
Examples:
>>> x = jnp.zeros((3, 5), dtype=int)
>>> mask = (jnp.arange(x.size) % 3 == 0).reshape(x.shape)
>>> mask
Array([[ True, False, False, True, False],
[False, True, False, False, True],
[False, False, True, False, False]], dtype=bool)
Placing a scalar value:
>>> jnp.place(x, mask, 1, inplace=False)
Array([[1, 0, 0, 1, 0],
[0, 1, 0, 0, 1],
[0, 0, 1, 0, 0]], dtype=int32)
In this case, ``jnp.place`` is similar to the masked array update syntax:
>>> x.at[mask].set(1)
Array([[1, 0, 0, 1, 0],
[0, 1, 0, 0, 1],
[0, 0, 1, 0, 0]], dtype=int32)
``place`` differs when placing values from an array. The array is repeated
to fill the masked entries:
>>> vals = jnp.array([1, 3, 5])
>>> jnp.place(x, mask, vals, inplace=False)
Array([[1, 0, 0, 3, 0],
[0, 5, 0, 0, 1],
[0, 0, 3, 0, 0]], dtype=int32)
"""
data, mask_arr, vals_arr = util.ensure_arraylike("place", arr, mask, vals)
vals_arr = vals_arr.ravel()
if inplace:
raise ValueError(
"jax.numpy.place cannot modify arrays in-place, because JAX arrays are immutable. "
"Pass inplace=False to instead return an updated array.")
if data.size != mask_arr.size:
raise ValueError("place: arr and mask must be the same size")
if not vals_arr.size:
raise ValueError("Cannot place values from an empty array")
if not data.size:
return data
indices = lax_numpy.where(mask_arr.ravel(), size=mask_arr.size, fill_value=mask_arr.size)[0]
vals_arr = lax_numpy._tile_to_size(vals_arr, len(indices))
return data.ravel().at[indices].set(vals_arr, mode='drop').reshape(data.shape)
@export
def put(a: ArrayLike, ind: ArrayLike, v: ArrayLike,
mode: str | None = None, *, inplace: bool = True) -> Array:
"""Put elements into an array at given indices.
JAX implementation of :func:`numpy.put`.
The semantics of :func:`numpy.put` are to modify arrays in-place, which
is not possible for JAX's immutable arrays. The JAX version returns a modified
copy of the input, and adds the ``inplace`` parameter which must be set to
``False`` by the user as a reminder of this API difference.
Args:
a: array into which values will be placed.
ind: array of indices over the flattened array at which to put values.
v: array of values to put into the array.
mode: string specifying how to handle out-of-bound indices. Supported values:
- ``"clip"`` (default): clip out-of-bound indices to the final index.
- ``"wrap"``: wrap out-of-bound indices to the beginning of the array.
inplace: must be set to False to indicate that the input is not modified
in-place, but rather a modified copy is returned.
Returns:
A copy of ``a`` with specified entries updated.
See Also:
- :func:`jax.numpy.place`: place elements into an array via boolean mask.
- :func:`jax.numpy.ndarray.at`: array updates using NumPy-style indexing.
- :func:`jax.numpy.take`: extract values from an array at given indices.
Examples:
>>> x = jnp.zeros(5, dtype=int)
>>> indices = jnp.array([0, 2, 4])
>>> values = jnp.array([10, 20, 30])
>>> jnp.put(x, indices, values, inplace=False)
Array([10, 0, 20, 0, 30], dtype=int32)
This is equivalent to the following :attr:`jax.numpy.ndarray.at` indexing syntax:
>>> x.at[indices].set(values)
Array([10, 0, 20, 0, 30], dtype=int32)
There are two modes for handling out-of-bound indices. By default they are
clipped:
>>> indices = jnp.array([0, 2, 6])
>>> jnp.put(x, indices, values, inplace=False, mode='clip')
Array([10, 0, 20, 0, 30], dtype=int32)
Alternatively, they can be wrapped to the beginning of the array:
>>> jnp.put(x, indices, values, inplace=False, mode='wrap')
Array([10, 30, 20, 0, 0], dtype=int32)
For N-dimensional inputs, the indices refer to the flattened array:
>>> x = jnp.zeros((3, 5), dtype=int)
>>> indices = jnp.array([0, 7, 14])
>>> jnp.put(x, indices, values, inplace=False)
Array([[10, 0, 0, 0, 0],
[ 0, 0, 20, 0, 0],
[ 0, 0, 0, 0, 30]], dtype=int32)
"""
if inplace:
raise ValueError(
"jax.numpy.put cannot modify arrays in-place, because JAX arrays are immutable. "
"Pass inplace=False to instead return an updated array.")
arr, ind_arr, _ = util.ensure_arraylike("put", a, ind, v)
ind_arr = ind_arr.ravel()
v_arr = lax_numpy.ravel(v)
if not arr.size or not ind_arr.size or not v_arr.size:
return arr
v_arr = lax_numpy._tile_to_size(v_arr, len(ind_arr))
if mode is None:
scatter_mode = "drop"
elif mode == "clip":
ind_arr = lax_numpy.clip(ind_arr, 0, arr.size - 1)
scatter_mode = "promise_in_bounds"
elif mode == "wrap":
ind_arr = ind_arr % arr.size
scatter_mode = "promise_in_bounds"
elif mode == "raise":
raise NotImplementedError("The 'raise' mode to jnp.put is not supported.")
else:
raise ValueError(f"mode should be one of 'wrap' or 'clip'; got {mode=}")
return arr.at[lax_numpy.unravel_index(ind_arr, arr.shape)].set(v_arr, mode=scatter_mode)
|
_Indexer
|
python
|
django__django
|
django/contrib/postgres/forms/ranges.py
|
{
"start": 3295,
"end": 3484
}
|
class ____(BaseRangeField):
default_error_messages = {"invalid": _("Enter two valid date/times.")}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
|
DateTimeRangeField
|
python
|
huggingface__transformers
|
src/transformers/models/owlvit/modeling_owlvit.py
|
{
"start": 17283,
"end": 22260
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scale
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
# apply the causal_attention_mask first
if causal_attention_mask is not None:
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
f" {causal_attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
# For int8 compatibility, sometimes the `attn_probs` are in `fp32`
attn_probs = attn_probs.to(value_states.dtype)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
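# Shape walkthrough (illustrative numbers): with bsz=2, tgt_len=7,
# embed_dim=768 and num_heads=12 (so head_dim=64), ``attn_weights`` from the
# bmm is (24, 7, 7) and ``attn_output`` is reshaped back to (2, 7, 768)
# before ``out_proj``.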
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->OwlViT
|
OwlViTAttention
|
python
|
django__django
|
django/db/models/fields/json.py
|
{
"start": 13954,
"end": 17179
}
|
class ____(Transform):
postgres_operator = "->"
postgres_nested_operator = "#>"
def __init__(self, key_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key_name = str(key_name)
def preprocess_lhs(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if connection.vendor == "oracle":
# Escape string-formatting.
key_transforms = [key.replace("%", "%%") for key in key_transforms]
return lhs, params, key_transforms
def as_mysql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = connection.ops.compile_json_path(key_transforms)
return "JSON_EXTRACT(%s, %%s)" % lhs, (*params, json_path)
def as_oracle(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = connection.ops.compile_json_path(key_transforms)
if connection.features.supports_primitives_in_json_field:
sql = (
"COALESCE("
"JSON_VALUE(%s, q'\uffff%s\uffff'),"
"JSON_QUERY(%s, q'\uffff%s\uffff' DISALLOW SCALARS)"
")"
)
else:
sql = (
"COALESCE("
"JSON_QUERY(%s, q'\uffff%s\uffff'),"
"JSON_VALUE(%s, q'\uffff%s\uffff')"
")"
)
# Add paths directly into SQL because path expressions cannot be passed
# as bind variables on Oracle. Use a custom delimiter to prevent the
# JSON path from escaping the SQL literal. Each key in the JSON path is
# passed through json.dumps() with ensure_ascii=True (the default),
# which converts the delimiter into the escaped \uffff format. This
# ensures that the delimiter is not present in the JSON path.
return sql % ((lhs, json_path) * 2), tuple(params) * 2
def as_postgresql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
if len(key_transforms) > 1:
sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator)
return sql, (*params, key_transforms)
try:
lookup = int(self.key_name)
except ValueError:
lookup = self.key_name
return "(%s %s %%s)" % (lhs, self.postgres_operator), (*params, lookup)
def as_sqlite(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = connection.ops.compile_json_path(key_transforms)
datatype_values = ",".join(
[repr(datatype) for datatype in connection.ops.jsonfield_datatype_values]
)
return (
"(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) "
"THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)"
) % (lhs, datatype_values, lhs, lhs), (*params, json_path) * 3
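# Illustrative SQLite compilation (hypothetical lookup): a transform chain for
# keys ["data", 0] yields a JSON path along the lines of '$."data"[0]', which
# is bound three times into the CASE expression above.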
|
KeyTransform
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/migrations/0081_add_unique_constraint_to_detector_group.py
|
{
"start": 222,
"end": 1711
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0952_fix_span_item_event_type_alerts"),
("workflow_engine", "0080_update_metric_detector_config_fields"),
]
operations = [
migrations.AlterField(
model_name="detectorgroup",
name="group",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.group"
),
),
]
|
Migration
|
python
|
huggingface__transformers
|
src/transformers/models/informer/modeling_informer.py
|
{
"start": 42910,
"end": 52142
}
|
class ____(InformerPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`InformerDecoderLayer`]
Args:
config: InformerConfig
"""
def __init__(self, config: InformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
if config.prediction_length is None:
raise ValueError("The `prediction_length` config needs to be specified.")
self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = InformerSinusoidalPositionalEmbedding(
config.context_length + config.prediction_length, config.d_model
)
self.layers = nn.ModuleList([InformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_shape = inputs_embeds.size()[:-1]
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device
)
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring
|
InformerDecoder
|
python
|
automl__auto-sklearn
|
test/test_pipeline/implementations/test_util.py
|
{
"start": 101,
"end": 1861
}
|
class ____(unittest.TestCase):
def test_softmax_binary(self):
df = np.array(
[
-40.00643897,
34.69754581,
23.71181359,
-29.89724287,
27.06071791,
-37.78334103,
-40.15812461,
40.16139229,
-27.85887801,
42.67404756,
-36.89753589,
-36.45148009,
54.68976306,
19.47886562,
-49.99821027,
-35.70205302,
-40.59639267,
32.96343916,
-39.23777841,
-37.86535019,
-33.10196906,
26.84144377,
-36.8569686,
]
)
probas = softmax(df)
expected = [[1.0, 0.0] if d < 0.0 else [0.0, 1.0] for d in df]
np.testing.assert_array_almost_equal(expected, probas)
def test_softmax(self):
df = np.array(
[
[2.75021367e10, -8.83772371e-01, -2.20516715e27],
[-2.10848072e11, 2.35024444e-01, 5.20106536e25],
]
)
# With a numerically unstable softmax, the output would be something
# like this:
# [[ 0. 0. nan]
# [nan 0. 0.]]
probas = softmax(df)
expected = np.array([[1, 0, 0], [0, 0, 1]])
self.assertTrue((expected == probas).all())
df = np.array([[0.1, 0.6, 0.3], [0.2, 0.3, 0.5]])
probas = softmax(df)
expected = np.array(
[[0.25838965, 0.42601251, 0.31559783], [0.28943311, 0.31987306, 0.39069383]]
)
np.testing.assert_array_almost_equal(expected, probas)
|
UtilTest
|
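The comment in `test_softmax` above points at the overflow a naive softmax hits on entries like `-2.2e27`; the standard stabilization trick is to subtract the row maximum before exponentiating, so the largest exponent is exactly zero. A minimal sketch of that trick (the textbook technique, not necessarily auto-sklearn's exact implementation):

import numpy as np


def stable_softmax(x):
    # The shift by the row max cancels in the ratio but keeps np.exp
    # from overflowing on huge inputs.
    z = x - np.max(x, axis=1, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=1, keepdims=True)


df = np.array(
    [
        [2.75021367e10, -8.83772371e-01, -2.20516715e27],
        [-2.10848072e11, 2.35024444e-01, 5.20106536e25],
    ]
)
print(stable_softmax(df))  # ~[[1, 0, 0], [0, 0, 1]], no NaNs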
python
|
huggingface__transformers
|
src/transformers/models/sam/modeling_sam.py
|
{
"start": 2344,
"end": 4368
}
|
class ____(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, num_masks)`):
The iou scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
The predicted low resolutions masks. Needs to be post-processed by the processor
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
iou_scores: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
SamImageSegmentationOutput
|
python
|
pydata__xarray
|
xarray/computation/rolling_exp.py
|
{
"start": 1460,
"end": 9389
}
|
class ____(Generic[T_DataWithCoords]):
"""
Exponentially-weighted moving window object.
Similar to EWM in pandas
Parameters
----------
obj : Dataset or DataArray
Object to window.
windows : mapping of hashable to int (or float for alpha type)
A mapping from the name of the dimension to create the rolling
exponential window along (e.g. `time`) to the size of the moving window.
window_type : {"span", "com", "halflife", "alpha"}, default: "span"
The format of the previously supplied window. Each is a simple
numerical transformation of the others. Described in detail:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html
Returns
-------
RollingExp : type of input argument
"""
def __init__(
self,
obj: T_DataWithCoords,
windows: Mapping[Any, int | float],
window_type: str = "span",
min_weight: float = 0.0,
):
if not module_available("numbagg"):
raise ImportError(
"numbagg >= 0.2.1 is required for rolling_exp but currently numbagg is not installed"
)
self.obj: T_DataWithCoords = obj
dim, window = next(iter(windows.items()))
self.dim = dim
self.alpha = _get_alpha(**{window_type: window})
self.min_weight = min_weight
# Don't pass min_weight=0 so we can support older versions of numbagg
kwargs = dict(alpha=self.alpha, axis=-1)
if min_weight > 0:
kwargs["min_weight"] = min_weight
self.kwargs = kwargs
def mean(self, keep_attrs: bool | None = None) -> T_DataWithCoords:
"""
Exponentially weighted moving average.
Parameters
----------
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
Examples
--------
>>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x")
>>> da.rolling_exp(x=2, window_type="span").mean()
<xarray.DataArray (x: 5)> Size: 40B
array([1. , 1. , 1.69230769, 1.9 , 1.96694215])
Dimensions without coordinates: x
"""
import numbagg
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
dim_order = self.obj.dims
return apply_ufunc(
numbagg.move_exp_nanmean,
self.obj,
input_core_dims=[[self.dim]],
kwargs=self.kwargs,
output_core_dims=[[self.dim]],
keep_attrs=keep_attrs,
on_missing_core_dim="copy",
dask="parallelized",
).transpose(*dim_order)
def sum(self, keep_attrs: bool | None = None) -> T_DataWithCoords:
"""
Exponentially weighted moving sum.
Parameters
----------
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
Examples
--------
>>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x")
>>> da.rolling_exp(x=2, window_type="span").sum()
<xarray.DataArray (x: 5)> Size: 40B
array([1. , 1.33333333, 2.44444444, 2.81481481, 2.9382716 ])
Dimensions without coordinates: x
"""
import numbagg
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
dim_order = self.obj.dims
return apply_ufunc(
numbagg.move_exp_nansum,
self.obj,
input_core_dims=[[self.dim]],
kwargs=self.kwargs,
output_core_dims=[[self.dim]],
keep_attrs=keep_attrs,
on_missing_core_dim="copy",
dask="parallelized",
).transpose(*dim_order)
def std(self) -> T_DataWithCoords:
"""
Exponentially weighted moving standard deviation.
`keep_attrs` is always True for this method. Drop attrs separately to remove attrs.
Examples
--------
>>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x")
>>> da.rolling_exp(x=2, window_type="span").std()
<xarray.DataArray (x: 5)> Size: 40B
array([ nan, 0. , 0.67936622, 0.42966892, 0.25389527])
Dimensions without coordinates: x
"""
import numbagg
dim_order = self.obj.dims
return apply_ufunc(
numbagg.move_exp_nanstd,
self.obj,
input_core_dims=[[self.dim]],
kwargs=self.kwargs,
output_core_dims=[[self.dim]],
keep_attrs=True,
on_missing_core_dim="copy",
dask="parallelized",
).transpose(*dim_order)
def var(self) -> T_DataWithCoords:
"""
Exponentially weighted moving variance.
`keep_attrs` is always True for this method. Drop attrs separately to remove attrs.
Examples
--------
>>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x")
>>> da.rolling_exp(x=2, window_type="span").var()
<xarray.DataArray (x: 5)> Size: 40B
array([ nan, 0. , 0.46153846, 0.18461538, 0.06446281])
Dimensions without coordinates: x
"""
dim_order = self.obj.dims
import numbagg
return apply_ufunc(
numbagg.move_exp_nanvar,
self.obj,
input_core_dims=[[self.dim]],
kwargs=self.kwargs,
output_core_dims=[[self.dim]],
keep_attrs=True,
on_missing_core_dim="copy",
dask="parallelized",
).transpose(*dim_order)
def cov(self, other: T_DataWithCoords) -> T_DataWithCoords:
"""
Exponentially weighted moving covariance.
`keep_attrs` is always True for this method. Drop attrs separately to remove attrs.
Examples
--------
>>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x")
>>> da.rolling_exp(x=2, window_type="span").cov(da**2)
<xarray.DataArray (x: 5)> Size: 40B
array([ nan, 0. , 1.38461538, 0.55384615, 0.19338843])
Dimensions without coordinates: x
"""
dim_order = self.obj.dims
import numbagg
return apply_ufunc(
numbagg.move_exp_nancov,
self.obj,
other,
input_core_dims=[[self.dim], [self.dim]],
kwargs=self.kwargs,
output_core_dims=[[self.dim]],
keep_attrs=True,
on_missing_core_dim="copy",
dask="parallelized",
).transpose(*dim_order)
def corr(self, other: T_DataWithCoords) -> T_DataWithCoords:
"""
Exponentially weighted moving correlation.
`keep_attrs` is always True for this method. Drop attrs separately to remove attrs.
Examples
--------
>>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x")
>>> da.rolling_exp(x=2, window_type="span").corr(da.shift(x=1))
<xarray.DataArray (x: 5)> Size: 40B
array([ nan, nan, nan, 0.4330127 , 0.48038446])
Dimensions without coordinates: x
"""
dim_order = self.obj.dims
import numbagg
return apply_ufunc(
numbagg.move_exp_nancorr,
self.obj,
other,
input_core_dims=[[self.dim], [self.dim]],
kwargs=self.kwargs,
output_core_dims=[[self.dim]],
keep_attrs=True,
on_missing_core_dim="copy",
dask="parallelized",
).transpose(*dim_order)
|
RollingExp
|
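The `window_type` docstring above notes that span, com, halflife, and alpha are simple numerical transformations of one another; for reference, the standard pandas conversions (which `_get_alpha` presumably mirrors) look like this:

import math


def alpha_from_span(span):
    # pandas convention: alpha = 2 / (span + 1), for span >= 1
    assert span >= 1
    return 2.0 / (span + 1.0)


def alpha_from_com(com):
    # pandas convention: alpha = 1 / (1 + com), for com >= 0
    assert com >= 0
    return 1.0 / (1.0 + com)


def alpha_from_halflife(halflife):
    # pandas convention: alpha = 1 - exp(-ln 2 / halflife), for halflife > 0
    assert halflife > 0
    return 1.0 - math.exp(-math.log(2.0) / halflife)


print(alpha_from_span(2))  # 2/3, the window used in the doctests above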
python
|
pandas-dev__pandas
|
pandas/core/arrays/integer.py
|
{
"start": 6264,
"end": 6457
}
|
class ____(IntegerDtype):
type = np.uint32
name: ClassVar[str] = "UInt32"
__doc__ = _dtype_docstring.format(dtype="uint32")
@register_extension_dtype
@set_module("pandas")
|
UInt32Dtype
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/connectors_insights/src/connectors_insights/pylint_plugins/cdk_deprecation_checkers.py
|
{
"start": 725,
"end": 1157
}
|
class ____(BaseChecker):
name = "forbidden-method-name-checker"
msgs = {
"C9001": ('Method name "%s" is forbidden', "forbidden-method-name", "Used when a forbidden method name is detected."),
}
def visit_functiondef(self, node: astroid.node) -> None:
if node.name in FORBIDDEN_METHOD_NAMES:
self.add_message("forbidden-method-name", node=node, args=(node.name,))
|
ForbiddenMethodNameChecker
|
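A checker like the one above is activated through a module-level `register` hook, which pylint calls when the module is loaded via `--load-plugins`; a minimal sketch using pylint's standard `register_checker` API:

def register(linter):
    # pylint passes its PyLinter instance here at plugin load time.
    linter.register_checker(ForbiddenMethodNameChecker(linter))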
python
|
huggingface__transformers
|
src/transformers/models/ijepa/modeling_ijepa.py
|
{
"start": 14554,
"end": 15077
}
|
class ____(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([IJepaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor) -> BaseModelOutput:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
return BaseModelOutput(last_hidden_state=hidden_states)
|
IJepaEncoder
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
|
{
"start": 7523,
"end": 8023
}
|
class ____:
def a(self):
pass
# wrongly indented comment
def b(self):
pass
# end
# E303
def fn():
pass
pass
# end
# E304
@decorator
def function():
pass
# end
# E304
@decorator
# comment E304 not expected
def function():
pass
# end
# E304
@decorator
# comment E304 not expected
# second comment E304 not expected
def function():
pass
# end
# E305:7:1
def fn():
print()
# comment
# another comment
fn()
# end
# E305
|
Test
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/metadata/table.py
|
{
"start": 874,
"end": 1526
}
|
class ____:
other: PublicAttr[Sequence[str]]
"""Descriptor for "table-level" constraints. Presently only one property,
`other` is supported. This contains strings describing arbitrary
table-level constraints. A table-level constraint is a constraint defined
in terms of multiple columns (e.g. col_A > col_B) or in terms of rows.
Args:
other (List[str]): Descriptions of arbitrary table-level constraints.
"""
_DEFAULT_TABLE_CONSTRAINTS = TableConstraints(other=[])
# ########################
# ##### TABLE COLUMN CONSTRAINTS
# ########################
@public
@whitelist_for_serdes
@record_custom
|
TableConstraints
|
python
|
numba__llvmlite
|
llvmlite/ir/instructions.py
|
{
"start": 10724,
"end": 11619
}
|
class ____(Instruction):
def __init__(self, parent, cond, lhs, rhs, name='', flags=()):
assert lhs.type == rhs.type
super(SelectInstr, self).__init__(parent, lhs.type, "select",
[cond, lhs, rhs], name=name,
flags=flags)
@property
def cond(self):
return self.operands[0]
@property
def lhs(self):
return self.operands[1]
@property
def rhs(self):
return self.operands[2]
def descr(self, buf):
buf.append("select {0} {1} {2}, {3} {4}, {5} {6} {7}\n".format(
' '.join(self.flags),
self.cond.type, self.cond.get_reference(),
self.lhs.type, self.lhs.get_reference(),
self.rhs.type, self.rhs.get_reference(),
self._stringify_metadata(leading_comma=True),
))
|
SelectInstr
|
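A `SelectInstr` like the one above is normally created through the IR builder rather than instantiated directly; a small sketch using llvmlite's public API (function and value names are illustrative):

from llvmlite import ir

i32 = ir.IntType(32)
module = ir.Module(name="demo")
fn = ir.Function(module, ir.FunctionType(i32, (i32, i32)), name="smax")
a, b = fn.args
builder = ir.IRBuilder(fn.append_basic_block("entry"))
cond = builder.icmp_signed(">", a, b)
# builder.select constructs the SelectInstr rendered by descr() above
builder.ret(builder.select(cond, a, b))
print(module)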
python
|
scipy__scipy
|
scipy/special/tests/test_orthogonal.py
|
{
"start": 8732,
"end": 9998
}
|
class ____:
def test_sh_jacobi(self):
# G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
def conv(n, p):
return gamma(n + 1) * gamma(n + p) / gamma(2 * n + p)
psub = np.poly1d([2,-1])
q = 4 * np.random.random()
p = q-1 + 2*np.random.random()
# print("shifted jacobi p,q = ", p, q)
G0 = orth.sh_jacobi(0,p,q)
G1 = orth.sh_jacobi(1,p,q)
G2 = orth.sh_jacobi(2,p,q)
G3 = orth.sh_jacobi(3,p,q)
G4 = orth.sh_jacobi(4,p,q)
G5 = orth.sh_jacobi(5,p,q)
ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p)
ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p)
ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p)
ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p)
ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p)
ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p)
assert_allclose(G0.c, ge0.c, atol=1.5e-13, rtol=0)
assert_allclose(G1.c, ge1.c, atol=1.5e-13, rtol=0)
assert_allclose(G2.c, ge2.c, atol=1.5e-13, rtol=0)
assert_allclose(G3.c, ge3.c, atol=1.5e-13, rtol=0)
assert_allclose(G4.c, ge4.c, atol=1.5e-13, rtol=0)
assert_allclose(G5.c, ge5.c, atol=1.5e-13, rtol=0)
|
TestShJacobi
|
python
|
openai__openai-python
|
src/openai/resources/beta/assistants.py
|
{
"start": 23090,
"end": 45007
}
|
class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncAssistantsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncAssistantsWithStreamingResponse(self)
async def create(
self,
*,
model: Union[str, ChatModel],
description: Optional[str] | Omit = omit,
instructions: Optional[str] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
name: Optional[str] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_resources: Optional[assistant_create_params.ToolResources] | Omit = omit,
tools: Iterable[AssistantToolParam] | Omit = omit,
top_p: Optional[float] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Assistant:
"""
Create an assistant with a model and instructions.
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
description: The description of the assistant. The maximum length is 512 characters.
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the assistant. The maximum length is 256 characters.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: Specifies the format that the model must output. Compatible with
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
message the model generates is valid JSON.
**Important:** when using JSON mode, you **must** also instruct the model to
produce JSON yourself via a system or user message. Without this, the model may
generate an unending stream of whitespace until the generation reaches the token
limit, resulting in a long-running and seemingly "stuck" request. Also note that
the message content may be partially cut off if `finish_reason="length"`, which
indicates the generation exceeded `max_tokens` or the conversation exceeded the
max context length.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
tool_resources: A set of resources that are used by the assistant's tools. The resources are
specific to the type of tool. For example, the `code_interpreter` tool requires
a list of file IDs, while the `file_search` tool requires a list of vector store
IDs.
tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
assistant. Tools can be of types `code_interpreter`, `file_search`, or
`function`.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/assistants",
body=await async_maybe_transform(
{
"model": model,
"description": description,
"instructions": instructions,
"metadata": metadata,
"name": name,
"reasoning_effort": reasoning_effort,
"response_format": response_format,
"temperature": temperature,
"tool_resources": tool_resources,
"tools": tools,
"top_p": top_p,
},
assistant_create_params.AssistantCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Assistant,
)
async def retrieve(
self,
assistant_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Assistant:
"""
Retrieves an assistant.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/assistants/{assistant_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Assistant,
)
async def update(
self,
assistant_id: str,
*,
description: Optional[str] | Omit = omit,
instructions: Optional[str] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
model: Union[
str,
Literal[
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5-2025-08-07",
"gpt-5-mini-2025-08-07",
"gpt-5-nano-2025-08-07",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"o3-mini",
"o3-mini-2025-01-31",
"o1",
"o1-2024-12-17",
"gpt-4o",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
]
| Omit = omit,
name: Optional[str] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_resources: Optional[assistant_update_params.ToolResources] | Omit = omit,
tools: Iterable[AssistantToolParam] | Omit = omit,
top_p: Optional[float] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Assistant:
"""Modifies an assistant.
Args:
description: The description of the assistant.
The maximum length is 512 characters.
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
name: The name of the assistant. The maximum length is 256 characters.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: Specifies the format that the model must output. Compatible with
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
message the model generates is valid JSON.
**Important:** when using JSON mode, you **must** also instruct the model to
produce JSON yourself via a system or user message. Without this, the model may
generate an unending stream of whitespace until the generation reaches the token
limit, resulting in a long-running and seemingly "stuck" request. Also note that
the message content may be partially cut off if `finish_reason="length"`, which
indicates the generation exceeded `max_tokens` or the conversation exceeded the
max context length.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
tool_resources: A set of resources that are used by the assistant's tools. The resources are
specific to the type of tool. For example, the `code_interpreter` tool requires
a list of file IDs, while the `file_search` tool requires a list of vector store
IDs.
tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
assistant. Tools can be of types `code_interpreter`, `file_search`, or
`function`.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/assistants/{assistant_id}",
body=await async_maybe_transform(
{
"description": description,
"instructions": instructions,
"metadata": metadata,
"model": model,
"name": name,
"reasoning_effort": reasoning_effort,
"response_format": response_format,
"temperature": temperature,
"tool_resources": tool_resources,
"tools": tools,
"top_p": top_p,
},
assistant_update_params.AssistantUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Assistant,
)
def list(
self,
*,
after: str | Omit = omit,
before: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[Assistant, AsyncCursorPage[Assistant]]:
"""Returns a list of assistants.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
"/assistants",
page=AsyncCursorPage[Assistant],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"limit": limit,
"order": order,
},
assistant_list_params.AssistantListParams,
),
),
model=Assistant,
)
async def delete(
self,
assistant_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AssistantDeleted:
"""
Delete an assistant.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
f"/assistants/{assistant_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=AssistantDeleted,
)
|
AsyncAssistants
|
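Because `list` above returns an `AsyncPaginator`, callers usually consume it with `async for`, which follows the `after` cursor described in the docstring to fetch pages transparently. A short usage sketch (assuming `OPENAI_API_KEY` is set in the environment):

import asyncio

from openai import AsyncOpenAI


async def main():
    client = AsyncOpenAI()
    # Async iteration auto-paginates: the next page is requested with
    # after=<last object id> until the listing is exhausted.
    async for assistant in client.beta.assistants.list(limit=20, order="desc"):
        print(assistant.id)


asyncio.run(main())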
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-first-player-to-win-k-games-in-a-row.py
|
{
"start": 42,
"end": 464
}
|
class ____(object):
def findWinningPlayer(self, skills, k):
"""
:type skills: List[int]
:type k: int
:rtype: int
"""
result = cnt = 0
for i in range(1, len(skills)):
if skills[result] < skills[i]:
result = i
cnt = 0
cnt += 1
if cnt == k:
return result
return result
|
Solution
|
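The single pass above works because the current champion index only changes when beaten, the streak counter resets on a change, and if nobody reaches k consecutive wins the loop falls through to the strongest player. A quick illustrative check:

s = Solution()
print(s.findWinningPlayer([4, 2, 6, 3, 9], 2))  # 2: skill 6 wins twice in a row
print(s.findWinningPlayer([2, 5, 4], 3))        # 1: k unreachable, max skill wins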
python
|
scikit-image__scikit-image
|
src/skimage/_shared/utils.py
|
{
"start": 6322,
"end": 6663
}
|
class ____(metaclass=PatchClassRepr):
"""Signal value to help with deprecating parameters that use None.
This is a proxy object, used to signal that a parameter has not been set.
This is useful if ``None`` is already used for a different purpose or just
to highlight a deprecated parameter in the signature.
"""
|
DEPRECATED
|
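Callers consume a sentinel like the one above with identity checks. A hypothetical sketch of the pattern, with a stand-in for `PatchClassRepr` since its definition is not shown here:

class PatchClassRepr(type):
    # Stand-in metaclass: give the class object itself a readable repr
    # so the sentinel displays nicely in signatures and error messages.
    def __repr__(cls):
        return f"<{cls.__name__}>"


class DEPRECATED(metaclass=PatchClassRepr):
    """Signal value meaning 'parameter was not set'."""


def rescale(image, anti_aliasing=DEPRECATED):  # hypothetical signature
    # An identity check distinguishes "not passed" from an explicit None.
    if anti_aliasing is DEPRECATED:
        anti_aliasing = True  # hypothetical legacy default
    return image, anti_aliasing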
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_observability.py
|
{
"start": 9620,
"end": 23582
}
|
class ____(RuleBasedStateMachine):
value = 0
@rule()
def inc(self):
self.value += 1
@rule()
def dec(self):
self.value -= 1
@invariant()
def limits(self):
assert abs(self.value) <= 100
@xfail_on_crosshair(Why.other, strict=False)
def test_observability_captures_stateful_reprs():
with capture_observations() as ls:
run_state_machine_as_test(UltraSimpleMachine)
for x in ls:
if x.type != "test_case" or x.status == "gave_up":
continue
r = x.representation
assert "state.limits()" in r
assert "state.inc()" in r or "state.dec()" in r # or both
t = x.timing
assert "execute:invariant:limits" in t
has_inc = "generate:rule:inc" in t and "execute:rule:inc" in t
has_dec = "generate:rule:dec" in t and "execute:rule:dec" in t
assert has_inc or has_dec
# BytestringProvider.draw_boolean divides [0, 127] as False and [128, 255]
# as True
@pytest.mark.parametrize(
"buffer, expected_status",
[
# Status.OVERRUN
(b"", "gave_up"),
# Status.INVALID
(b"\x00" + bytes([255]), "gave_up"),
# Status.VALID
(b"\x00\x00", "passed"),
# Status.INTERESTING
(bytes([255]) + b"\x00", "failed"),
],
)
def test_fuzz_one_input_status(buffer, expected_status):
@given(st.booleans(), st.booleans())
def test_fails(should_fail, should_fail_assume):
if should_fail:
raise AssertionError
if should_fail_assume:
assume(False)
with (
capture_observations() as ls,
pytest.raises(AssertionError) if expected_status == "failed" else nullcontext(),
):
test_fails.hypothesis.fuzz_one_input(buffer)
assert len(ls) == 1
assert ls[0].status == expected_status
assert ls[0].how_generated == "fuzz_one_input"
def _decode_choice(value):
if isinstance(value, list):
if value[0] == "integer":
# large integers get cast to string, stored as ["integer", str(value)]
assert isinstance(value[1], str)
return int(value[1])
elif value[0] == "bytes":
assert isinstance(value[1], str)
return base64.b64decode(value[1])
elif value[0] == "float":
assert isinstance(value[1], int)
choice = int_to_float(value[1])
assert math.isnan(choice)
return choice
else:
return value[1]
return value
def _decode_choices(data):
return [_decode_choice(value) for value in data]
def _decode_nodes(data):
return [
ChoiceNode(
type=node["type"],
value=_decode_choice(node["value"]),
constraints=_decode_constraints(node["type"], node["constraints"]),
was_forced=node["was_forced"],
)
for node in data
]
def _decode_constraints(choice_type, data):
if choice_type == "integer":
return {
"min_value": _decode_choice(data["min_value"]),
"max_value": _decode_choice(data["max_value"]),
"weights": (
None
if data["weights"] is None
else {_decode_choice(k): v for k, v in data["weights"]}
),
"shrink_towards": _decode_choice(data["shrink_towards"]),
}
elif choice_type == "float":
return {
"min_value": _decode_choice(data["min_value"]),
"max_value": _decode_choice(data["max_value"]),
"allow_nan": data["allow_nan"],
"smallest_nonzero_magnitude": data["smallest_nonzero_magnitude"],
}
elif choice_type == "string":
return {
"intervals": IntervalSet(tuple(data["intervals"])),
"min_size": _decode_choice(data["min_size"]),
"max_size": _decode_choice(data["max_size"]),
}
elif choice_type == "bytes":
return {
"min_size": _decode_choice(data["min_size"]),
"max_size": _decode_choice(data["max_size"]),
}
elif choice_type == "boolean":
return {"p": data["p"]}
else:
raise ValueError(f"unknown choice type {choice_type}")
def _has_surrogate(choice):
return isinstance(choice, str) and any(0xD800 <= ord(c) <= 0xDFFF for c in choice)
@example([0.0])
@example([-0.0])
@example([SIGNALING_NAN])
@example([math.nan])
@example([math.inf])
@example([-math.inf])
# json.{loads, dumps} does not roundtrip for surrogate pairs; they are combined
# into the single code point by json.loads:
# json.loads(json.dumps("\udbf4\udc00")) == '\U0010d000'
#
# Ignore this case with an `assume`, and add an explicit example to ensure we
# continue to do so.
@example(["\udbf4\udc00"])
@given(st.lists(choices()))
def test_choices_json_roundtrips(choices):
assume(not any(_has_surrogate(choice) for choice in choices))
choices2 = _decode_choices(json.loads(json.dumps(choices_to_json(choices))))
assert choices_key(choices) == choices_key(choices2)
@given(st.lists(nodes()))
def test_nodes_json_roundtrips(nodes):
assume(
not any(
_has_surrogate(node.value)
or any(_has_surrogate(value) for value in node.constraints.values())
for node in nodes
)
)
nodes2 = _decode_nodes(json.loads(json.dumps(nodes_to_json(nodes))))
assert nodes == nodes2
@pytest.mark.parametrize(
"choice, expected",
[
(math.nan, ["float", float_to_int(math.nan)]),
(SIGNALING_NAN, ["float", float_to_int(SIGNALING_NAN)]),
(1, 1),
(-1, -1),
(2**63 + 1, ["integer", str(2**63 + 1)]),
(-(2**63 + 1), ["integer", str(-(2**63 + 1))]),
(1.0, 1.0),
(-0.0, -0.0),
(0.0, 0.0),
(True, True),
(False, False),
(b"a", ["bytes", "YQ=="]),
],
)
def test_choices_to_json_explicit(choice, expected):
assert choices_to_json([choice]) == [expected]
@pytest.mark.parametrize(
"choice_node, expected",
[
(
ChoiceNode(
type="integer",
value=2**63 + 1,
constraints=integer_constr(),
was_forced=False,
),
{
"type": "integer",
"value": ["integer", str(2**63 + 1)],
"constraints": integer_constr(),
"was_forced": False,
},
),
],
)
def test_choice_nodes_to_json_explicit(choice_node, expected):
assert nodes_to_json([choice_node]) == [expected]
def test_metadata_to_json():
# this is more of a coverage test than a test of anything particular about
# ObservationMetadata.
@given(st.integers())
def f(n):
pass
with capture_observations(choices=True) as observations:
f()
observations = [obs for obs in observations if obs.type == "test_case"]
for observation in observations:
assert set(
to_jsonable(observation.metadata, avoid_realization=False).keys()
) == {
"traceback",
"reproduction_decorator",
"predicates",
"backend",
"sys.argv",
"os.getpid()",
"imported_at",
"data_status",
"phase",
"interesting_origin",
"choice_nodes",
"choice_spans",
}
assert observation.metadata.choice_nodes is not None
for span in observation.metadata.choice_spans:
assert isinstance(span, Span)
assert 0 <= span.start <= len(observation.metadata.choice_nodes)
assert 0 <= span.end <= len(observation.metadata.choice_nodes)
@contextlib.contextmanager
def restore_callbacks():
callbacks = hypothesis.internal.observability._callbacks.copy()
callbacks_all = hypothesis.internal.observability._callbacks_all_threads.copy()
try:
yield
finally:
hypothesis.internal.observability._callbacks = callbacks
hypothesis.internal.observability._callbacks_all_threads = callbacks_all
@contextlib.contextmanager
def with_collect_coverage(*, value: bool):
original_value = hypothesis.internal.observability.OBSERVABILITY_COLLECT_COVERAGE
hypothesis.internal.observability.OBSERVABILITY_COLLECT_COVERAGE = value
try:
yield
finally:
hypothesis.internal.observability.OBSERVABILITY_COLLECT_COVERAGE = (
original_value
)
def _callbacks():
# respect changes from the restore_callbacks context manager by re-accessing
# its namespace, instead of keeping
# `from hypothesis.internal.observability import _callbacks` around
return hypothesis.internal.observability._callbacks
@skipif_threading
def test_observability_callbacks():
def f(observation):
pass
def g(observation):
pass
thread_id = threading.get_ident()
with restore_callbacks():
assert not observability_enabled()
add_observability_callback(f)
assert _callbacks() == {thread_id: [f]}
assert observability_enabled()
add_observability_callback(g)
assert _callbacks() == {thread_id: [f, g]}
assert observability_enabled()
remove_observability_callback(g)
assert _callbacks() == {thread_id: [f]}
assert observability_enabled()
remove_observability_callback(g)
assert _callbacks() == {thread_id: [f]}
assert observability_enabled()
remove_observability_callback(f)
assert _callbacks() == {}
assert not observability_enabled()
@skipif_threading
def test_observability_callbacks_all_threads():
thread_id = threading.get_ident()
def f(observation, thread_id):
pass
with restore_callbacks():
assert not observability_enabled()
add_observability_callback(f, all_threads=True)
assert hypothesis.internal.observability._callbacks_all_threads == [f]
assert _callbacks() == {}
assert observability_enabled()
add_observability_callback(f)
assert hypothesis.internal.observability._callbacks_all_threads == [f]
assert _callbacks() == {thread_id: [f]}
assert observability_enabled()
# remove_observability_callback removes it both from per-thread and
# all_threads. The semantics of duplicated callbacks is weird enough
# that I don't want to commit to anything here, so I'm leaving this as
# somewhat undefined behavior, and recommending that users simply not
# register a callback both normally and for all threads.
remove_observability_callback(f)
assert hypothesis.internal.observability._callbacks_all_threads == []
assert _callbacks() == {}
assert not observability_enabled()
@checks_deprecated_behaviour
def test_testcase_callbacks_deprecation_bool():
bool(TESTCASE_CALLBACKS)
@checks_deprecated_behaviour
def test_testcase_callbacks_deprecation_append():
with restore_callbacks():
TESTCASE_CALLBACKS.append(lambda x: None)
@checks_deprecated_behaviour
def test_testcase_callbacks_deprecation_remove():
with restore_callbacks():
TESTCASE_CALLBACKS.remove(lambda x: None)
def test_testcase_callbacks():
def f(observation):
pass
def g(observation):
pass
thread_id = threading.get_ident()
with restore_callbacks(), warnings.catch_warnings():
# ignore TESTCASE_CALLBACKS deprecation warnings
warnings.simplefilter("ignore")
assert not bool(TESTCASE_CALLBACKS)
add_observability_callback(f)
assert _callbacks() == {thread_id: [f]}
assert bool(TESTCASE_CALLBACKS)
add_observability_callback(g)
assert _callbacks() == {thread_id: [f, g]}
assert bool(TESTCASE_CALLBACKS)
remove_observability_callback(g)
assert _callbacks() == {thread_id: [f]}
assert bool(TESTCASE_CALLBACKS)
remove_observability_callback(f)
assert _callbacks() == {}
assert not bool(TESTCASE_CALLBACKS)
def test_only_receives_callbacks_from_this_thread():
@given(st.integers())
def g(n):
pass
def test():
count_observations = 0
def callback(observation):
nonlocal count_observations
count_observations += 1
add_observability_callback(callback)
with warnings.catch_warnings():
g()
# one per example, plus one for the overall run
assert count_observations == settings().max_examples + 1
with (
restore_callbacks(),
# Observability tries to record coverage, but we don't currently
# support concurrent coverage collection, and issue a warning instead.
#
# I tried to fix this with:
#
# warnings.filterwarnings(
# "ignore", message=r".*tool id \d+ is already taken by tool scrutineer.*"
# )
#
# but that had a race condition somehow and sometimes still didn't work?? The
# warnings module is not thread-safe until 3.14, I think.
with_collect_coverage(value=False),
):
run_concurrently(test, n=5)
def test_all_threads_callback():
n_threads = 5
# thread_id: count
calls = defaultdict(int)
def global_callback(observation, thread_id):
assert isinstance(observation, (TestCaseObservation, InfoObservation))
assert isinstance(thread_id, int)
calls[thread_id] += 1
@given(st.integers())
def f(n):
pass
with (
with_collect_coverage(value=False),
with_observability_callback(global_callback, all_threads=True),
):
run_concurrently(f, n=n_threads)
assert len(calls) == n_threads
assert all(count == (settings().max_examples + 1) for count in calls.values())
|
UltraSimpleMachine
|
python
|
tensorflow__tensorflow
|
tensorflow/python/compiler/tensorrt/test/bool_test.py
|
{
"start": 1083,
"end": 1987
}
|
class ____(trt_test.TfTrtIntegrationTestBase):
"""Test for boolean operations in TF-TRT."""
def GraphFn(self, x1, x2):
x = math_ops.logical_and(x1, x2)
x = math_ops.logical_or(x, x2)
q = math_ops.not_equal(x, x2)
q = math_ops.logical_not(q)
return array_ops.identity(q, name="output_0")
def GetParams(self):
shape = [2, 32, 32, 3]
return self.BuildParams(self.GraphFn, dtypes.bool, [shape, shape], [shape])
def ExpectedEnginesToBuild(self, run_params):
"""Returns the expected engines to build."""
return ["TRTEngineOp_000"]
def ShouldRunTest(self, run_params):
reason = "Boolean ops are not implemented "
return (run_params.dynamic_shape, reason + "in ImplicitBatch mode") \
if trt_utils.is_linked_tensorrt_version_greater_equal(8, 2, 0) \
else (False, reason + "for TRT < 8.2.0")
if __name__ == "__main__":
test.main()
|
BoolTest
|
python
|
numba__numba
|
numba/tests/test_dispatcher.py
|
{
"start": 21929,
"end": 22100
}
|
class ____(TestSignatureHandling):
"""
Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
|
TestSignatureHandlingObjectMode
|
python
|
mlflow__mlflow
|
mlflow/types/llm.py
|
{
"start": 12021,
"end": 13250
}
|
class ____(_BaseDataclass):
"""
Definition for function tools (currently the only supported type of tool).
Args:
name (str): The name of the tool.
description (str): A description of what the tool does, and how it should be used.
**Optional**, defaults to ``None``
parameters: A mapping of parameter names to their
definitions. If not provided, this defines a function without parameters.
**Optional**, defaults to ``None``
strict (bool): A flag that represents whether or not the model should
strictly follow the schema provided.
**Optional**, defaults to ``False``
"""
name: str
description: str | None = None
parameters: ToolParamsSchema | None = None
strict: bool = False
def __post_init__(self):
self._validate_field("name", str, True)
self._validate_field("description", str, False)
self._convert_dataclass("parameters", ToolParamsSchema, False)
self._validate_field("strict", bool, True)
def to_tool_definition(self):
"""
Convenience function for wrapping this in a ToolDefinition
"""
return ToolDefinition(type="function", function=self)
@dataclass
|
FunctionToolDefinition
|
python
|
coleifer__peewee
|
examples/graph.py
|
{
"start": 116,
"end": 575
}
|
class ____(Base):
name = TextField(primary_key=True)
def outgoing(self):
return (Node
.select(Node, Edge.weight)
.join(Edge, on=Edge.dest)
.where(Edge.src == self)
.objects())
def incoming(self):
return (Node
.select(Node, Edge.weight)
.join(Edge, on=Edge.src)
.where(Edge.dest == self)
.objects())
|
Node
|
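The queries above presuppose an `Edge` model with `src`/`dest` foreign keys and a `weight` column; a self-contained sketch of that setup, with field names inferred from the joins and the database and weight type assumed:

from peewee import (FloatField, ForeignKeyField, Model, SqliteDatabase,
                    TextField)

db = SqliteDatabase(":memory:")


class Base(Model):
    class Meta:
        database = db


class Node(Base):
    name = TextField(primary_key=True)


class Edge(Base):
    src = ForeignKeyField(Node, backref="outgoing_edges")
    dest = ForeignKeyField(Node, backref="incoming_edges")
    weight = FloatField(default=1.0)


db.create_tables([Node, Edge])
a = Node.create(name="a")
b = Node.create(name="b")
Edge.create(src=a, dest=b, weight=2.5)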
python
|
graphql-python__graphene
|
graphene/utils/tests/test_dataloader.py
|
{
"start": 804,
"end": 1115
}
|
class ____(ObjectType):
skywalker_family = List(CharacterType)
async def resolve_skywalker_family(_, info):
return await info.context.character_loader.load_many(["1", "2", "3"])
mock_batch_load_fn = Mock(
side_effect=lambda character_ids: [get_character(id) for id in character_ids]
)
|
Query
|
python
|
huggingface__transformers
|
src/transformers/models/gemma2/modeling_gemma2.py
|
{
"start": 13924,
"end": 16082
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: Gemma2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.config = config
self.attention_type = config.layer_types[layer_idx]
self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
self.mlp = Gemma2MLP(config)
self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
Gemma2DecoderLayer
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/index/collector.py
|
{
"start": 8192,
"end": 8817
}
|
class ____:
"""Represents one response (or page), along with its URL.
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
"""
content: bytes
content_type: str
encoding: Optional[str]
url: str
cache_link_parsing: bool = True
def __str__(self) -> str:
return redact_auth_from_url(self.url)
|
IndexContent
|
python
|
openai__openai-python
|
src/openai/types/evals/run_cancel_response.py
|
{
"start": 6108,
"end": 6798
}
|
class ____(BaseModel):
content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent
"""Inputs to the model - can contain template strings."""
role: Literal["user", "assistant", "system", "developer"]
"""The role of the message input.
One of `user`, `assistant`, `system`, or `developer`.
"""
type: Optional[Literal["message"]] = None
"""The type of the message input. Always `message`."""
DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[
DataSourceResponsesInputMessagesTemplateTemplateChatMessage,
DataSourceResponsesInputMessagesTemplateTemplateEvalItem,
]
|
DataSourceResponsesInputMessagesTemplateTemplateEvalItem
|
python
|
django-extensions__django-extensions
|
django_extensions/db/fields/__init__.py
|
{
"start": 800,
"end": 3122
}
|
class ____:
def check_is_bool(self, attrname):
if not isinstance(getattr(self, attrname), bool):
raise ValueError("'{}' argument must be True or False".format(attrname))
@staticmethod
def _get_fields(model_cls):
return [
(f, f.model if f.model != model_cls else None)
for f in model_cls._meta.get_fields()
if not f.is_relation or f.one_to_one or (f.many_to_one and f.related_model)
]
def get_queryset(self, model_cls, slug_field):
for field, model in self._get_fields(model_cls):
if model and field == slug_field:
return model._default_manager.all()
return model_cls._default_manager.all()
def find_unique(self, model_instance, field, iterator, *args):
# exclude the current model instance from the queryset used in finding
# next valid hash
queryset = self.get_queryset(model_instance.__class__, field)
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
# form a kwarg dict used to implement any unique_together constraints
kwargs = {}
for params in model_instance._meta.unique_together:
if self.attname in params:
for param in params:
kwargs[param] = getattr(model_instance, param, None)
# for support django 2.2+
query = Q()
constraints = getattr(model_instance._meta, "constraints", None)
if constraints:
unique_constraints = filter(
lambda c: isinstance(c, UniqueConstraint), constraints
)
for unique_constraint in unique_constraints:
if self.attname in unique_constraint.fields:
condition = {
field: getattr(model_instance, field, None)
for field in unique_constraint.fields
if field != self.attname
}
query &= Q(**condition)
new = next(iterator)
kwargs[self.attname] = new
while not new or queryset.filter(query, **kwargs):
new = next(iterator)
kwargs[self.attname] = new
setattr(model_instance, self.attname, new)
return new
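# A minimal usage sketch (hypothetical subclass; `models`, `slugify`, `itertools`
# and the "title" attribute are illustrative assumptions, not part of this mixin):
#
#   class AutoSlugLikeField(____, models.SlugField):
#       def pre_save(self, model_instance, add):
#           base = slugify(getattr(model_instance, "title", ""))
#           candidates = itertools.chain(
#               [base], (f"{base}-{i}" for i in itertools.count(2))
#           )
#           # find_unique advances the iterator until the value is unique,
#           # honoring unique_together and UniqueConstraint, then returns it
#           return self.find_unique(
#               model_instance,
#               model_instance._meta.get_field(self.attname),
#               candidates,
#           )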
|
UniqueFieldMixin
|
python
|
sqlalchemy__sqlalchemy
|
test/typing/plain_files/orm/mapped_covariant.py
|
{
"start": 853,
"end": 1254
}
|
class ____(Protocol):
# Read-only for simplicity, mutable protocol members are complicated,
# see https://mypy.readthedocs.io/en/latest/common_issues.html#covariant-subtyping-of-mutable-protocol-members-is-rejected
@property
def parent(self) -> Mapped[ParentProtocol]: ...
def get_parent_name(child: ChildProtocol) -> str:
return child.parent.name
# Implementations
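# One conforming implementation, sketched (assumes a declarative Base and a
# Parent mapped class with a `name: Mapped[str]` column; names are illustrative):
#
#   class Child(Base):
#       __tablename__ = "child"
#       id: Mapped[int] = mapped_column(primary_key=True)
#       parent_id: Mapped[int] = mapped_column(ForeignKey("parent.id"))
#       parent: Mapped[Parent] = relationship()
#
#   get_parent_name(Child())  # accepted: Mapped is covariant, so
#                             # Mapped[Parent] satisfies Mapped[ParentProtocol]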
|
ChildProtocol
|
python
|
redis__redis-py
|
tests/test_asyncio/test_search.py
|
{
"start": 52604,
"end": 53731
}
|
class ____(AsyncSearchTestsBase):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
@skip_ifmodversion_lt("2.2.0", "search")
@skip_if_server_version_gte("7.9.0")
async def test_config(self, decoded_r: redis.Redis):
assert await decoded_r.ft().config_set("TIMEOUT", "100")
with pytest.raises(redis.ResponseError):
await decoded_r.ft().config_set("TIMEOUT", "null")
res = await decoded_r.ft().config_get("*")
assert "100" == res["TIMEOUT"]
res = await decoded_r.ft().config_get("TIMEOUT")
assert "100" == res["TIMEOUT"]
@pytest.mark.redismod
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.9.0")
async def test_config_with_removed_ftconfig(self, decoded_r: redis.Redis):
assert await decoded_r.config_set("timeout", "100")
with pytest.raises(redis.ResponseError):
await decoded_r.config_set("timeout", "null")
res = await decoded_r.config_get("*")
assert "100" == res["timeout"]
res = await decoded_r.config_get("timeout")
assert "100" == res["timeout"]
|
TestConfig
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/yaxis/_autorangeoptions.py
|
{
"start": 235,
"end": 5864
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.yaxis"
_path_str = "layout.yaxis.autorangeoptions"
_valid_props = {
"clipmax",
"clipmin",
"include",
"includesrc",
"maxallowed",
"minallowed",
}
@property
def clipmax(self):
"""
Clip autorange maximum if it goes beyond this value. Has no
effect when `autorangeoptions.maxallowed` is provided.
The 'clipmax' property accepts values of any type
Returns
-------
Any
"""
return self["clipmax"]
@clipmax.setter
def clipmax(self, val):
self["clipmax"] = val
@property
def clipmin(self):
"""
Clip autorange minimum if it goes beyond this value. Has no
effect when `autorangeoptions.minallowed` is provided.
The 'clipmin' property accepts values of any type
Returns
-------
Any
"""
return self["clipmin"]
@clipmin.setter
def clipmin(self, val):
self["clipmin"] = val
@property
def include(self):
"""
Ensure this value is included in autorange.
The 'include' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["include"]
@include.setter
def include(self, val):
self["include"] = val
@property
def includesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `include`.
The 'includesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["includesrc"]
@includesrc.setter
def includesrc(self, val):
self["includesrc"] = val
@property
def maxallowed(self):
"""
Use this value exactly as autorange maximum.
The 'maxallowed' property accepts values of any type
Returns
-------
Any
"""
return self["maxallowed"]
@maxallowed.setter
def maxallowed(self, val):
self["maxallowed"] = val
@property
def minallowed(self):
"""
Use this value exactly as autorange minimum.
The 'minallowed' property accepts values of any type
Returns
-------
Any
"""
return self["minallowed"]
@minallowed.setter
def minallowed(self, val):
self["minallowed"] = val
@property
def _prop_descriptions(self):
return """\
clipmax
Clip autorange maximum if it goes beyond this value.
Has no effect when `autorangeoptions.maxallowed` is
provided.
clipmin
Clip autorange minimum if it goes beyond this value.
Has no effect when `autorangeoptions.minallowed` is
provided.
include
Ensure this value is included in autorange.
includesrc
Sets the source reference on Chart Studio Cloud for
`include`.
maxallowed
Use this value exactly as autorange maximum.
minallowed
Use this value exactly as autorange minimum.
"""
def __init__(
self,
arg=None,
clipmax=None,
clipmin=None,
include=None,
includesrc=None,
maxallowed=None,
minallowed=None,
**kwargs,
):
"""
Construct a new Autorangeoptions object
Parameters
----------
arg
dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.yaxis.Autorangeoptions`
clipmax
Clip autorange maximum if it goes beyond this value.
Has no effect when `autorangeoptions.maxallowed` is
provided.
clipmin
Clip autorange minimum if it goes beyond this value.
Has no effect when `autorangeoptions.minallowed` is
provided.
include
Ensure this value is included in autorange.
includesrc
Sets the source reference on Chart Studio Cloud for
`include`.
maxallowed
Use this value exactly as autorange maximum.
minallowed
Use this value exactly as autorange minimum.
Returns
-------
Autorangeoptions
"""
super().__init__("autorangeoptions")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.yaxis.Autorangeoptions
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.yaxis.Autorangeoptions`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("clipmax", arg, clipmax)
self._set_property("clipmin", arg, clipmin)
self._set_property("include", arg, include)
self._set_property("includesrc", arg, includesrc)
self._set_property("maxallowed", arg, maxallowed)
self._set_property("minallowed", arg, minallowed)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
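# A minimal usage sketch (assumes plotly.graph_objects imported as `go` and a
# plotly version that supports autorangeoptions; values are illustrative):
#
#   fig = go.Figure(go.Scatter(y=[1, 5, 3]))
#   fig.update_yaxes(autorangeoptions=dict(include=[0], clipmax=10))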
|
Autorangeoptions
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/loop19.py
|
{
"start": 95,
"end": 303
}
|
class ____:
yyy: int
def method1(self, results: list[Results]):
abc = None
for result in results:
if abc is not None and abc.zzz < result.zzz:
abc = result
|
Foo
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/misc_patterns.py
|
{
"start": 2764,
"end": 5148
}
|
class ____:
numpy_compat: dict[str, tuple[str, ...]] = {
"dim": ("axis",),
"keepdim": ("keepdims",),
"input": ("x", "a", "x1"),
"other": ("x2",),
}
inverse_mapping: dict[str, str]
cache: dict["torch.fx.graph.Target", OrderedSet[str]]
def __init__(self) -> None:
self.cache = {} # callable -> tuple of replaceable args e.g. ["axis"]
self.inverse_mapping = {}
for actual_kwarg, numpy_kwargs in self.numpy_compat.items():
for numpy_kwarg in numpy_kwargs:
assert numpy_kwarg not in self.inverse_mapping
self.inverse_mapping[numpy_kwarg] = actual_kwarg
def __call__(self, graph: torch.fx.Graph):
for node in graph.nodes:
if node.op != "call_function":
continue
if isinstance(node.target, (OpOverload, OpOverloadPacket)):
# only applies to torch ops; e.g. torch.stack(axis=1) works, torch.ops.aten.stack(axis=1) doesn't.
continue
kwargs = node.kwargs
if node.target in self.cache:
replaceable_kwargs = self.cache[node.target]
else:
signatures = torch.fx.operator_schemas.get_signature_for_torch_op(
node.target
)
signatures = () if signatures is None else signatures
replaceable_kwargs = OrderedSet()
for sig in signatures:
for param_name in sig.parameters:
if param_name in self.numpy_compat:
replaceable_kwargs.update(self.numpy_compat[param_name])
self.cache[node.target] = replaceable_kwargs
if not replaceable_kwargs:
continue
new_kwargs = {}
kwargs_changed = False
for k, v in kwargs.items():
if k in replaceable_kwargs:
kwargs_changed = True
new_kwargs[self.inverse_mapping[k]] = v
else:
new_kwargs[k] = v
if kwargs_changed:
node.kwargs = torch.fx.immutable_collections.immutable_dict(new_kwargs)
counters["inductor"]["numpy_compat_normalization"] += 1
numpy_compat_normalization = NumpyCompatNormalization()
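# Worked example (sketch): inside a traced FX graph, a call written as
#   torch.sum(x, axis=1, keepdims=True)
# is rewritten in place to the torch spellings
#   torch.sum(x, dim=1, keepdim=True)
# The pass skips OpOverload/OpOverloadPacket targets because the aten ops only
# accept the canonical names; only plain torch.* callables need normalizing.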
|
NumpyCompatNormalization
|
python
|
huggingface__transformers
|
src/transformers/models/smolvlm/modeling_smolvlm.py
|
{
"start": 6721,
"end": 9180
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
# Ignore copy
self.is_causal = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
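    # Shape walkthrough (for reference): hidden_states (B, T, E) is projected
    # and reshaped to (B, num_heads, T, head_dim) for each of q/k/v; the
    # attention output is flattened back to (B, T, E) before out_proj.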
|
SmolVLMVisionAttention
|
python
|
PrefectHQ__prefect
|
src/prefect/server/api/clients.py
|
{
"start": 903,
"end": 2388
}
|
class ____:
_http_client: PrefectHttpxAsyncClient
def __init__(self, additional_headers: dict[str, str] | None = None):
from prefect.server.api.server import create_app
additional_headers = additional_headers or {}
# create_app caches application instances, and invoking it with no arguments
        # will point it to the currently running server instance
api_app = create_app()
settings = get_current_settings()
# we pull the auth string from _server_ settings because this client is run on the server
if auth_string_secret := settings.server.api.auth_string:
if auth_string := auth_string_secret.get_secret_value():
token = base64.b64encode(auth_string.encode("utf-8")).decode("utf-8")
additional_headers.setdefault("Authorization", f"Basic {token}")
self._http_client = PrefectHttpxAsyncClient(
transport=httpx.ASGITransport(app=api_app, raise_app_exceptions=False),
headers={**additional_headers},
base_url=f"http://prefect-in-memory{settings.server.api.base_path or '/api'}",
enable_csrf_support=settings.server.api.csrf_protection_enabled,
raise_on_all_errors=False,
)
async def __aenter__(self) -> Self:
await self._http_client.__aenter__()
return self
async def __aexit__(self, *args: Any) -> None:
await self._http_client.__aexit__(*args)
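    # A minimal usage sketch (the endpoint path is illustrative, not asserted):
    #
    #   async def server_ping() -> int:
    #       async with ____() as client:
    #           response = await client._http_client.get("/health")
    #           return response.status_code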
|
BaseClient
|
python
|
huggingface__transformers
|
src/transformers/models/convbert/modeling_convbert.py
|
{
"start": 21600,
"end": 26659
}
|
class ____(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config ([`ConvBertConfig`]):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
- `"last"` -- Take the last token hidden state (like XLNet)
- `"first"` -- Take the first token hidden state (like Bert)
- `"mean"` -- Take the mean of all tokens hidden states
- `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- `"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
(otherwise to `config.hidden_size`).
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
another string or `None` will add no activation.
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
            - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
"""
def __init__(self, config: ConvBertConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = nn.Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()
self.first_dropout = nn.Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = nn.Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
Returns:
`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
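    # A minimal usage sketch (config values chosen for illustration):
    #
    #   config = ConvBertConfig(summary_type="first", summary_use_proj=True)
    #   pooled = ____(config)(hidden_states)  # (batch, hidden) from (batch, seq, hidden)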
@auto_docstring
|
ConvBertSequenceSummary
|
python
|
pytorch__pytorch
|
torch/utils/benchmark/utils/compare.py
|
{
"start": 10712,
"end": 13473
}
|
class ____:
"""Helper class for displaying the results of many measurements in a
formatted table.
The table format is based on the information fields provided in
:class:`torch.utils.benchmark.Timer` (`description`, `label`, `sub_label`,
`num_threads`, etc).
    The table can be directly printed using :meth:`print` or cast as a `str`.
For a full tutorial on how to use this class, see:
https://pytorch.org/tutorials/recipes/recipes/benchmark.html
Args:
results: List of Measurement to display.
"""
def __init__(self, results: list[common.Measurement]) -> None:
self._results: list[common.Measurement] = []
self.extend_results(results)
self._trim_significant_figures = False
self._colorize = Colorize.NONE
self._highlight_warnings = False
def __str__(self) -> str:
return "\n".join(self._render())
def extend_results(self, results) -> None:
"""Append results to already stored ones.
All added results must be instances of ``Measurement``.
"""
for r in results:
if not isinstance(r, common.Measurement):
raise ValueError(
"Expected an instance of `Measurement`, " f"got {type(r)} instead."
)
self._results.extend(results)
def trim_significant_figures(self) -> None:
"""Enables trimming of significant figures when building the formatted table."""
self._trim_significant_figures = True
def colorize(self, rowwise=False) -> None:
"""Colorize formatted table.
Colorize columnwise by default.
"""
self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE
def highlight_warnings(self) -> None:
"""Enables warning highlighting when building formatted table."""
self._highlight_warnings = True
def print(self) -> None:
"""Print formatted table"""
print(str(self))
def _render(self):
results = common.Measurement.merge(self._results)
grouped_results = self._group_by_label(results)
output = [self._layout(group) for group in grouped_results.values()]
return output
def _group_by_label(self, results: list[common.Measurement]):
grouped_results: collections.defaultdict[str, list[common.Measurement]] = collections.defaultdict(list)
for r in results:
grouped_results[r.label].append(r)
return grouped_results
def _layout(self, results: list[common.Measurement]):
table = Table(
results,
self._colorize,
self._trim_significant_figures,
self._highlight_warnings
)
return table.render()
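# A minimal usage sketch (statement and sizes are illustrative):
#
#   import torch.utils.benchmark as benchmark
#   m = benchmark.Timer(
#       stmt="x.mul(2)", setup="x = torch.ones(1000)",
#       label="mul", description="eager",
#   ).timeit(100)
#   ____([m]).print()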
|
Compare
|
python
|
doocs__leetcode
|
lcof/面试题59 - II. 队列的最大值/Solution.py
|
{
"start": 0,
"end": 707
}
|
from collections import deque  # not shown in the original span; required by the two queues below
class ____:
def __init__(self):
self.q1 = deque()
self.q2 = deque()
def max_value(self) -> int:
return -1 if not self.q2 else self.q2[0]
def push_back(self, value: int) -> None:
while self.q2 and self.q2[-1] < value:
self.q2.pop()
self.q1.append(value)
self.q2.append(value)
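        # invariant: q2 stays non-increasing, so q2[0] is always the current max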
def pop_front(self) -> int:
if not self.q1:
return -1
ans = self.q1.popleft()
if self.q2[0] == ans:
self.q2.popleft()
return ans
# Your MaxQueue object will be instantiated and called as such:
# obj = MaxQueue()
# param_1 = obj.max_value()
# obj.push_back(value)
# param_3 = obj.pop_front()
|
MaxQueue
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/common.py
|
{
"start": 2437,
"end": 2574
}
|
class ____(BaseModel, Generic[E, N]):
"""Base Graph serializer for responses."""
edges: list[E]
nodes: list[N]
|
BaseGraphResponse
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_transaction.py
|
{
"start": 76867,
"end": 78335
}
|
class ____(
CtxManagerJoinIntoAnExternalTransactionFixture, fixtures.MappedTest
):
"""2.0 only recipe for "join into an external transaction" that works
without event handlers
"""
def setup_session(self):
self.trans = self.connection.begin()
if (
self.join_mode.conditional_w_savepoint
or self.join_mode.create_savepoint_w_savepoint
):
self.nested = self.connection.begin_nested()
class A:
pass
clear_mappers()
self.mapper_registry.map_imperatively(A, self.table)
self.A = A
self.session = Session(
self.connection,
join_transaction_mode=(
"create_savepoint"
if (
self.join_mode.create_savepoint
or self.join_mode.create_savepoint_w_savepoint
)
else "conditional_savepoint"
),
)
def teardown_session(self):
self.session.close()
if (
self.join_mode.conditional_w_savepoint
or self.join_mode.create_savepoint_w_savepoint
):
assert not self.nested._deactivated_from_connection
assert self.nested.is_active
self.nested.rollback()
assert not self.trans._deactivated_from_connection
assert self.trans.is_active
self.trans.rollback()
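    # The recipe distilled (sketch): bind a Session directly to a Connection
    # whose transaction is already open, passing
    #   Session(conn, join_transaction_mode="create_savepoint")
    # so the ORM work runs inside a SAVEPOINT and teardown can roll back the
    # outer transaction without the Session having closed it.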
|
ReallyNewJoinIntoAnExternalTransactionTest
|
python
|
encode__django-rest-framework
|
tests/test_fields.py
|
{
"start": 57387,
"end": 57673
}
|
class ____(FieldValues):
"""
Values for `TimeField` with a no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): datetime.time(13, 00)
}
field = serializers.TimeField(format=None)
|
TestNoOutputFormatTimeField
|
python
|
joke2k__faker
|
tests/providers/test_phone_number.py
|
{
"start": 15030,
"end": 15500
}
|
class ____:
"""Test hu_HU phone number provider methods"""
def test_phone_number(self, faker, num_samples):
pattern: Pattern = re.compile(
r"(?:" r"\+36 \d{2} |" r"\(06\)\d{2}/|" r"\(\d{2}\)/|" r"\d{2}/|" r"06-\d{1,2}/" r")\d{3}[- ]\d{4}",
)
for _ in range(num_samples):
phone_number = faker.phone_number()
assert isinstance(phone_number, str)
assert pattern.fullmatch(phone_number)
|
TestHuHu
|
python
|
pyqtgraph__pyqtgraph
|
tests/test_stability.py
|
{
"start": 1917,
"end": 3340
}
|
class ____(pg.QtCore.QThread):
'''Intended to give the gc an opportunity to run from a non-gui thread.'''
def run(self):
i = 0
while True:
i += 1
if (i % 1000000) == 0:
print('--worker--')
def randItem(items):
return items[randint(0, len(items)-1)]
def p(msg):
print(msg)
sys.stdout.flush()
def createWidget():
p('create widget')
global widgets, allWidgets
if len(widgets) > 50:
return None
widget = randItem(widgetTypes)()
widget.setWindowTitle(widget.__class__.__name__)
widgets.append(widget)
allWidgets[widget] = 1
p(" %s" % widget)
return widget
def setParent():
p('set parent')
global widgets
if len(widgets) < 2:
return
child = parent = None
while child is parent:
child = randItem(widgets)
parent = randItem(widgets)
p(" %s parent of %s" % (parent, child))
child.setParent(parent)
def forgetWidget():
p('forget widget')
global widgets
if len(widgets) < 1:
return
widget = randItem(widgets)
p(' %s' % widget)
widgets.remove(widget)
def showWidget():
p('show widget')
global widgets
if len(widgets) < 1:
return
widget = randItem(widgets)
p(' %s' % widget)
widget.show()
def processEvents():
p('process events')
QtTest.QTest.qWait(25)
|
WorkThread
|
python
|
getsentry__sentry
|
tests/sentry/integrations/bitbucket_server/test_webhook.py
|
{
"start": 2799,
"end": 2978
}
|
class ____(WebhookTestBase):
def test_get_request_fails(self) -> None:
self.get_error_response(self.organization.id, self.integration.id, status_code=405)
|
WebhookGetTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/job_base.py
|
{
"start": 1830,
"end": 3098
}
|
class ____(IJob):
def __init__(
self,
job_def: "JobDefinition",
):
self._job_def = job_def
def get_definition(self) -> "JobDefinition":
return self._job_def
def get_repository_definition(self) -> Optional["RepositoryDefinition"]:
return None
def get_subset(
self,
*,
op_selection: Optional[Iterable[str]] = None,
asset_selection: Optional[AbstractSet[AssetKey]] = None,
asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,
) -> "InMemoryJob":
op_selection = set(op_selection) if op_selection else None
return InMemoryJob(
self._job_def.get_subset(
op_selection=op_selection,
asset_selection=asset_selection,
asset_check_selection=asset_check_selection,
)
)
@property
def op_selection(self) -> Optional[AbstractSet[str]]:
return self._job_def.op_selection
@property
def asset_selection(self) -> Optional[AbstractSet[AssetKey]]:
return self._job_def.asset_selection
@property
def asset_check_selection(self) -> Optional[AbstractSet[AssetCheckKey]]:
return self._job_def.asset_check_selection
|
InMemoryJob
|