| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| string (1 class) | string (346 values) | string (6–201 chars) | dict | string (21–2.38M chars) | string (1–96 chars) |
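Each row pairs a class definition whose name is masked as `____` (the `source` column) with the masked name itself (the `target` column); `class_span` holds the start/end character offsets of the class within the file at `path`. A minimal sketch of consuming a row, assuming the dataset loads with the `datasets` library (the hub ID below is a placeholder):

```python
from datasets import load_dataset

# Placeholder hub ID; substitute the dataset's actual path.
ds = load_dataset("org/masked-class-names", split="train")

row = ds[0]
span = row["class_span"]  # character offsets into the file at row["path"]

# Restore the original class by filling the masked name back in.
restored = row["source"].replace("____", row["target"], 1)
print(row["repo"], row["path"], span["start"], span["end"])
```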
| python | RaRe-Technologies__gensim | gensim/test/test_fasttext.py | {"start": 73000, "end": 74089} |
class ____(unittest.TestCase):
def test_sanity(self):
m = np.array(range(9))
m.shape = (3, 3)
hash2index = {10: 0, 11: 1, 12: 2}
n = _unpack(m, 25, hash2index)
self.assertTrue(np.all(np.array([0, 1, 2]) == n[10]))
self.assertTrue(np.all(np.array([3, 4, 5]) == n[11]))
self.assertTrue(np.all(np.array([6, 7, 8]) == n[12]))
def test_tricky(self):
m = np.array(range(9))
m.shape = (3, 3)
hash2index = {1: 0, 0: 1, 12: 2}
n = _unpack(m, 25, hash2index)
self.assertTrue(np.all(np.array([3, 4, 5]) == n[0]))
self.assertTrue(np.all(np.array([0, 1, 2]) == n[1]))
self.assertTrue(np.all(np.array([6, 7, 8]) == n[12]))
def test_identity(self):
m = np.array(range(9))
m.shape = (3, 3)
hash2index = {0: 0, 1: 1, 2: 2}
n = _unpack(m, 25, hash2index)
self.assertTrue(np.all(np.array([0, 1, 2]) == n[0]))
self.assertTrue(np.all(np.array([3, 4, 5]) == n[1]))
self.assertTrue(np.all(np.array([6, 7, 8]) == n[2]))
| UnpackTest |
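The three tests above pin down the contract of gensim's private `_unpack` helper: the result `n` satisfies `n[h] == m[hash2index[h]]` for every hash `h` in the map. A minimal sketch consistent with those assertions, not gensim's actual implementation:

```python
import numpy as np

def unpack_sketch(m, num_rows, hash2index):
    # Scatter rows of m into a (num_rows, dim) array so that row h of
    # the result equals m[hash2index[h]]; unmapped rows stay zero.
    n = np.zeros((num_rows, m.shape[1]), dtype=m.dtype)
    for h, i in hash2index.items():
        n[h] = m[i]
    return n
```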
| python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/emr.py | {"start": 9648, "end": 10966} |
class ____(AwsBaseWaiterTrigger):
"""
Poll an EMR Serverless application and wait for it to be started.
:param application_id: The ID of the application being polled.
:param waiter_delay: polling period in seconds to check for the status
:param waiter_max_attempts: The maximum number of attempts to be made
:param aws_conn_id: Reference to AWS connection id
"""
def __init__(
self,
application_id: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str | None = "aws_default",
) -> None:
super().__init__(
serialized_fields={"application_id": application_id},
waiter_name="serverless_app_started",
waiter_args={"applicationId": application_id},
failure_message="Application failed to start",
status_message="Application status is",
status_queries=["application.state", "application.stateDetails"],
return_key="application_id",
return_value=application_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrServerlessHook(self.aws_conn_id)
| EmrServerlessStartApplicationTrigger |
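For orientation: in practice this trigger is constructed by an Airflow operator when a task defers, but it can be built directly. A hedged sketch using the target name for the blank (the application ID is a placeholder):

```python
trigger = EmrServerlessStartApplicationTrigger(
    application_id="00abc123xyz",  # placeholder ID
    waiter_delay=30,
    waiter_max_attempts=60,
    aws_conn_id="aws_default",
)
```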
| python | readthedocs__readthedocs.org | readthedocs/api/v3/filters.py | {"start": 1396, "end": 1804} |
class ____(filters.FilterSet):
running = filters.BooleanFilter(method="get_running")
class Meta:
model = Build
fields = [
"commit",
"running",
]
def get_running(self, queryset, name, value):
if value:
return queryset.exclude(state__in=BUILD_FINAL_STATES)
return queryset.filter(state__in=BUILD_FINAL_STATES)
| BuildFilter |
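A brief usage note: `BooleanFilter(method="get_running")` makes django-filter coerce the query-string value to a bool and dispatch it to `get_running`. A minimal sketch, assuming a populated `Build` queryset:

```python
# "running=true" on the query string arrives here as value=True,
# so the filter excludes builds whose state is in BUILD_FINAL_STATES.
fs = BuildFilter({"running": "true"}, queryset=Build.objects.all())
running_builds = fs.qs
```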
| python | pallets__werkzeug | tests/test_debug.py | {"start": 6863, "end": 11346} |
class ____:
def test_object_dumping(self):
drg = DebugReprGenerator()
out = drg.dump_object(Foo())
assert re.search("Details for test_debug.Foo object at", out)
assert re.search('<th>x.*<span class="number">42</span>', out, flags=re.DOTALL)
assert re.search('<th>y.*<span class="number">23</span>', out, flags=re.DOTALL)
assert re.search('<th>z.*<span class="number">15</span>', out, flags=re.DOTALL)
out = drg.dump_object({"x": 42, "y": 23})
assert re.search("Contents of", out)
assert re.search('<th>x.*<span class="number">42</span>', out, flags=re.DOTALL)
assert re.search('<th>y.*<span class="number">23</span>', out, flags=re.DOTALL)
out = drg.dump_object({"x": 42, "y": 23, 23: 11})
assert not re.search("Contents of", out)
out = drg.dump_locals({"x": 42, "y": 23})
assert re.search("Local variables in frame", out)
assert re.search('<th>x.*<span class="number">42</span>', out, flags=re.DOTALL)
assert re.search('<th>y.*<span class="number">23</span>', out, flags=re.DOTALL)
def test_debug_dump(self):
old = sys.stdout
sys.stdout = HTMLStringO()
try:
dump([1, 2, 3])
x = sys.stdout.reset()
dump()
y = sys.stdout.reset()
finally:
sys.stdout = old
assert "Details for list object at" in x
assert '<span class="number">1</span>' in x
assert "Local variables in frame" in y
assert "<th>x" in y
assert "<th>old" in y
def test_debug_help(self):
old = sys.stdout
sys.stdout = HTMLStringO()
try:
helper([1, 2, 3])
x = sys.stdout.reset()
finally:
sys.stdout = old
assert "Help on list object" in x
assert "__delitem__" in x
def test_exc_divider_found_on_chained_exception(self):
@Request.application
def app(request):
def do_something():
raise ValueError("inner")
try:
do_something()
except ValueError:
raise KeyError("outer") # noqa: B904
debugged = DebuggedApplication(app)
client = Client(debugged)
response = client.get("/")
data = response.get_data(as_text=True)
assert 'raise ValueError("inner")' in data
assert '<div class="exc-divider">' in data
assert 'raise KeyError("outer")' in data
def test_get_machine_id():
rv = get_machine_id()
assert isinstance(rv, bytes)
@pytest.mark.parametrize("crash", (True, False))
@pytest.mark.dev_server
def test_basic(dev_server, crash):
c = dev_server(use_debugger=True)
r = c.request("/crash" if crash else "")
assert r.status == (500 if crash else 200)
if crash:
assert b"The debugger caught an exception in your WSGI application" in r.data
else:
assert r.json["PATH_INFO"] == "/"
def test_console_closure_variables(monkeypatch):
# restore the original display hook
monkeypatch.setattr(sys, "displayhook", console._displayhook)
c = console.Console()
c.eval("y = 5")
c.eval("x = lambda: y")
ret = c.eval("x()")
assert ret == ">>> x()\n5\n"
@pytest.mark.timeout(2)
def test_chained_exception_cycle():
try:
try:
raise ValueError()
except ValueError:
raise TypeError() # noqa: B904
except TypeError as e:
# create a cycle and make it available outside the except block
e.__context__.__context__ = error = e
# if cycles aren't broken, this will time out
tb = DebugTraceback(error)
assert len(tb.all_tracebacks) == 2
def test_exception_without_traceback():
try:
raise Exception("msg1")
except Exception as e:
# filter_hidden_frames should skip this since it has no traceback
e.__context__ = Exception("msg2")
DebugTraceback(e)
@mock.patch.object(linecache, "getlines", autospec=True)
def test_missing_source_lines(mock_getlines: mock.Mock) -> None:
"""Rendering doesn't fail when the line number is beyond the available
source lines.
"""
mock_getlines.return_value = ["truncated"]
try:
raise ValueError()
except ValueError as e:
tb = DebugTraceback(e)
html = tb.render_traceback_html()
assert "test_debug.py" in html
assert "truncated" not in html
| TestDebugHelpers |
| python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/typed_dict.py | {"start": 1595, "end": 2000} |
class ____(TypedDict):
genuine: int
nested: SanitizedFieldTypedDict
def test_sanitize_field():
d: NestedTypedDict = _test_source()
_test_sink(d["genuine"])
d: NestedTypedDict = _test_source()
# TODO(T81192268): this should not trigger an issue.
_test_sink(d["nested"]["sanitized"])
bar: NestedTypedDict = _test_source()
_test_sink(bar["nested"]["safe"])
| NestedTypedDict |
| python | wandb__wandb | wandb/sdk/wandb_run.py | {"start": 14844, "end": 147235} |
class ____:
"""A unit of computation logged by W&B. Typically, this is an ML experiment.
Call [`wandb.init()`](https://docs.wandb.ai/ref/python/init/) to create a
new run. `wandb.init()` starts a new run and returns a `wandb.Run` object.
Each run is associated with a unique ID (run ID). W&B recommends using
a context manager (`with` statement) to automatically finish the run.
For distributed training experiments, you can either track each process
separately using one run per process, or track all processes in a single run.
See [Log distributed training experiments](https://docs.wandb.ai/guides/track/log/distributed-training)
for more information.
You can log data to a run with `wandb.Run.log()`. Anything you log using
`wandb.Run.log()` is sent to that run. See
[Create an experiment](https://docs.wandb.ai/guides/track/create-an-experiment/) or
[`wandb.init`](https://docs.wandb.ai/ref/python/init/) API reference page
for more information.
There is another `Run` object in the
[`wandb.apis.public`](https://docs.wandb.ai/ref/python/public-api/api/)
namespace. Use this object to interact with runs that have already been
created.
Attributes:
summary: (Summary) A summary of the run, which is a dictionary-like
object. For more information, see
[Log summary metrics](https://docs.wandb.ai/guides/track/log/log-summary/).
Examples:
Create a run with `wandb.init()`:
```python
import wandb
# Start a new run and log some data
# Use context manager (`with` statement) to automatically finish the run
with wandb.init(entity="entity", project="project") as run:
run.log({"accuracy": acc, "loss": loss})
```
<!-- lazydoc-ignore-init: internal -->
"""
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: list[TeardownHook]
_backend: wandb.sdk.backend.backend.Backend | None
_internal_run_interface: wandb.sdk.interface.interface_queue.InterfaceQueue | None
_wl: _WandbSetup | None
_out_redir: redirect.RedirectBase | None
_err_redir: redirect.RedirectBase | None
_redirect_cb: Callable[[str, str], None] | None
_redirect_raw_cb: Callable[[str, str], None] | None
_output_writer: filesystem.CRDedupedFile | None
_atexit_cleanup_called: bool
_hooks: ExitHooks | None
_exit_code: int | None
_run_status_checker: RunStatusChecker | None
_sampled_history: SampledHistoryResponse | None
_final_summary: GetSummaryResponse | None
_poll_exit_handle: MailboxHandle[Result] | None
_poll_exit_response: PollExitResponse | None
_internal_messages_response: InternalMessagesResponse | None
_stdout_slave_fd: int | None
_stderr_slave_fd: int | None
_artifact_slots: list[str]
_init_pid: int
_attach_pid: int
_attach_id: str | None
_is_attached: bool
_is_finished: bool
_settings: Settings
_forked: bool
_launch_artifacts: dict[str, Any] | None
_printer: printer.Printer
summary: wandb_summary.Summary
def __init__(
self,
settings: Settings,
config: dict[str, Any] | None = None,
sweep_config: dict[str, Any] | None = None,
launch_config: dict[str, Any] | None = None,
) -> None:
# pid is set, so we know if this run object was initialized by this process
self._init_pid = os.getpid()
self._attach_id = None
if settings._noop:
# TODO: properly handle setting for disabled mode
self._settings = settings
return
self._init(
settings=settings,
config=config,
sweep_config=sweep_config,
launch_config=launch_config,
)
def _init(
self,
settings: Settings,
config: dict[str, Any] | None = None,
sweep_config: dict[str, Any] | None = None,
launch_config: dict[str, Any] | None = None,
) -> None:
self._settings = settings
self._config = wandb_config.Config()
self._config._set_callback(self._config_callback)
self._config._set_artifact_callback(self._config_artifact_callback)
self._config._set_settings(self._settings)
# The _wandb key is always expected on the run config.
wandb_key = "_wandb"
self._config._update({wandb_key: dict()})
# TODO: perhaps this should be a property that is a noop on a finished run
self.summary = wandb_summary.Summary(
self._summary_get_current_summary_callback,
)
self.summary._set_update_callback(self._summary_update_callback)
self._step = 0
self._starting_step = 0
self._start_runtime = 0
# TODO: eventually would be nice to make this configurable using self._settings._start_time
# need to test (jhr): if you set start time to 2 days ago and run a test for 15 minutes,
# does the total time get calculated right (not as 2 days and 15 minutes)?
self._start_time = time.time()
self._printer = printer.new_printer(settings)
self._torch_history: wandb_torch.TorchHistory | None = None # type: ignore
self._backend = None
self._internal_run_interface = None
self._wl = None
# Avoid calling wandb.Api() repeatedly in _public_api()
self._cached_public_api: PublicApi | None = None
self._hooks = None
self._teardown_hooks = []
self._output_writer = None
self._out_redir = None
self._err_redir = None
self._stdout_slave_fd = None
self._stderr_slave_fd = None
self._exit_code = None
self._exit_result = None
self._used_artifact_slots: dict[str, str] = {}
# Created when the run "starts".
self._run_status_checker = None
self._sampled_history = None
self._final_summary = None
self._poll_exit_response = None
self._internal_messages_response = None
self._poll_exit_handle = None
# Initialize telemetry object
self._telemetry_obj = telemetry.TelemetryRecord()
self._telemetry_obj_active = False
self._telemetry_obj_flushed = b""
self._telemetry_obj_dirty = False
self._atexit_cleanup_called = False
# Initial scope setup for sentry.
# This might get updated when the actual run comes back.
get_sentry().configure_scope(
tags=dict(self._settings),
process_context="user",
)
self._launch_artifact_mapping: dict[str, Any] = {}
self._unique_launch_artifact_sequence_names: dict[str, Any] = {}
# Populate config
config = config or dict()
self._config._update(config, allow_val_change=True, ignore_locked=True)
if sweep_config:
self._config.merge_locked(
sweep_config, user="sweep", _allow_val_change=True
)
if launch_config:
self._config.merge_locked(
launch_config, user="launch", _allow_val_change=True
)
# if run is from a launch queue, add queue id to _wandb config
launch_queue_name = wandb.env.get_launch_queue_name()
if launch_queue_name:
self._config[wandb_key]["launch_queue_name"] = launch_queue_name
launch_queue_entity = wandb.env.get_launch_queue_entity()
if launch_queue_entity:
self._config[wandb_key]["launch_queue_entity"] = launch_queue_entity
launch_trace_id = wandb.env.get_launch_trace_id()
if launch_trace_id:
self._config[wandb_key]["launch_trace_id"] = launch_trace_id
self._attach_id = None
self._is_attached = False
self._is_finished = False
self._attach_pid = os.getpid()
self._forked = False
# for now, use runid as attach id, this could/should be versioned in the future
self._attach_id = self._settings.run_id
def _handle_launch_artifact_overrides(self) -> None:
if self._settings.launch and (os.environ.get("WANDB_ARTIFACTS") is not None):
try:
artifacts: dict[str, Any] = json.loads(
os.environ.get("WANDB_ARTIFACTS", "{}")
)
except (ValueError, SyntaxError):
wandb.termwarn("Malformed WANDB_ARTIFACTS, using original artifacts")
else:
self._initialize_launch_artifact_maps(artifacts)
elif (
self._settings.launch
and self._settings.launch_config_path
and os.path.exists(self._settings.launch_config_path)
):
self.save(self._settings.launch_config_path)
with open(self._settings.launch_config_path) as fp:
launch_config = json.loads(fp.read())
if launch_config.get("overrides", {}).get("artifacts") is not None:
artifacts = launch_config.get("overrides").get("artifacts")
self._initialize_launch_artifact_maps(artifacts)
def _initialize_launch_artifact_maps(self, artifacts: dict[str, Any]) -> None:
for key, item in artifacts.items():
self._launch_artifact_mapping[key] = item
artifact_sequence_tuple_or_slot = key.split(":")
if len(artifact_sequence_tuple_or_slot) == 2:
sequence_name = artifact_sequence_tuple_or_slot[0].split("/")[-1]
if self._unique_launch_artifact_sequence_names.get(sequence_name):
self._unique_launch_artifact_sequence_names.pop(sequence_name)
else:
self._unique_launch_artifact_sequence_names[sequence_name] = item
def _telemetry_callback(self, telem_obj: telemetry.TelemetryRecord) -> None:
if not hasattr(self, "_telemetry_obj") or self._is_finished:
return
self._telemetry_obj.MergeFrom(telem_obj)
self._telemetry_obj_dirty = True
self._telemetry_flush()
def _telemetry_flush(self) -> None:
if not hasattr(self, "_telemetry_obj"):
return
if not self._telemetry_obj_active:
return
if not self._telemetry_obj_dirty:
return
if self._backend and self._backend.interface:
serialized = self._telemetry_obj.SerializeToString()
if serialized == self._telemetry_obj_flushed:
return
self._backend.interface._publish_telemetry(self._telemetry_obj)
self._telemetry_obj_flushed = serialized
self._telemetry_obj_dirty = False
def _freeze(self) -> None:
self._frozen = True
def __setattr__(self, attr: str, value: object) -> None:
if getattr(self, "_frozen", None) and not hasattr(self, attr):
raise Exception(f"Attribute {attr} is not supported on Run object.")
super().__setattr__(attr, value)
def __deepcopy__(self, memo: dict[int, Any]) -> Run:
return self
def __getstate__(self) -> Any:
"""Return run state as a custom pickle."""
# We only pickle in service mode
if not self._settings:
return
_attach_id = self._attach_id
if not _attach_id:
return
return dict(
_attach_id=_attach_id,
_init_pid=self._init_pid,
_is_finished=self._is_finished,
)
def __setstate__(self, state: Any) -> None:
"""Set run state from a custom pickle."""
if not state:
return
_attach_id = state.get("_attach_id")
if not _attach_id:
return
if state["_init_pid"] == os.getpid():
raise RuntimeError("attach in the same process is not supported currently")
self.__dict__.update(state)
@property
def _torch(self) -> wandb_torch.TorchHistory: # type: ignore
if self._torch_history is None:
self._torch_history = wandb_torch.TorchHistory() # type: ignore
return self._torch_history
@property
@_log_to_run
@_attach
def settings(self) -> Settings:
"""A frozen copy of run's Settings object."""
return self._settings.model_copy(deep=True)
@property
@_log_to_run
@_attach
def dir(self) -> str:
"""The directory where files associated with the run are saved."""
return self._settings.files_dir
@property
@_log_to_run
@_attach
def config(self) -> wandb_config.Config:
"""Config object associated with this run."""
return self._config
@property
@_log_to_run
@_attach
def config_static(self) -> wandb_config.ConfigStatic:
"""Static config object associated with this run."""
return wandb_config.ConfigStatic(self._config)
@property
@_log_to_run
@_attach
def name(self) -> str | None:
"""Display name of the run.
Display names are not guaranteed to be unique and may be descriptive.
By default, they are randomly generated.
"""
return self._settings.run_name
@name.setter
@_log_to_run
@_raise_if_finished
def name(self, name: str) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_run_name = True
self._settings.run_name = name
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property
@_log_to_run
@_attach
def notes(self) -> str | None:
"""Notes associated with the run, if there are any.
Notes can be a multiline string and can also use markdown and latex
equations inside `$$`, like `$x + 3$`.
"""
return self._settings.run_notes
@notes.setter
@_log_to_run
@_raise_if_finished
def notes(self, notes: str) -> None:
self._settings.run_notes = notes
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property
@_log_to_run
@_attach
def tags(self) -> tuple | None:
"""Tags associated with the run, if there are any."""
return self._settings.run_tags or ()
@tags.setter
@_log_to_run
@_raise_if_finished
def tags(self, tags: Sequence) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_run_tags = True
try:
self._settings.run_tags = tuple(tags)
except ValueError as e:
# For runtime tag setting, warn instead of crash
# Extract the core error message without the pydantic wrapper
error_msg = str(e)
if "Value error," in error_msg:
# Extract the actual error message after "Value error, "
error_msg = error_msg.split("Value error, ")[1].split(" [type=")[0]
wandb.termwarn(f"Invalid tag detected: {error_msg} Tags not updated.")
return
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property
@_log_to_run
@_attach
def id(self) -> str:
"""Identifier for this run."""
assert self._settings.run_id is not None
return self._settings.run_id
@property
@_log_to_run
@_attach
def sweep_id(self) -> str | None:
"""Identifier for the sweep associated with the run, if there is one."""
return self._settings.sweep_id
def _get_path(self) -> str:
return "/".join(
e
for e in [
self._settings.entity,
self._settings.project,
self._settings.run_id,
]
if e is not None
)
@property
@_log_to_run
@_attach
def path(self) -> str:
"""Path to the run.
Run paths include entity, project, and run ID, in the format
`entity/project/run_id`.
"""
return self._get_path()
@property
@_log_to_run
@_attach
def start_time(self) -> float:
"""Unix timestamp (in seconds) of when the run started."""
return self._start_time
@property
@_log_to_run
@_attach
def starting_step(self) -> int:
"""The first step of the run.
<!-- lazydoc-ignore: internal -->
"""
return self._starting_step
@property
@_log_to_run
@_attach
def resumed(self) -> bool:
"""True if the run was resumed, False otherwise."""
return self._settings.resumed
@property
@_log_to_run
@_attach
def step(self) -> int:
"""Current value of the step.
This counter is incremented by `wandb.Run.log()`.
<!-- lazydoc-ignore: internal -->
"""
return self._step
@property
@_log_to_run
@_attach
def offline(self) -> bool:
"""True if the run is offline, False otherwise."""
return self._settings._offline
@property
@_log_to_run
@_attach
def disabled(self) -> bool:
"""True if the run is disabled, False otherwise."""
return self._settings._noop
@property
@_log_to_run
@_attach
def group(self) -> str:
"""Returns the name of the group associated with this run.
Grouping runs together allows related experiments to be organized and
visualized collectively in the W&B UI. This is especially useful for
scenarios such as distributed training or cross-validation, where
multiple runs should be viewed and managed as a unified experiment.
In shared mode, where all processes share the same run object,
setting a group is usually unnecessary, since there is only one
run and no grouping is required.
"""
return self._settings.run_group or ""
@property
@_log_to_run
@_attach
def job_type(self) -> str:
"""Name of the job type associated with the run.
View a run's job type in the run's Overview page in the W&B App.
You can use this to categorize runs by their job type, such as
"training", "evaluation", or "inference". This is useful for organizing
and filtering runs in the W&B UI, especially when you have multiple
runs with different job types in the same project. For more
information, see [Organize runs](https://docs.wandb.ai/guides/runs/#organize-runs).
"""
return self._settings.run_job_type or ""
def project_name(self) -> str:
"""This method is deprecated and will be removed in a future release. Use `run.project` instead.
Name of the W&B project associated with the run.
<!-- lazydoc-ignore: internal -->
"""
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__project_name=True),
message=(
"The project_name method is deprecated and will be removed in a"
" future release. Please use `run.project` instead."
),
)
return self.project
@property
@_log_to_run
@_attach
def project(self) -> str:
"""Name of the W&B project associated with the run."""
assert self._settings.project is not None
return self._settings.project
@_log_to_run
def get_project_url(self) -> str | None:
"""This method is deprecated and will be removed in a future release. Use `run.project_url` instead.
URL of the W&B project associated with the run, if there is one.
Offline runs do not have a project URL.
<!-- lazydoc-ignore: internal -->
"""
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__get_project_url=True),
message=(
"The get_project_url method is deprecated and will be removed in a"
" future release. Please use `run.project_url` instead."
),
)
return self.project_url
@property
@_log_to_run
@_attach
def project_url(self) -> str | None:
"""URL of the W&B project associated with the run, if there is one.
Offline runs do not have a project URL.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.project_url
@_raise_if_finished
@_log_to_run
@_attach
def log_code(
self,
root: str | None = ".",
name: str | None = None,
include_fn: Callable[[str, str], bool]
| Callable[[str], bool] = _is_py_requirements_or_dockerfile,
exclude_fn: Callable[[str, str], bool]
| Callable[[str], bool] = filenames.exclude_wandb_fn,
) -> Artifact | None:
"""Save the current state of your code to a W&B Artifact.
By default, it walks the current directory and logs all files that end with `.py`.
Args:
root: The relative (to `os.getcwd()`) or absolute path to recursively find code from.
name: (str, optional) The name of our code artifact. By default, we'll name
the artifact `source-$PROJECT_ID-$ENTRYPOINT_RELPATH`. There may be scenarios where you want
many runs to share the same artifact. Specifying name allows you to achieve that.
include_fn: A callable that accepts a file path and (optionally) root path and
returns True when it should be included and False otherwise. This
defaults to `lambda path, root: path.endswith(".py")`.
exclude_fn: A callable that accepts a file path and (optionally) root path and
returns `True` when it should be excluded and `False` otherwise. This
defaults to a function that excludes all files within `<root>/.wandb/`
and `<root>/wandb/` directories.
Examples:
Basic usage
```python
import wandb
with wandb.init() as run:
run.log_code()
```
Advanced usage
```python
import wandb
with wandb.init() as run:
run.log_code(
root="../",
include_fn=lambda path: path.endswith(".py") or path.endswith(".ipynb"),
exclude_fn=lambda path, root: os.path.relpath(path, root).startswith(
"cache/"
),
)
```
Returns:
An `Artifact` object if code was logged
"""
from wandb.sdk.artifacts._internal_artifact import InternalArtifact
if name is None:
if self.settings._jupyter:
notebook_name = None
if self.settings.notebook_name:
notebook_name = self.settings.notebook_name
elif self.settings.x_jupyter_path:
if self.settings.x_jupyter_path.startswith("fileId="):
notebook_name = self.settings.x_jupyter_name
else:
notebook_name = self.settings.x_jupyter_path
name_string = f"{self._settings.project}-{notebook_name}"
else:
name_string = (
f"{self._settings.project}-{self._settings.program_relpath}"
)
name = wandb.util.make_artifact_name_safe(f"source-{name_string}")
art = InternalArtifact(name, "code")
files_added = False
if root is not None:
root = os.path.abspath(root)
for file_path in filenames.filtered_dir(root, include_fn, exclude_fn):
files_added = True
save_name = os.path.relpath(file_path, root)
art.add_file(file_path, name=save_name)
# Add any manually staged files such as ipynb notebooks
for dirpath, _, files in os.walk(self._settings._tmp_code_dir):
for fname in files:
file_path = os.path.join(dirpath, fname)
save_name = os.path.relpath(file_path, self._settings._tmp_code_dir)
files_added = True
art.add_file(file_path, name=save_name)
if not files_added:
wandb.termwarn(
"No relevant files were detected in the specified directory. No code will be logged to your run."
)
return None
artifact = self._log_artifact(art)
self._config.update(
{"_wandb": {"code_path": artifact.name}},
allow_val_change=True,
)
return artifact
@_log_to_run
def get_sweep_url(self) -> str | None:
"""This method is deprecated and will be removed in a future release. Use `run.sweep_url` instead.
The URL of the sweep associated with the run, if there is one.
Offline runs do not have a sweep URL.
<!-- lazydoc-ignore: internal -->
"""
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__get_sweep_url=True),
message=(
"The get_sweep_url method is deprecated and will be removed in a"
" future release. Please use `run.sweep_url` instead."
),
)
return self.sweep_url
@property
@_attach
def sweep_url(self) -> str | None:
"""URL of the sweep associated with the run, if there is one.
Offline runs do not have a sweep URL.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.sweep_url
@_log_to_run
def get_url(self) -> str | None:
"""This method is deprecated and will be removed in a future release. Use `run.url` instead.
URL of the W&B run, if there is one. Offline runs do not have a URL.
<!-- lazydoc-ignore: internal -->
"""
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__get_url=True),
message=(
"The get_url method is deprecated and will be removed in a"
" future release. Please use `run.url` instead."
),
)
return self.url
@property
@_log_to_run
@_attach
def url(self) -> str | None:
"""The url for the W&B run, if there is one.
Offline runs will not have a url.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.run_url
@property
@_log_to_run
@_attach
def entity(self) -> str:
"""The name of the W&B entity associated with the run.
Entity can be a username or the name of a team or organization.
"""
return self._settings.entity or ""
def _label_internal(
self,
code: str | None = None,
repo: str | None = None,
code_version: str | None = None,
) -> None:
with telemetry.context(run=self) as tel:
if code and RE_LABEL.match(code):
tel.label.code_string = code
if repo and RE_LABEL.match(repo):
tel.label.repo_string = repo
if code_version and RE_LABEL.match(code_version):
tel.label.code_version = code_version
def _label(
self,
code: str | None = None,
repo: str | None = None,
code_version: str | None = None,
**kwargs: str,
) -> None:
if self._settings.label_disable:
return
for k, v in (("code", code), ("repo", repo), ("code_version", code_version)):
if v and not RE_LABEL.match(v):
wandb.termwarn(
f"Label added for '{k}' with invalid identifier '{v}' (ignored).",
repeat=False,
)
for v in kwargs:
wandb.termwarn(
f"Label added for unsupported key {v!r} (ignored).",
repeat=False,
)
self._label_internal(code=code, repo=repo, code_version=code_version)
# update telemetry in the backend immediately for _label() callers
self._telemetry_flush()
def _label_probe_lines(self, lines: list[str]) -> None:
if not lines:
return
parsed = telemetry._parse_label_lines(lines)
if not parsed:
return
label_dict = {}
code = parsed.get("code") or parsed.get("c")
if code:
label_dict["code"] = code
repo = parsed.get("repo") or parsed.get("r")
if repo:
label_dict["repo"] = repo
code_ver = parsed.get("version") or parsed.get("v")
if code_ver:
label_dict["code_version"] = code_ver
self._label_internal(**label_dict)
def _label_probe_main(self) -> None:
m = sys.modules.get("__main__")
if not m:
return
doc = getattr(m, "__doc__", None)
if not doc:
return
doclines = doc.splitlines()
self._label_probe_lines(doclines)
# TODO: annotate jupyter Notebook class
def _label_probe_notebook(self, notebook: Any) -> None:
logger.info("probe notebook")
lines = None
try:
data = notebook.probe_ipynb()
cell0 = data.get("cells", [])[0]
lines = cell0.get("source")
# kaggle returns a string instead of a list
if isinstance(lines, str):
lines = lines.split()
except Exception as e:
logger.info(f"Unable to probe notebook: {e}")
return
if lines:
self._label_probe_lines(lines)
@_log_to_run
@_attach
def display(self, height: int = 420, hidden: bool = False) -> bool:
"""Display this run in Jupyter."""
if self._settings.silent:
return False
if not ipython.in_jupyter():
return False
try:
from IPython import display
except ImportError:
wandb.termwarn(".display() only works in jupyter environments")
return False
display.display(display.HTML(self.to_html(height, hidden)))
return True
@_log_to_run
@_attach
def to_html(self, height: int = 420, hidden: bool = False) -> str:
"""Generate HTML containing an iframe displaying the current run.
<!-- lazydoc-ignore: internal -->
"""
url = self._settings.run_url + "?jupyter=true"
style = f"border:none;width:100%;height:{height}px;"
prefix = ""
if hidden:
style += "display:none;"
prefix = ipython.toggle_button()
return prefix + f"<iframe src={url!r} style={style!r}></iframe>"
def _repr_mimebundle_(
self, include: Any | None = None, exclude: Any | None = None
) -> dict[str, str]:
return {"text/html": self.to_html(hidden=True)}
@_log_to_run
@_raise_if_finished
def _config_callback(
self,
key: tuple[str, ...] | str | None = None,
val: Any | None = None,
data: dict[str, object] | None = None,
) -> None:
logger.info(f"config_cb {key} {val} {data}")
if self._backend and self._backend.interface:
self._backend.interface.publish_config(key=key, val=val, data=data)
@_log_to_run
def _config_artifact_callback(
self, key: str, val: str | Artifact | dict
) -> Artifact:
from wandb.apis import public
from wandb.sdk.artifacts.artifact import Artifact
# artifacts can look like dicts as they are passed into the run config
# since the run config stores them on the backend as a dict with fields shown
# in wandb.util.artifact_to_json
if _is_artifact_version_weave_dict(val):
assert isinstance(val, dict)
public_api = self._public_api()
artifact = Artifact._from_id(val["id"], public_api.client)
assert artifact
return self.use_artifact(artifact)
elif _is_artifact_string(val):
# this will never fail, but is required to make mypy happy
assert isinstance(val, str)
artifact_string, base_url, is_id = parse_artifact_string(val)
overrides = {}
if base_url is not None:
overrides = {"base_url": base_url}
public_api = public.Api(overrides)
else:
public_api = self._public_api()
if is_id:
artifact = Artifact._from_id(artifact_string, public_api._client)
else:
artifact = public_api._artifact(name=artifact_string)
# in the future we'll need to support using artifacts from
# different instances of wandb.
assert artifact
return self.use_artifact(artifact)
elif _is_artifact_object(val):
return self.use_artifact(val)
else:
raise ValueError(
f"Cannot call _config_artifact_callback on type {type(val)}"
)
def _set_config_wandb(self, key: str, val: Any) -> None:
self._config_callback(key=("_wandb", key), val=val)
@_log_to_run
@_raise_if_finished
def _summary_update_callback(self, summary_record: SummaryRecord) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_summary = True
if self._backend and self._backend.interface:
self._backend.interface.publish_summary(self, summary_record)
@_log_to_run
def _summary_get_current_summary_callback(self) -> dict[str, Any]:
if self._is_finished:
# TODO: WB-18420: fetch summary from backend and stage it before run is finished
wandb.termwarn("Summary data not available in finished run")
return {}
if not self._backend or not self._backend.interface:
return {}
handle = self._backend.interface.deliver_get_summary()
try:
result = handle.wait_or(timeout=self._settings.summary_timeout)
except TimeoutError:
return {}
get_summary_response = result.response.get_summary_response
return proto_util.dict_from_proto_list(get_summary_response.item)
@_log_to_run
def _metric_callback(self, metric_record: MetricRecord) -> None:
if self._backend and self._backend.interface:
self._backend.interface._publish_metric(metric_record)
@_log_to_run
def _publish_file(self, fname: str) -> None:
"""Mark a run file to be uploaded with the run.
This is a W&B-internal function: it can be used by other internal
wandb code.
Args:
fname: The path to the file, relative to the run's files directory.
"""
if not self._backend or not self._backend.interface:
return
files: FilesDict = dict(files=[(GlobStr(fname), "now")])
self._backend.interface.publish_files(files)
def _pop_all_charts(
self,
data: dict[str, Any],
key_prefix: str | None = None,
) -> dict[str, Any]:
"""Pops all charts from a dictionary including nested charts.
This function returns a mapping of the charts and a dot-separated
key for each chart, indicating the path to the chart in the data dictionary.
"""
keys_to_remove = set()
charts: dict[str, Any] = {}
for k, v in data.items():
key = f"{key_prefix}.{k}" if key_prefix else k
if isinstance(v, Visualize):
keys_to_remove.add(k)
charts[key] = v
elif isinstance(v, CustomChart):
keys_to_remove.add(k)
charts[key] = v
elif isinstance(v, dict):
nested_charts = self._pop_all_charts(v, key)
charts.update(nested_charts)
for k in keys_to_remove:
data.pop(k)
return charts
def _serialize_custom_charts(
self,
data: dict[str, Any],
) -> dict[str, Any]:
"""Process and replace chart objects with their underlying table values.
This processes the chart objects passed to `wandb.Run.log()`, replacing their entries
in the given dictionary (which is saved to the run's history) and adding them
to the run's config.
Args:
data: Dictionary containing data that may include plot objects
Plot objects can be nested in dictionaries, which will be processed recursively.
Returns:
The processed dictionary with custom charts transformed into tables.
"""
if not data:
return data
charts = self._pop_all_charts(data)
for k, v in charts.items():
v.set_key(k)
self._config_callback(
val=v.spec.config_value,
key=v.spec.config_key,
)
if isinstance(v, CustomChart):
data[v.spec.table_key] = v.table
elif isinstance(v, Visualize):
data[k] = v.table
return data
@_log_to_run
def _partial_history_callback(
self,
data: dict[str, Any],
step: int | None = None,
commit: bool | None = None,
) -> None:
if not (self._backend and self._backend.interface):
return
data = data.copy() # avoid modifying the original data
# Serialize custom charts before publishing
data = self._serialize_custom_charts(data)
not_using_tensorboard = len(wandb.patched["tensorboard"]) == 0
self._backend.interface.publish_partial_history(
self,
data,
user_step=self._step,
step=step,
flush=commit,
publish_step=not_using_tensorboard,
)
@_log_to_run
def _console_callback(self, name: str, data: str) -> None:
if self._backend and self._backend.interface:
# nowait=True so that this can be called from an asyncio context.
self._backend.interface.publish_output(name, data, nowait=True)
@_log_to_run
@_raise_if_finished
def _console_raw_callback(self, name: str, data: str) -> None:
# NOTE: console output is only allowed on the process which installed the callback
# this will prevent potential corruption in the socket to the service. Other methods
# are protected by the _attach run decorator, but this callback was installed on the
# write function of stdout and stderr streams.
console_pid = getattr(self, "_attach_pid", 0)
if console_pid != os.getpid():
return
if self._backend and self._backend.interface:
# nowait=True so that this can be called from an asyncio context.
self._backend.interface.publish_output_raw(name, data, nowait=True)
@_log_to_run
def _tensorboard_callback(
self, logdir: str, save: bool = True, root_logdir: str = ""
) -> None:
logger.info("tensorboard callback: %s, %s", logdir, save)
if self._backend and self._backend.interface:
self._backend.interface.publish_tbdata(logdir, save, root_logdir)
def _set_library(self, library: _WandbSetup) -> None:
self._wl = library
def _set_backend(self, backend: wandb.sdk.backend.backend.Backend) -> None:
self._backend = backend
def _set_internal_run_interface(
self,
interface: wandb.sdk.interface.interface_queue.InterfaceQueue,
) -> None:
self._internal_run_interface = interface
def _set_teardown_hooks(self, hooks: list[TeardownHook]) -> None:
self._teardown_hooks = hooks
def _set_run_obj(self, run_obj: RunRecord) -> None: # noqa: C901
if run_obj.starting_step:
self._starting_step = run_obj.starting_step
self._step = run_obj.starting_step
if run_obj.start_time:
self._start_time = run_obj.start_time.ToMicroseconds() / 1e6
if run_obj.runtime:
self._start_runtime = run_obj.runtime
# Grab the config from resuming
if run_obj.config:
c_dict = config_util.dict_no_value_from_proto_list(run_obj.config.update)
# We update the config object here without triggering the callback
self._config._update(c_dict, allow_val_change=True, ignore_locked=True)
# Update the summary, this will trigger an un-needed graphql request :(
if run_obj.summary:
summary_dict = {}
for orig in run_obj.summary.update:
summary_dict[orig.key] = json.loads(orig.value_json)
if summary_dict:
self.summary.update(summary_dict)
# update settings from run_obj
if run_obj.run_id:
self._settings.run_id = run_obj.run_id
if run_obj.entity:
self._settings.entity = run_obj.entity
if run_obj.project:
self._settings.project = run_obj.project
if run_obj.run_group:
self._settings.run_group = run_obj.run_group
if run_obj.job_type:
self._settings.run_job_type = run_obj.job_type
if run_obj.display_name:
self._settings.run_name = run_obj.display_name
if run_obj.notes:
self._settings.run_notes = run_obj.notes
if run_obj.tags:
self._settings.run_tags = tuple(run_obj.tags)
if run_obj.sweep_id:
self._settings.sweep_id = run_obj.sweep_id
if run_obj.host:
self._settings.host = run_obj.host
if run_obj.resumed:
self._settings.resumed = run_obj.resumed
if run_obj.git:
if run_obj.git.remote_url:
self._settings.git_remote_url = run_obj.git.remote_url
if run_obj.git.commit:
self._settings.git_commit = run_obj.git.commit
if run_obj.forked:
self._forked = run_obj.forked
get_sentry().configure_scope(
process_context="user",
tags=dict(self._settings),
)
def _populate_git_info(self) -> None:
from .lib.gitlib import GitRepo
# Use user-provided git info if available, otherwise resolve it from the environment
try:
repo = GitRepo(
root=self._settings.git_root,
remote=self._settings.git_remote,
remote_url=self._settings.git_remote_url,
commit=self._settings.git_commit,
lazy=False,
)
self._settings.git_remote_url = repo.remote_url
self._settings.git_commit = repo.last_commit
except Exception:
wandb.termwarn("Cannot find valid git repo associated with this directory.")
def _add_singleton(
self, data_type: str, key: str, value: dict[int | str, str]
) -> None:
"""Store a singleton item to wandb config.
A singleton in this context is a piece of data that is continually
logged with the same value in each history step, but represented
as a single item in the config.
We do this to avoid filling up history with a lot of repeated unnecessary data.
Add singleton can be called many times in one run, and it will only be
updated when the value changes. The last value logged will be the one
persisted to the server.
"""
value_extra = {"type": data_type, "key": key, "value": value}
if data_type not in self._config["_wandb"]:
self._config["_wandb"][data_type] = {}
if data_type in self._config["_wandb"][data_type]:
old_value = self._config["_wandb"][data_type][key]
else:
old_value = None
if value_extra != old_value:
self._config["_wandb"][data_type][key] = value_extra
self._config.persist()
def _log(
self,
data: dict[str, Any],
step: int | None = None,
commit: bool | None = None,
) -> None:
if not isinstance(data, Mapping):
raise TypeError("wandb.log must be passed a dictionary")
if any(not isinstance(key, str) for key in data.keys()):
raise TypeError("Key values passed to `wandb.log` must be strings.")
self._partial_history_callback(data, step, commit)
if step is not None:
if os.getpid() != self._init_pid or self._is_attached:
wandb.termwarn(
"Note that setting step in multiprocessing can result in data loss. "
"Please use `run.define_metric(...)` to define a custom metric "
"to log your step values.",
repeat=False,
)
# if step is passed in when tensorboard_sync is used we honor the step passed
# to make decisions about how to close out the history record, but will strip
# this history later on in publish_history()
if len(wandb.patched["tensorboard"]) > 0:
wandb.termwarn(
"Step cannot be set when using tensorboard syncing. "
"Please use `run.define_metric(...)` to define a custom metric "
"to log your step values.",
repeat=False,
)
if step > self._step:
self._step = step
if (step is None and commit is None) or commit:
self._step += 1
@_log_to_run
@_raise_if_finished
@_attach
def log(
self,
data: dict[str, Any],
step: int | None = None,
commit: bool | None = None,
) -> None:
"""Upload run data.
Use `log` to log data from runs, such as scalars, images, video,
histograms, plots, and tables. See [Log objects and media](https://docs.wandb.ai/guides/track/log) for
code snippets, best practices, and more.
Basic usage:
```python
import wandb
with wandb.init() as run:
run.log({"train-loss": 0.5, "accuracy": 0.9})
```
The previous code snippet saves the loss and accuracy to the run's
history and updates the summary values for these metrics.
Visualize logged data in a workspace at [wandb.ai](https://wandb.ai),
or locally on a [self-hosted instance](https://docs.wandb.ai/guides/hosting)
of the W&B app, or export data to visualize and explore locally, such as in a
Jupyter notebook, with the [Public API](https://docs.wandb.ai/guides/track/public-api-guide).
Logged values don't have to be scalars. You can log any
[W&B supported Data Type](https://docs.wandb.ai/ref/python/data-types/)
such as images, audio, video, and more. For example, you can use
`wandb.Table` to log structured data. See
[Log tables, visualize and query data](https://docs.wandb.ai/guides/models/tables/tables-walkthrough)
tutorial for more details.
W&B organizes metrics with a forward slash (`/`) in their name
into sections named using the text before the final slash. For example,
the following results in two sections named "train" and "validate":
```python
with wandb.init() as run:
# Log metrics in the "train" section.
run.log(
{
"train/accuracy": 0.9,
"train/loss": 30,
"validate/accuracy": 0.8,
"validate/loss": 20,
}
)
```
Only one level of nesting is supported; `run.log({"a/b/c": 1})`
produces a section named "a/b".
`run.log()` is not intended to be called more than a few times per second.
For optimal performance, limit your logging to once every N iterations,
or collect data over multiple iterations and log it in a single step.
By default, each call to `log` creates a new "step".
The step must always increase, and it is not possible to log
to a previous step. You can use any metric as the X axis in charts.
See [Custom log axes](https://docs.wandb.ai/guides/track/log/customize-logging-axes/)
for more details.
In many cases, it is better to treat the W&B step like
you'd treat a timestamp rather than a training step.
```python
with wandb.init() as run:
# Example: log an "epoch" metric for use as an X axis.
run.log({"epoch": 40, "train-loss": 0.5})
```
It is possible to use multiple `wandb.Run.log()` invocations to log to
the same step with the `step` and `commit` parameters.
The following are all equivalent:
```python
with wandb.init() as run:
# Normal usage:
run.log({"train-loss": 0.5, "accuracy": 0.8})
run.log({"train-loss": 0.4, "accuracy": 0.9})
# Implicit step without auto-incrementing:
run.log({"train-loss": 0.5}, commit=False)
run.log({"accuracy": 0.8})
run.log({"train-loss": 0.4}, commit=False)
run.log({"accuracy": 0.9})
# Explicit step:
run.log({"train-loss": 0.5}, step=current_step)
run.log({"accuracy": 0.8}, step=current_step)
current_step += 1
run.log({"train-loss": 0.4}, step=current_step)
run.log({"accuracy": 0.9}, step=current_step, commit=True)
```
Args:
data: A `dict` with `str` keys and values that are serializable
Python objects including: `int`, `float` and `string`;
any of the `wandb.data_types`; lists, tuples and NumPy arrays
of serializable Python objects; other `dict`s of this
structure.
step: The step number to log. If `None`, then an implicit
auto-incrementing step is used. See the notes in
the description.
commit: If true, finalize and upload the step. If false, then
accumulate data for the step. See the notes in the description.
If `step` is `None`, then the default is `commit=True`;
otherwise, the default is `commit=False`.
Examples:
For more detailed examples, see
[our guides to logging](https://docs.wandb.com/guides/track/log).
Basic usage
```python
import wandb
with wandb.init() as run:
run.log({"train-loss": 0.5, "accuracy": 0.9
```
Incremental logging
```python
import wandb
with wandb.init() as run:
run.log({"loss": 0.2}, commit=False)
# Somewhere else when I'm ready to report this step:
run.log({"accuracy": 0.8})
```
Histogram
```python
import numpy as np
import wandb
# sample gradients at random from normal distribution
gradients = np.random.randn(100, 100)
with wandb.init() as run:
run.log({"gradients": wandb.Histogram(gradients)})
```
Image from NumPy
```python
import numpy as np
import wandb
with wandb.init() as run:
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
run.log({"examples": examples})
```
Image from PIL
```python
import numpy as np
from PIL import Image as PILImage
import wandb
with wandb.init() as run:
examples = []
for i in range(3):
pixels = np.random.randint(
low=0,
high=256,
size=(100, 100, 3),
dtype=np.uint8,
)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
run.log({"examples": examples})
```
Video from NumPy
```python
import numpy as np
import wandb
with wandb.init() as run:
# axes are (time, channel, height, width)
frames = np.random.randint(
low=0,
high=256,
size=(10, 3, 100, 100),
dtype=np.uint8,
)
run.log({"video": wandb.Video(frames, fps=4)})
```
Matplotlib plot
```python
from matplotlib import pyplot as plt
import numpy as np
import wandb
with wandb.init() as run:
fig, ax = plt.subplots()
x = np.linspace(0, 10)
y = x * x
ax.plot(x, y) # plot y = x^2
run.log({"chart": fig})
```
PR Curve
```python
import wandb
with wandb.init() as run:
run.log({"pr": wandb.plot.pr_curve(y_test, y_probas, labels)})
```
3D Object
```python
import wandb
with wandb.init() as run:
run.log(
{
"generated_samples": [
wandb.Object3D(open("sample.obj")),
wandb.Object3D(open("sample.gltf")),
wandb.Object3D(open("sample.glb")),
]
}
)
```
Raises:
wandb.Error: If called before `wandb.init()`.
ValueError: If invalid data is passed.
"""
if step is not None:
with telemetry.context(run=self) as tel:
tel.feature.set_step_log = True
if self._settings._shared and step is not None:
wandb.termwarn(
"In shared mode, the use of `wandb.log` with the step argument is not supported "
f"and will be ignored. Please refer to {url_registry.url('define-metric')} "
"on how to customize your x-axis.",
repeat=False,
)
self._log(data=data, step=step, commit=commit)
@_log_to_run
@_raise_if_finished
@_attach
def save(
self,
glob_str: str | os.PathLike,
base_path: str | os.PathLike | None = None,
policy: PolicyName = "live",
) -> bool | list[str]:
"""Sync one or more files to W&B.
Relative paths are relative to the current working directory.
A Unix glob, such as "myfiles/*", is expanded at the time `save` is
called regardless of the `policy`. In particular, new files are not
picked up automatically.
A `base_path` may be provided to control the directory structure of
uploaded files. It should be a prefix of `glob_str`, and the directory
structure beneath it is preserved.
When given an absolute path or glob and no `base_path`, one
directory level is preserved, as in the examples below.
Files are automatically deduplicated: calling `save()` multiple times
on the same file without modifications will not re-upload it.
Args:
glob_str: A relative or absolute path or Unix glob.
base_path: A path to use to infer a directory structure; see examples.
policy: One of `live`, `now`, or `end`.
- live: upload the file as it changes, overwriting the previous version
- now: upload the file once now
- end: upload file when the run ends
Returns:
Paths to the symlinks created for the matched files.
For historical reasons, this may return a boolean in legacy code.
```python
import wandb
run = wandb.init()
run.save("these/are/myfiles/*")
# => Saves files in a "these/are/myfiles/" folder in the run.
run.save("these/are/myfiles/*", base_path="these")
# => Saves files in an "are/myfiles/" folder in the run.
run.save("/Users/username/Documents/run123/*.txt")
# => Saves files in a "run123/" folder in the run. See note below.
run.save("/Users/username/Documents/run123/*.txt", base_path="/Users")
# => Saves files in a "username/Documents/run123/" folder in the run.
run.save("files/*/saveme.txt")
# => Saves each "saveme.txt" file in an appropriate subdirectory
# of "files/".
# Explicitly finish the run since a context manager is not used.
run.finish()
```
"""
if isinstance(glob_str, bytes):
# Preserved for backward compatibility: allow bytes inputs.
glob_str = glob_str.decode("utf-8")
if isinstance(glob_str, str) and (glob_str.startswith(("gs://", "s3://"))):
# Provide a better error message for a common misuse.
wandb.termlog(f"{glob_str} is a cloud storage url, can't save file to W&B.")
return []
# NOTE: We use PurePath instead of Path because WindowsPath doesn't
# like asterisks and errors out in resolve(). It also makes logical
# sense: globs aren't real paths, they're just path-like strings.
glob_path = pathlib.PurePath(glob_str)
resolved_glob_path = pathlib.PurePath(os.path.abspath(glob_path))
if base_path is not None:
base_path = pathlib.Path(base_path)
elif not glob_path.is_absolute():
base_path = pathlib.Path(".")
else:
# Absolute glob paths with no base path get special handling.
wandb.termwarn(
"Saving files without folders. If you want to preserve "
"subdirectories pass base_path to wandb.save, i.e. "
'wandb.save("/mnt/folder/file.h5", base_path="/mnt")',
repeat=False,
)
base_path = resolved_glob_path.parent.parent
if policy not in ("live", "end", "now"):
raise ValueError(
'Only "live", "end" and "now" policies are currently supported.'
)
resolved_base_path = pathlib.PurePath(os.path.abspath(base_path))
return self._save(
resolved_glob_path,
resolved_base_path,
policy,
)
def _save(
self,
glob_path: pathlib.PurePath,
base_path: pathlib.PurePath,
policy: PolicyName,
) -> list[str]:
"""Materialize matched files into the run's files/ dir for syncing.
Strategy:
1) If settings.symlink is True, try symlink.
2) Else (or if symlink fails), try hardlink (same-volume files).
3) Else copy and, if requested policy == "live", downgrade those files to "now".
Args:
glob_path: Absolute path glob pattern for files to save.
base_path: Base path to determine relative directory structure.
policy: Upload policy - "live", "now", or "end".
Returns:
List of absolute paths to files in the wandb run directory.
Raises:
ValueError: If glob_path is invalid relative to base_path.
"""
validate_glob_path(glob_path, base_path)
relative_glob = glob_path.relative_to(base_path)
relative_glob_str = GlobStr(str(relative_glob))
with telemetry.context(run=self) as tel:
tel.feature.save = True
files_root = pathlib.Path(self._settings.files_dir)
preexisting = set(files_root.glob(relative_glob_str))
# Expand sources deterministically.
src_paths = [
pathlib.Path(p).absolute()
for p in sorted(glob.glob(GlobStr(str(base_path / relative_glob_str))))
]
stats = LinkStats()
publish_entries = []
created_targets = set()
for src in src_paths:
# Preserve directory structure under base_path.
rel = pathlib.Path(*src.parts[len(base_path.parts) :])
dst = files_root / rel
created_targets.add(dst)
# If already the same file, just publish with requested policy.
with contextlib.suppress(OSError):
if dst.exists() and src.samefile(dst):
publish_entries.append(
(GlobStr(str(dst.relative_to(files_root))), policy)
)
continue
dst.parent.mkdir(parents=True, exist_ok=True)
unlink_path(dst)
effective_policy = link_or_copy_with_policy(
self._settings, src, dst, policy, stats
)
publish_entries.append(
(GlobStr(str(dst.relative_to(files_root))), effective_policy)
)
# Include pre-existing matches we didn't touch.
for p in sorted(preexisting):
if p not in created_targets:
publish_entries.append(
(GlobStr(str(p.relative_to(files_root))), policy)
)
stats.emit_warnings()
files_dict: FilesDict = {"files": publish_entries}
if self._backend and self._backend.interface:
self._backend.interface.publish_files(files_dict)
abs_targets = {files_root / pathlib.Path(g) for (g, _pol) in publish_entries}
return [str(p) for p in sorted(abs_targets)]
@_log_to_run
@_attach
def restore(
self,
name: str,
run_path: str | None = None,
replace: bool = False,
root: str | None = None,
) -> None | TextIO:
return restore(
name,
run_path or self._get_path(),
replace,
root or self._settings.files_dir,
)
@_log_to_run
@_attach
def finish(
self,
exit_code: int | None = None,
quiet: bool | None = None,
) -> None:
"""Finish a run and upload any remaining data.
Marks the completion of a W&B run and ensures all data is synced to the server.
The run's final state is determined by its exit conditions and sync status.
Run States:
- Running: Active run that is logging data and/or sending heartbeats.
- Crashed: Run that stopped sending heartbeats unexpectedly.
- Finished: Run completed successfully (`exit_code=0`) with all data synced.
- Failed: Run completed with errors (`exit_code!=0`).
- Killed: Run was forcibly stopped before it could finish.
Args:
exit_code: Integer indicating the run's exit status. Use 0 for success,
any other value marks the run as failed.
quiet: Deprecated. Configure logging verbosity using `wandb.Settings(quiet=...)`.
"""
if quiet is not None:
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__finish_quiet=True),
message=(
"The `quiet` argument to `wandb.run.finish()` is deprecated, "
"use `wandb.Settings(quiet=...)` to set this instead."
),
run=self,
)
return self._finish(exit_code)
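    # Usage sketch (a minimal example; the project name is hypothetical):
    #
    #     with wandb.init(project="demo") as run:
    #         run.log({"loss": 0.1})
    #     # finish() is called automatically on context exit; otherwise call
    #     # run.finish(exit_code=0) explicitly.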
@_log_to_run
def _finish(
self,
exit_code: int | None = None,
) -> None:
if self._is_finished:
return
assert self._wl
logger.info(f"finishing run {self._get_path()}")
with telemetry.context(run=self) as tel:
tel.feature.finish = True
# Run hooks that need to happen before the last messages to the
# internal service, like Jupyter hooks.
for hook in self._teardown_hooks:
if hook.stage == TeardownStage.EARLY:
hook.call()
# Early-stage hooks may use methods that require _is_finished
# to be False, so we set this after running those hooks.
self._is_finished = True
self._wl.remove_active_run(self)
try:
self._atexit_cleanup(exit_code=exit_code)
# Run hooks that should happen after the last messages to the
# internal service, like detaching the logger.
for hook in self._teardown_hooks:
if hook.stage == TeardownStage.LATE:
hook.call()
self._teardown_hooks = []
# Inform the service that we're done sending messages for this run.
#
# TODO: Why not do this in _atexit_cleanup()?
if self._settings.run_id:
service = self._wl.assert_service()
service.inform_finish(run_id=self._settings.run_id)
finally:
if wandb.run is self:
module.unset_globals()
get_sentry().end_session()
@_log_to_run
@_raise_if_finished
@_attach
def status(
self,
) -> RunStatus:
"""Get sync info from the internal backend, about the current run's sync status."""
if not self._backend or not self._backend.interface:
return RunStatus()
handle_run_status = self._backend.interface.deliver_request_run_status()
result = handle_run_status.wait_or(timeout=None)
sync_data = result.response.run_status_response
sync_time = None
if sync_data.sync_time.seconds:
sync_time = datetime.fromtimestamp(
sync_data.sync_time.seconds + sync_data.sync_time.nanos / 1e9
)
return RunStatus(
sync_items_total=sync_data.sync_items_total,
sync_items_pending=sync_data.sync_items_pending,
sync_time=sync_time,
)
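    # Usage sketch:
    #
    #     st = run.status()
    #     print(f"{st.sync_items_pending} of {st.sync_items_total} items pending")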
def _add_panel(
self, visualize_key: str, panel_type: str, panel_config: dict
) -> None:
config = {
"panel_type": panel_type,
"panel_config": panel_config,
}
self._config_callback(val=config, key=("_wandb", "visualize", visualize_key))
def _redirect(
self,
stdout_slave_fd: int | None,
stderr_slave_fd: int | None,
console: str | None = None,
) -> None:
if console is None:
console = self._settings.console
# only use raw for service to minimize potential changes
if console == "wrap":
console = "wrap_raw"
logger.info("redirect: %s", console)
out_redir: redirect.RedirectBase
err_redir: redirect.RedirectBase
# raw output handles the output_log writing in the internal process
if console in {"redirect", "wrap_emu"}:
output_log_path = os.path.join(
self._settings.files_dir, filenames.OUTPUT_FNAME
)
# output writer might have been set up, see wrap_fallback case
if not self._output_writer:
self._output_writer = filesystem.CRDedupedFile(
open(output_log_path, "wb")
)
if console == "redirect":
logger.info("Redirecting console.")
out_redir = redirect.Redirect(
src="stdout",
cbs=[
lambda data: self._console_callback("stdout", data),
self._output_writer.write, # type: ignore
],
flush_periodically=(self._settings.mode == "online"),
)
err_redir = redirect.Redirect(
src="stderr",
cbs=[
lambda data: self._console_callback("stderr", data),
self._output_writer.write, # type: ignore
],
flush_periodically=(self._settings.mode == "online"),
)
if os.name == "nt":
def wrap_fallback() -> None:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
msg = (
"Tensorflow detected. Stream redirection is not supported "
"on Windows when tensorflow is imported. Falling back to "
"wrapping stdout/err."
)
wandb.termlog(msg)
self._redirect(None, None, console="wrap")
add_import_hook("tensorflow", wrap_fallback)
elif console == "wrap_emu":
logger.info("Wrapping output streams.")
out_redir = redirect.StreamWrapper(
src="stdout",
cbs=[
lambda data: self._console_callback("stdout", data),
self._output_writer.write, # type: ignore
],
flush_periodically=(self._settings.mode == "online"),
)
err_redir = redirect.StreamWrapper(
src="stderr",
cbs=[
lambda data: self._console_callback("stderr", data),
self._output_writer.write, # type: ignore
],
flush_periodically=(self._settings.mode == "online"),
)
elif console == "wrap_raw":
logger.info("Wrapping output streams.")
out_redir = redirect.StreamRawWrapper(
src="stdout",
cbs=[
lambda data: self._console_raw_callback("stdout", data),
],
)
err_redir = redirect.StreamRawWrapper(
src="stderr",
cbs=[
lambda data: self._console_raw_callback("stderr", data),
],
)
elif console == "off":
return
else:
raise ValueError("unhandled console")
try:
# save stdout and stderr before installing new write functions
out_redir.install()
err_redir.install()
self._out_redir = out_redir
self._err_redir = err_redir
logger.info("Redirects installed.")
except Exception as e:
wandb.termwarn(f"Failed to redirect: {e}")
logger.exception("Failed to redirect.")
return
def _restore(self) -> None:
logger.info("restore")
# TODO(jhr): drain and shutdown all threads
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
logger.info("restore done")
def _atexit_cleanup(self, exit_code: int | None = None) -> None:
if self._backend is None:
logger.warning("process exited without backend configured")
return
if self._atexit_cleanup_called:
return
self._atexit_cleanup_called = True
exit_code = exit_code or (self._hooks and self._hooks.exit_code) or 0
self._exit_code = exit_code
logger.info(f"got exitcode: {exit_code}")
# Delete this run's "resume" file if the run finished successfully.
#
# This is used by the "auto" resume mode, which resumes from the last
# failed (or unfinished/crashed) run. If we reach this line, then this
# run shouldn't be a candidate for "auto" resume.
if exit_code == 0:
if os.path.exists(self._settings.resume_fname):
os.remove(self._settings.resume_fname)
try:
self._on_finish()
except KeyboardInterrupt:
if not wandb.wandb_agent._is_running(): # type: ignore
wandb.termerror("Control-C detected -- Run data was not synced")
raise
except Exception:
self._console_stop()
logger.exception("Problem finishing run")
wandb.termerror("Problem finishing run")
raise
Run._footer(
sampled_history=self._sampled_history,
final_summary=self._final_summary,
poll_exit_response=self._poll_exit_response,
internal_messages_response=self._internal_messages_response,
settings=self._settings,
printer=self._printer,
)
def _console_start(self) -> None:
logger.info("atexit reg")
self._hooks = ExitHooks()
self._redirect(self._stdout_slave_fd, self._stderr_slave_fd)
def _console_stop(self) -> None:
self._restore()
if self._output_writer:
self._output_writer.close()
self._output_writer = None
def _on_start(self) -> None:
self._header()
if self._settings.save_code and self._settings.code_dir is not None:
self.log_code(self._settings.code_dir)
if self._settings.x_save_requirements:
if self._backend and self._backend.interface:
from wandb.util import working_set
logger.debug(
"Saving list of pip packages installed into the current environment"
)
self._backend.interface.publish_python_packages(working_set())
if self._backend and self._backend.interface and not self._settings._offline:
assert self._settings.run_id
self._run_status_checker = RunStatusChecker(
self._settings.run_id,
interface=self._backend.interface,
settings=self._settings,
)
self._run_status_checker.start()
self._console_start()
self._on_ready()
def _on_attach(self) -> None:
"""Event triggered when run is attached to another run."""
with telemetry.context(run=self) as tel:
tel.feature.attach = True
self._is_attached = True
self._on_ready()
def _register_telemetry_import_hooks(
self,
) -> None:
def _telemetry_import_hook(
run: Run,
module: Any,
) -> None:
with telemetry.context(run=run) as tel:
try:
name = getattr(module, "__name__", None)
if name is not None:
setattr(tel.imports_finish, name, True)
except AttributeError:
return
import_telemetry_set = telemetry.list_telemetry_imports()
import_hook_fn = functools.partial(_telemetry_import_hook, self)
if not self._settings.run_id:
return
for module_name in import_telemetry_set:
register_post_import_hook(
import_hook_fn,
self._settings.run_id,
module_name,
)
def _on_ready(self) -> None:
"""Event triggered when run is ready for the user."""
assert self._wl
self._wl.add_active_run(self)
self._register_telemetry_import_hooks()
# start reporting any telemetry changes
self._telemetry_obj_active = True
self._telemetry_flush()
try:
self._detect_and_apply_job_inputs()
except Exception:
logger.exception("Problem applying launch job inputs")
# object is about to be returned to the user, don't let them modify it
self._freeze()
if not self._settings.resume:
if os.path.exists(self._settings.resume_fname):
os.remove(self._settings.resume_fname)
def _detect_and_apply_job_inputs(self) -> None:
"""If the user has staged launch inputs, apply them to the run."""
from wandb.sdk.launch.inputs.internal import StagedLaunchInputs
StagedLaunchInputs().apply(self)
def _make_job_source_reqs(self) -> tuple[list[str], dict[str, Any], dict[str, Any]]:
from wandb.util import working_set
installed_packages_list = sorted(f"{d.key}=={d.version}" for d in working_set())
input_types = TypeRegistry.type_of(self.config.as_dict()).to_json()
output_types = TypeRegistry.type_of(self.summary._as_dict()).to_json()
return installed_packages_list, input_types, output_types
def _construct_job_artifact(
self,
name: str,
source_dict: JobSourceDict,
installed_packages_list: list[str],
patch_path: os.PathLike | None = None,
) -> Artifact:
from wandb.sdk.artifacts._internal_artifact import InternalArtifact
from wandb.sdk.internal import job_builder
job_artifact = InternalArtifact(name, job_builder.JOB_ARTIFACT_TYPE)
if patch_path and os.path.exists(patch_path):
job_artifact.add_file(FilePathStr(patch_path), "diff.patch")
with job_artifact.new_file("requirements.frozen.txt") as f:
f.write("\n".join(installed_packages_list))
with job_artifact.new_file("wandb-job.json") as f:
f.write(json.dumps(source_dict))
return job_artifact
def _create_image_job(
self,
input_types: dict[str, Any],
output_types: dict[str, Any],
installed_packages_list: list[str],
docker_image_name: str | None = None,
args: list[str] | None = None,
) -> Artifact | None:
docker_image_name = docker_image_name or os.getenv("WANDB_DOCKER")
if not docker_image_name:
return None
name = wandb.util.make_artifact_name_safe(f"job-{docker_image_name}")
s_args: Sequence[str] = args if args is not None else self._settings._args
source_info: JobSourceDict = {
"_version": "v0",
"source_type": "image",
"source": {"image": docker_image_name, "args": s_args},
"input_types": input_types,
"output_types": output_types,
"runtime": self._settings._python,
}
job_artifact = self._construct_job_artifact(
name, source_info, installed_packages_list
)
return job_artifact
def _log_job_artifact_with_image(
self, docker_image_name: str, args: list[str] | None = None
) -> Artifact:
packages, in_types, out_types = self._make_job_source_reqs()
job_artifact = self._create_image_job(
in_types,
out_types,
packages,
args=args,
docker_image_name=docker_image_name,
)
assert job_artifact
artifact = self.log_artifact(job_artifact)
if not artifact:
raise wandb.Error(f"Job Artifact log unsuccessful: {artifact}")
else:
return artifact
def _on_finish(self) -> None:
trigger.call("on_finished")
if self._run_status_checker is not None:
self._run_status_checker.stop()
self._console_stop() # TODO: there's a race here with jupyter console logging
assert self._backend and self._backend.interface
if self._settings.x_update_finish_state:
exit_handle = self._backend.interface.deliver_exit(self._exit_code)
else:
exit_handle = self._backend.interface.deliver_finish_without_exit()
with progress.progress_printer(
self._printer,
default_text="Finishing up...",
) as progress_printer:
# Wait for the run to complete.
wait_with_progress(
exit_handle,
timeout=None,
display_progress=functools.partial(
progress.loop_printing_operation_stats,
progress_printer,
self._backend.interface,
),
)
poll_exit_handle = self._backend.interface.deliver_poll_exit()
result = poll_exit_handle.wait_or(timeout=None)
self._poll_exit_response = result.response.poll_exit_response
internal_messages_handle = self._backend.interface.deliver_internal_messages()
result = internal_messages_handle.wait_or(timeout=None)
self._internal_messages_response = result.response.internal_messages_response
# dispatch all our final requests
final_summary_handle = self._backend.interface.deliver_get_summary()
sampled_history_handle = (
self._backend.interface.deliver_request_sampled_history()
)
result = sampled_history_handle.wait_or(timeout=None)
self._sampled_history = result.response.sampled_history_response
result = final_summary_handle.wait_or(timeout=None)
self._final_summary = result.response.get_summary_response
if self._backend:
self._backend.cleanup()
if self._run_status_checker:
self._run_status_checker.join()
if self._settings.run_id:
self._unregister_telemetry_import_hooks(self._settings.run_id)
@staticmethod
def _unregister_telemetry_import_hooks(run_id: str) -> None:
import_telemetry_set = telemetry.list_telemetry_imports()
for module_name in import_telemetry_set:
unregister_post_import_hook(module_name, run_id)
@_log_to_run
@_raise_if_finished
@_attach
def define_metric(
self,
name: str,
step_metric: str | wandb_metric.Metric | None = None,
step_sync: bool | None = None,
hidden: bool | None = None,
summary: str | None = None,
goal: str | None = None,
overwrite: bool | None = None,
) -> wandb_metric.Metric:
"""Customize metrics logged with `wandb.Run.log()`.
Args:
name: The name of the metric to customize.
step_metric: The name of another metric to serve as the X-axis
for this metric in automatically generated charts.
step_sync: Automatically insert the last value of step_metric into
`wandb.Run.log()` if it is not provided explicitly. Defaults to True
if step_metric is specified.
hidden: Hide this metric from automatic plots.
            summary: Specify aggregate metrics added to summary.
                Supported aggregations include "min", "max", "mean", "last",
                "first", "best", "copy" and "none". "none" prevents a summary
                from being generated. "best" is used together with the goal
                parameter. Both "best" and "copy" are deprecated; use "min"
                or "max" instead.
            goal: Specify how to interpret the "best" summary type.
                Supported options are "minimize" and "maximize". "goal" is
                deprecated; use summary "min" or "max" instead.
overwrite: If false, then this call is merged with previous
`define_metric` calls for the same metric by using their
values for any unspecified parameters. If true, then
unspecified parameters overwrite values specified by
previous calls.
Returns:
An object that represents this call but can otherwise be discarded.
"""
if summary and "copy" in summary:
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__define_metric_copy=True),
message="define_metric(summary='copy') is deprecated and will be removed.",
run=self,
)
if (summary and "best" in summary) or goal is not None:
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__define_metric_best_goal=True),
message="define_metric(summary='best', goal=...) is deprecated and will be removed. "
"Use define_metric(summary='min') or define_metric(summary='max') instead.",
run=self,
)
return self._define_metric(
name,
step_metric,
step_sync,
hidden,
summary,
goal,
overwrite,
)
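    # Usage sketch (metric names are hypothetical):
    #
    #     run.define_metric("epoch")
    #     run.define_metric("val/loss", step_metric="epoch", summary="min")
    #     run.define_metric("val/*", step_metric="epoch")  # glob suffix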
def _define_metric(
self,
name: str,
step_metric: str | wandb_metric.Metric | None = None,
step_sync: bool | None = None,
hidden: bool | None = None,
summary: str | None = None,
goal: str | None = None,
overwrite: bool | None = None,
) -> wandb_metric.Metric:
if not name:
raise wandb.Error("define_metric() requires non-empty name argument")
if isinstance(step_metric, wandb_metric.Metric):
step_metric = step_metric.name
for arg_name, arg_val, exp_type in (
("name", name, str),
("step_metric", step_metric, str),
("step_sync", step_sync, bool),
("hidden", hidden, bool),
("summary", summary, str),
("goal", goal, str),
("overwrite", overwrite, bool),
):
# NOTE: type checking is broken for isinstance and str
if arg_val is not None and not isinstance(arg_val, exp_type):
arg_type = type(arg_val).__name__
raise wandb.Error(
f"Unhandled define_metric() arg: {arg_name} type: {arg_type}"
)
stripped = name[:-1] if name.endswith("*") else name
if "*" in stripped:
raise wandb.Error(
f"Unhandled define_metric() arg: name (glob suffixes only): {name}"
)
summary_ops: Sequence[str] | None = None
if summary:
summary_items = [s.lower() for s in summary.split(",")]
summary_ops = []
valid = {"min", "max", "mean", "best", "last", "copy", "none", "first"}
# TODO: deprecate copy and best
for i in summary_items:
if i not in valid:
raise wandb.Error(f"Unhandled define_metric() arg: summary op: {i}")
summary_ops.append(i)
with telemetry.context(run=self) as tel:
tel.feature.metric_summary = True
# TODO: deprecate goal
goal_cleaned: str | None = None
if goal is not None:
goal_cleaned = goal[:3].lower()
valid_goal = {"min", "max"}
if goal_cleaned not in valid_goal:
raise wandb.Error(f"Unhandled define_metric() arg: goal: {goal}")
with telemetry.context(run=self) as tel:
tel.feature.metric_goal = True
if hidden:
with telemetry.context(run=self) as tel:
tel.feature.metric_hidden = True
if step_sync:
with telemetry.context(run=self) as tel:
tel.feature.metric_step_sync = True
with telemetry.context(run=self) as tel:
tel.feature.metric = True
m = wandb_metric.Metric(
name=name,
step_metric=step_metric,
step_sync=step_sync,
summary=summary_ops,
hidden=hidden,
goal=goal_cleaned,
overwrite=overwrite,
)
m._set_callback(self._metric_callback)
m._commit()
return m
@_log_to_run
@_attach
def watch(
self,
models: torch.nn.Module | Sequence[torch.nn.Module],
criterion: torch.F | None = None, # type: ignore
log: Literal["gradients", "parameters", "all"] | None = "gradients",
log_freq: int = 1000,
idx: int | None = None,
log_graph: bool = False,
) -> None:
"""Hook into given PyTorch model to monitor gradients and the model's computational graph.
This function can track parameters, gradients, or both during training.
Args:
models: A single model or a sequence of models to be monitored.
criterion: The loss function being optimized (optional).
log: Specifies whether to log "gradients", "parameters", or "all".
Set to None to disable logging. (default="gradients").
log_freq: Frequency (in batches) to log gradients and parameters. (default=1000)
idx: Index used when tracking multiple models with `wandb.watch`. (default=None)
log_graph: Whether to log the model's computational graph. (default=False)
Raises:
ValueError:
If `wandb.init()` has not been called or if any of the models are not instances
of `torch.nn.Module`.
"""
wandb.sdk._watch(self, models, criterion, log, log_freq, idx, log_graph)
@_log_to_run
@_attach
def unwatch(
self, models: torch.nn.Module | Sequence[torch.nn.Module] | None = None
) -> None:
"""Remove pytorch model topology, gradient and parameter hooks.
Args:
models: Optional list of pytorch models that have had watch called on them.
"""
wandb.sdk._unwatch(self, models=models)
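    # Usage sketch, assuming a hypothetical PyTorch model:
    #
    #     model = torch.nn.Linear(4, 2)
    #     run.watch(model, log="all", log_freq=100)
    #     ...  # training loop
    #     run.unwatch(model)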
@_log_to_run
@_raise_if_finished
@_attach
def link_artifact(
self,
artifact: Artifact,
target_path: str,
aliases: list[str] | None = None,
) -> Artifact:
"""Link the artifact to a collection.
The term “link” refers to pointers that connect where W&B stores the
artifact and where the artifact is accessible in the registry. W&B
does not duplicate artifacts when you link an artifact to a collection.
View linked artifacts in the Registry UI for the specified collection.
Args:
artifact: The artifact object to link to the collection.
target_path: The path of the collection. Path consists of the prefix
"wandb-registry-" along with the registry name and the
collection name `wandb-registry-{REGISTRY_NAME}/{COLLECTION_NAME}`.
aliases: Add one or more aliases to the linked artifact. The
"latest" alias is automatically applied to the most recent artifact
you link.
Returns:
The linked artifact.
"""
from .artifacts._validators import ArtifactPath
if artifact.is_draft() and not artifact._is_draft_save_started():
artifact = self._log_artifact(artifact)
if self._settings._offline:
# TODO: implement offline mode + sync
raise NotImplementedError
# Normalize the target "entity/project/collection" with defaults
# inferred from this run's entity and project, if needed.
#
# HOWEVER, if the target path is a registry collection, avoid setting
# the target entity to the run's entity. Instead, delegate to
# Artifact.link() to resolve the required org entity.
target = ArtifactPath.from_str(target_path)
if not target.is_registry_path():
target = target.with_defaults(prefix=self.entity, project=self.project)
return artifact.link(target.to_str(), aliases)
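    # Usage sketch (registry and collection names are hypothetical):
    #
    #     art = run.log_artifact("./weights.pt", name="weights", type="model")
    #     run.link_artifact(art, "wandb-registry-model/my-collection", aliases=["staging"])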
@_log_to_run
@_raise_if_finished
@_attach
def use_artifact(
self,
artifact_or_name: str | Artifact,
type: str | None = None,
aliases: list[str] | None = None,
use_as: str | None = None,
) -> Artifact:
"""Declare an artifact as an input to a run.
Call `download` or `file` on the returned object to get the contents locally.
Args:
            artifact_or_name: The name of the artifact to use. May be prefixed
                with the name of the project the artifact was logged to
                (as "entity/" or "entity/project/"). If no
                entity is specified in the name, the Run or API setting's entity is used.
Valid names can be in the following forms
- name:version
- name:alias
type: The type of artifact to use.
aliases: Aliases to apply to this artifact
use_as: This argument is deprecated and does nothing.
Returns:
An `Artifact` object.
Examples:
```python
import wandb
run = wandb.init(project="<example>")
# Use an artifact by name and alias
artifact_a = run.use_artifact(artifact_or_name="<name>:<alias>")
# Use an artifact by name and version
artifact_b = run.use_artifact(artifact_or_name="<name>:v<version>")
# Use an artifact by entity/project/name:alias
artifact_c = run.use_artifact(
artifact_or_name="<entity>/<project>/<name>:<alias>"
)
# Use an artifact by entity/project/name:version
artifact_d = run.use_artifact(
artifact_or_name="<entity>/<project>/<name>:v<version>"
)
# Explicitly finish the run since a context manager is not used.
run.finish()
```
"""
from wandb.apis import internal
from wandb.sdk.artifacts.artifact import Artifact
if self._settings._offline:
raise TypeError("Cannot use artifact when in offline mode.")
api = internal.Api(
default_settings={
"entity": self._settings.entity,
"project": self._settings.project,
}
)
api.set_current_run_id(self._settings.run_id)
if use_as is not None:
deprecation.warn_and_record_deprecation(
feature=Deprecated(run__use_artifact_use_as=True),
message=(
"`use_as` argument is deprecated and does not affect the behaviour of `run.use_artifact`"
),
)
if isinstance(artifact_or_name, str):
name = artifact_or_name
public_api = self._public_api()
artifact = public_api._artifact(type=type, name=name)
if type is not None and type != artifact.type:
raise ValueError(
f"Supplied type {type} does not match type {artifact.type} of artifact {artifact.name}"
)
api.use_artifact(
artifact.id,
entity_name=self._settings.entity,
project_name=self._settings.project,
artifact_entity_name=artifact.entity,
artifact_project_name=artifact.project,
)
else:
artifact = artifact_or_name
if aliases is None:
aliases = []
elif isinstance(aliases, str):
aliases = [aliases]
if isinstance(artifact_or_name, Artifact) and artifact.is_draft():
if use_as is not None:
wandb.termwarn(
"Indicating use_as is not supported when using a draft artifact"
)
self._log_artifact(
artifact,
aliases=aliases,
is_user_created=True,
use_after_commit=True,
)
artifact.wait()
elif isinstance(artifact, Artifact) and not artifact.is_draft():
api.use_artifact(
artifact.id,
artifact_entity_name=artifact.entity,
artifact_project_name=artifact.project,
)
else:
raise ValueError(
'You must pass an artifact name (e.g. "pedestrian-dataset:v1"), '
"an instance of `wandb.Artifact`, or `wandb.Api().artifact()` to `use_artifact`"
)
if self._backend and self._backend.interface:
self._backend.interface.publish_use_artifact(artifact)
return artifact
@_log_to_run
@_raise_if_finished
@_attach
def log_artifact(
self,
artifact_or_path: Artifact | StrPath,
name: str | None = None,
type: str | None = None,
aliases: list[str] | None = None,
tags: list[str] | None = None,
) -> Artifact:
"""Declare an artifact as an output of a run.
Args:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
tags: (list, optional) Tags to apply to this artifact, if any.
Returns:
An `Artifact` object.
"""
return self._log_artifact(
artifact_or_path,
name=name,
type=type,
aliases=aliases,
tags=tags,
)
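    # Usage sketch (path and names are hypothetical):
    #
    #     run.log_artifact("./model.pt", name="my-model", type="model", aliases=["best"])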
@_log_to_run
@_raise_if_finished
@_attach
def upsert_artifact(
self,
artifact_or_path: Artifact | str,
name: str | None = None,
type: str | None = None,
aliases: list[str] | None = None,
distributed_id: str | None = None,
) -> Artifact:
"""Declare (or append to) a non-finalized artifact as output of a run.
Note that you must call run.finish_artifact() to finalize the artifact.
This is useful when distributed jobs need to all contribute to the same artifact.
Args:
artifact_or_path: A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
name: An artifact name. May be prefixed with "entity/project". Defaults
to the basename of the path prepended with the current run ID
if not specified. Valid names can be in the following forms:
- name:version
- name:alias
- digest
type: The type of artifact to log. Common examples include `dataset`, `model`.
aliases: Aliases to apply to this artifact, defaults to `["latest"]`.
distributed_id: Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self._settings.run_group is None and distributed_id is None:
raise TypeError(
"Cannot upsert artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self._settings.run_group or ""
return self._log_artifact(
artifact_or_path,
name=name,
type=type,
aliases=aliases,
distributed_id=distributed_id,
finalize=False,
)
@_log_to_run
@_raise_if_finished
@_attach
def finish_artifact(
self,
artifact_or_path: Artifact | str,
name: str | None = None,
type: str | None = None,
aliases: list[str] | None = None,
distributed_id: str | None = None,
) -> Artifact:
"""Finishes a non-finalized artifact as output of a run.
Subsequent "upserts" with the same distributed ID will result in a new version.
Args:
artifact_or_path: A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: The type of artifact to log, examples include `dataset`, `model`
aliases: Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self._settings.run_group is None and distributed_id is None:
raise TypeError(
"Cannot finish artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self._settings.run_group or ""
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=True,
)
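    # Distributed usage sketch (ids and paths are hypothetical): each worker
    # upserts into the same artifact, and one process finalizes it:
    #
    #     run.upsert_artifact("./shard-0", name="dataset", type="dataset",
    #                         distributed_id="job-123")
    #     ...
    #     run.finish_artifact("./shard-final", name="dataset", type="dataset",
    #                         distributed_id="job-123")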
def _log_artifact(
self,
artifact_or_path: Artifact | StrPath,
name: str | None = None,
type: str | None = None,
aliases: list[str] | None = None,
tags: list[str] | None = None,
distributed_id: str | None = None,
finalize: bool = True,
is_user_created: bool = False,
use_after_commit: bool = False,
) -> Artifact:
from .artifacts._validators import validate_aliases, validate_tags
if not finalize and distributed_id is None:
raise TypeError("Must provide distributed_id if artifact is not finalize")
if aliases is not None:
aliases = validate_aliases(aliases)
# Check if artifact tags are supported
if tags is not None:
tags = validate_tags(tags)
artifact, aliases = self._prepare_artifact(
artifact_or_path, name, type, aliases
)
artifact.metadata = {**artifact.metadata} # triggers validation
artifact.distributed_id = distributed_id
self._assert_can_log_artifact(artifact)
if self._backend and self._backend.interface:
if not self._settings._offline:
handle = self._backend.interface.deliver_artifact(
self,
artifact,
aliases,
tags,
self.step,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
artifact._set_save_handle(handle, self._public_api().client)
else:
self._backend.interface.publish_artifact(
self,
artifact,
aliases,
tags,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
elif self._internal_run_interface:
self._internal_run_interface.publish_artifact(
self,
artifact,
aliases,
tags,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
return artifact
def _public_api(self, overrides: dict[str, str] | None = None) -> PublicApi:
if self._cached_public_api is not None:
return self._cached_public_api
# NOTE: PublicApi is only for type checking, still need to import
from wandb.apis import public
overrides = {"run": self._settings.run_id} # type: ignore
if not self._settings._offline:
overrides["entity"] = self._settings.entity or ""
overrides["project"] = self._settings.project or ""
self._cached_public_api = public.Api(overrides, api_key=self._settings.api_key)
return self._cached_public_api
# TODO(jhr): annotate this
def _assert_can_log_artifact(self, artifact) -> None: # type: ignore
import requests
from wandb.sdk.artifacts.artifact import Artifact
if self._settings._offline:
return
try:
public_api = self._public_api()
entity = public_api.settings["entity"]
project = public_api.settings["project"]
expected_type = Artifact._expected_type(
entity, project, artifact.name, public_api.client
)
except requests.exceptions.RequestException:
# Just return early if there is a network error. This is
# ok, as this function is intended to help catch an invalid
# type early, but not a hard requirement for valid operation.
return
if expected_type is not None and artifact.type != expected_type:
raise ValueError(
f"Artifact {artifact.name} already exists with type '{expected_type}'; "
f"cannot create another with type '{artifact.type}'"
)
if entity and artifact._source_entity and entity != artifact._source_entity:
raise ValueError(
f"Artifact {artifact.name} is owned by entity "
f"'{artifact._source_entity}'; it can't be moved to '{entity}'"
)
if project and artifact._source_project and project != artifact._source_project:
raise ValueError(
f"Artifact {artifact.name} exists in project "
f"'{artifact._source_project}'; it can't be moved to '{project}'"
)
def _prepare_artifact(
self,
artifact_or_path: Artifact | StrPath,
name: str | None = None,
type: str | None = None,
aliases: list[str] | None = None,
) -> tuple[Artifact, list[str]]:
from wandb.sdk.artifacts.artifact import Artifact
if isinstance(artifact_or_path, (str, os.PathLike)):
name = (
name
or f"run-{self._settings.run_id}-{os.path.basename(artifact_or_path)}"
)
artifact = Artifact(name, type or "unspecified")
if os.path.isfile(artifact_or_path):
artifact.add_file(str(artifact_or_path))
elif os.path.isdir(artifact_or_path):
artifact.add_dir(str(artifact_or_path))
elif "://" in str(artifact_or_path):
artifact.add_reference(str(artifact_or_path))
else:
raise ValueError(
"path must be a file, directory or external"
"reference like s3://bucket/path"
)
else:
artifact = artifact_or_path
if not isinstance(artifact, Artifact):
raise TypeError(
"You must pass an instance of wandb.Artifact or a "
"valid file path to log_artifact"
)
artifact.finalize()
return artifact, _resolve_aliases(aliases)
@_log_to_run
@_raise_if_finished
@_attach
def log_model(
self,
path: StrPath,
name: str | None = None,
aliases: list[str] | None = None,
) -> None:
"""Logs a model artifact containing the contents inside the 'path' to a run and marks it as an output to this run.
The name of model artifact can only contain alphanumeric characters,
underscores, and hyphens.
Args:
path: (str) A path to the contents of this model,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
name: A name to assign to the model artifact that
the file contents will be added to. This will default to the
basename of the path prepended with the current run id if
not specified.
aliases: Aliases to apply to the created model artifact,
defaults to `["latest"]`
Raises:
ValueError: If name has invalid special characters.
Returns:
None
"""
self._log_artifact(
artifact_or_path=path, name=name, type="model", aliases=aliases
)
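    # Usage sketch (path and name are hypothetical):
    #
    #     run.log_model(path="./checkpoints", name="resnet50", aliases=["prod"])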
@_log_to_run
@_raise_if_finished
@_attach
def use_model(self, name: str) -> FilePathStr:
"""Download the files logged in a model artifact 'name'.
Args:
name: A model artifact name. 'name' must match the name of an existing logged
model artifact. May be prefixed with `entity/project/`. Valid names
can be in the following forms
- model_artifact_name:version
- model_artifact_name:alias
Returns:
path (str): Path to downloaded model artifact file(s).
Raises:
AssertionError: If model artifact 'name' is of a type that does
not contain the substring 'model'.
"""
if self._settings._offline:
# Downloading artifacts is not supported when offline.
raise RuntimeError("`use_model` not supported in offline mode.")
artifact = self.use_artifact(artifact_or_name=name)
if "model" not in str(artifact.type.lower()):
raise AssertionError(
"You can only use this method for 'model' artifacts."
" For an artifact to be a 'model' artifact, its type property"
" must contain the substring 'model'."
)
path = artifact.download()
# If returned directory contains only one file, return path to that file
dir_list = os.listdir(path)
if len(dir_list) == 1:
return FilePathStr(os.path.join(path, dir_list[0]))
return path
@_log_to_run
@_raise_if_finished
@_attach
def link_model(
self,
path: StrPath,
registered_model_name: str,
name: str | None = None,
aliases: list[str] | None = None,
) -> Artifact | None:
"""Log a model artifact version and link it to a registered model in the model registry.
Linked model versions are visible in the UI for the specified registered model.
This method will:
        - Check whether a model artifact named 'name' has been logged. If so, use the version that matches
        the files located at 'path' or log a new version. Otherwise, log the files under 'path' as a new
        model artifact named 'name' of type 'model'.
- Check if registered model with name 'registered_model_name' exists in the 'model-registry' project.
If not, create a new registered model with name 'registered_model_name'.
- Link version of model artifact 'name' to registered model, 'registered_model_name'.
- Attach aliases from 'aliases' list to the newly linked model artifact version.
Args:
path: (str) A path to the contents of this model, can be in the
following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
registered_model_name: The name of the registered model that the
model is to be linked to. A registered model is a collection of
model versions linked to the model registry, typically
representing a team's specific ML Task. The entity that this
registered model belongs to will be derived from the run.
name: The name of the model artifact that files in 'path' will be
logged to. This will default to the basename of the path
prepended with the current run id if not specified.
aliases: Aliases that will only be applied on this linked artifact
inside the registered model. The alias "latest" will always be
applied to the latest version of an artifact that is linked.
Raises:
AssertionError: If registered_model_name is a path or
if model artifact 'name' is of a type that does not contain
the substring 'model'.
ValueError: If name has invalid special characters.
Returns:
The linked artifact if linking was successful, otherwise `None`.
"""
name_parts = registered_model_name.split("/")
if len(name_parts) != 1:
raise AssertionError(
"Please provide only the name of the registered model."
" Do not append the entity or project name."
)
project = "model-registry"
target_path = self.entity + "/" + project + "/" + registered_model_name
public_api = self._public_api()
try:
artifact = public_api._artifact(name=f"{name}:latest")
if "model" not in str(artifact.type.lower()):
raise AssertionError(
"You can only use this method for 'model' artifacts."
" For an artifact to be a 'model' artifact, its type"
" property must contain the substring 'model'."
)
artifact = self._log_artifact(
artifact_or_path=path, name=name, type=artifact.type
)
except (ValueError, CommError):
artifact = self._log_artifact(
artifact_or_path=path, name=name, type="model"
)
return self.link_artifact(
artifact=artifact, target_path=target_path, aliases=aliases
)
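    # Model registry flow sketch (names are hypothetical):
    #
    #     run.log_model("./ckpt", name="my-model")
    #     local_path = run.use_model("my-model:latest")
    #     run.link_model("./ckpt", registered_model_name="MyRegisteredModel")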
@_log_to_run
@_raise_if_finished
@_attach
def alert(
self,
title: str,
text: str,
level: str | AlertLevel | None = None,
wait_duration: int | float | timedelta | None = None,
) -> None:
"""Create an alert with the given title and text.
Args:
title: The title of the alert, must be less than 64 characters long.
text: The text body of the alert.
            level: The alert level to use, one of `INFO`, `WARN`, or `ERROR`.
wait_duration: The time to wait (in seconds) before sending another
alert with this title.
"""
level = level or AlertLevel.INFO
level_str: str = level.value if isinstance(level, AlertLevel) else level
if level_str not in {lev.value for lev in AlertLevel}:
raise ValueError("level must be one of 'INFO', 'WARN', or 'ERROR'")
wait_duration = wait_duration or timedelta(minutes=1)
        if isinstance(wait_duration, (int, float)):
wait_duration = timedelta(seconds=wait_duration)
elif not callable(getattr(wait_duration, "total_seconds", None)):
raise TypeError(
"wait_duration must be an int, float, or datetime.timedelta"
)
wait_duration = int(wait_duration.total_seconds() * 1000)
if self._backend and self._backend.interface:
self._backend.interface.publish_alert(title, text, level_str, wait_duration)
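    # Usage sketch:
    #
    #     run.alert(
    #         title="Loss is NaN",
    #         text="Stopping training early.",
    #         level=wandb.AlertLevel.ERROR,
    #         wait_duration=300,  # seconds
    #     )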
def __enter__(self) -> Run:
return self
def __exit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> bool:
exception_raised = exc_type is not None
if exception_raised:
traceback.print_exception(exc_type, exc_val, exc_tb)
exit_code = 1 if exception_raised else 0
self._finish(exit_code=exit_code)
return not exception_raised
@_log_to_run
@_raise_if_finished
@_attach
def mark_preempting(self) -> None:
"""Mark this run as preempting.
Also tells the internal process to immediately report this to server.
"""
if self._backend and self._backend.interface:
self._backend.interface.publish_preempting()
@property
@_log_to_run
@_raise_if_finished
@_attach
def _system_metrics(self) -> dict[str, list[tuple[datetime, float]]]:
"""Returns a dictionary of system metrics.
Returns:
A dictionary of system metrics.
"""
from wandb.proto import wandb_internal_pb2
def pb_to_dict(
system_metrics_pb: wandb_internal_pb2.GetSystemMetricsResponse,
) -> dict[str, list[tuple[datetime, float]]]:
res = {}
for metric, records in system_metrics_pb.system_metrics.items():
measurements = []
for record in records.record:
# Convert timestamp to datetime
dt = datetime.fromtimestamp(
record.timestamp.seconds, tz=timezone.utc
)
dt = dt.replace(microsecond=record.timestamp.nanos // 1000)
measurements.append((dt, record.value))
res[metric] = measurements
return res
if not self._backend or not self._backend.interface:
return {}
handle = self._backend.interface.deliver_get_system_metrics()
try:
result = handle.wait_or(timeout=1)
except TimeoutError:
return {}
else:
try:
response = result.response.get_system_metrics_response
return pb_to_dict(response) if response else {}
except Exception:
logger.exception("Error getting system metrics.")
return {}
# ------------------------------------------------------------------------------
# HEADER
# ------------------------------------------------------------------------------
def _header(self) -> None:
self._header_wandb_version_info()
self._header_sync_info()
self._header_run_info()
def _header_wandb_version_info(self) -> None:
if self._settings.quiet or self._settings.silent:
return
# TODO: add this to a higher verbosity level
self._printer.display(f"Tracking run with wandb version {wandb.__version__}")
def _header_sync_info(self) -> None:
sync_location_msg = f"Run data is saved locally in {self._printer.files(self._settings.sync_dir)}"
if self._settings._offline:
offline_warning = (
f"W&B syncing is set to {self._printer.code('`offline`')} "
f"in this directory. Run {self._printer.code('`wandb online`')} "
f"or set {self._printer.code('WANDB_MODE=online')} "
"to enable cloud syncing."
)
self._printer.display([offline_warning, sync_location_msg])
else:
messages = [sync_location_msg]
if not self._printer.supports_html:
disable_sync_msg = (
f"Run {self._printer.code('`wandb offline`')} to turn off syncing."
)
messages.append(disable_sync_msg)
if not self._settings.quiet and not self._settings.silent:
self._printer.display(messages)
def _header_run_info(self) -> None:
settings, printer = self._settings, self._printer
if settings._offline or settings.silent:
return
run_url = settings.run_url
project_url = settings.project_url
sweep_url = settings.sweep_url
run_state_str = (
"Resuming run"
if settings.resumed or settings.resume_from
else "Syncing run"
)
run_name = settings.run_name
if not run_name:
return
if printer.supports_html:
import wandb.jupyter
if not wandb.jupyter.display_if_magic_is_used(self):
run_line = f"<strong>{printer.link(run_url, run_name)}</strong>"
project_line, sweep_line = "", ""
if not settings.quiet:
doc_html = printer.link(url_registry.url("developer-guide"), "docs")
project_html = printer.link(project_url, "Weights & Biases")
project_line = f"to {project_html} ({doc_html})"
if sweep_url:
sweep_line = f"Sweep page: {printer.link(sweep_url, sweep_url)}"
printer.display(
[f"{run_state_str} {run_line} {project_line}", sweep_line],
)
elif run_name:
printer.display(f"{run_state_str} {printer.name(run_name)}")
if not settings.quiet:
# TODO: add verbosity levels and add this to higher levels
printer.display(
f"{printer.emoji('star')} View project at {printer.link(project_url)}"
)
if sweep_url:
printer.display(
f"{printer.emoji('broom')} View sweep at {printer.link(sweep_url)}"
)
printer.display(
f"{printer.emoji('rocket')} View run at {printer.link(run_url)}",
)
# ------------------------------------------------------------------------------
# FOOTER
# ------------------------------------------------------------------------------
# Note: All the footer methods are static methods since we want to share the printing logic
# with the service execution path that doesn't have access to the run instance
@staticmethod
def _footer(
sampled_history: SampledHistoryResponse | None = None,
final_summary: GetSummaryResponse | None = None,
poll_exit_response: PollExitResponse | None = None,
internal_messages_response: InternalMessagesResponse | None = None,
*,
settings: Settings,
printer: printer.Printer,
) -> None:
Run._footer_history_summary_info(
history=sampled_history,
summary=final_summary,
settings=settings,
printer=printer,
)
Run._footer_sync_info(
poll_exit_response=poll_exit_response,
settings=settings,
printer=printer,
)
Run._footer_log_dir_info(settings=settings, printer=printer)
Run._footer_internal_messages(
internal_messages_response=internal_messages_response,
settings=settings,
printer=printer,
)
@staticmethod
def _footer_sync_info(
poll_exit_response: PollExitResponse | None = None,
*,
settings: Settings,
printer: printer.Printer,
) -> None:
if settings.silent:
return
if settings._offline:
if not settings.quiet:
printer.display(
[
"You can sync this run to the cloud by running:",
printer.code(f"wandb sync {settings.sync_dir}"),
],
)
return
info = []
if settings.run_name and settings.run_url:
info.append(
f"{printer.emoji('rocket')} View run {printer.name(settings.run_name)} at: {printer.link(settings.run_url)}"
)
if settings.project_url:
info.append(
f"{printer.emoji('star')} View project at: {printer.link(settings.project_url)}"
)
if poll_exit_response and poll_exit_response.file_counts:
logger.info("logging synced files")
file_counts = poll_exit_response.file_counts
info.append(
f"Synced {file_counts.wandb_count} W&B file(s), {file_counts.media_count} media file(s), "
f"{file_counts.artifact_count} artifact file(s) and {file_counts.other_count} other file(s)",
)
printer.display(info)
@staticmethod
def _footer_log_dir_info(
*,
settings: Settings,
printer: printer.Printer,
) -> None:
if settings.quiet or settings.silent:
return
log_dir = settings.log_user or settings.log_internal
if log_dir:
log_dir = os.path.dirname(log_dir.replace(os.getcwd(), "."))
printer.display(
f"Find logs at: {printer.files(log_dir)}",
)
@staticmethod
def _footer_history_summary_info(
history: SampledHistoryResponse | None = None,
summary: GetSummaryResponse | None = None,
*,
settings: Settings,
printer: printer.Printer,
) -> None:
if settings.quiet or settings.silent:
return
panel: list[str] = []
if history and (
history_grid := Run._footer_history(history, printer, settings)
):
panel.append(history_grid)
if summary and (
summary_grid := Run._footer_summary(summary, printer, settings)
):
panel.append(summary_grid)
if panel:
printer.display(printer.panel(panel))
@staticmethod
def _footer_history(
history: SampledHistoryResponse,
printer: printer.Printer,
settings: Settings,
) -> str | None:
"""Returns the run history formatted for printing to the console."""
sorted_history_items = sorted(
(item for item in history.item if not item.key.startswith("_")),
key=lambda item: item.key,
)
history_rows: list[list[str]] = []
for item in sorted_history_items:
if len(history_rows) >= settings.max_end_of_run_history_metrics:
break
values = wandb.util.downsample(
item.values_float or item.values_int,
40,
)
if sparkline := printer.sparklines(values):
history_rows.append([item.key, sparkline])
if not history_rows:
return None
if len(history_rows) < len(sorted_history_items):
remaining = len(sorted_history_items) - len(history_rows)
history_rows.append([f"+{remaining:,d}", "..."])
return printer.grid(history_rows, "Run history:")
@staticmethod
def _footer_summary(
summary: GetSummaryResponse,
printer: printer.Printer,
settings: Settings,
) -> str | None:
"""Returns the run summary formatted for printing to the console."""
sorted_summary_items = sorted(
(
item
for item in summary.item
if not item.key.startswith("_") and not item.nested_key
),
key=lambda item: item.key,
)
summary_rows: list[list[str]] = []
skipped = 0
for item in sorted_summary_items:
if len(summary_rows) >= settings.max_end_of_run_summary_metrics:
break
try:
value = json.loads(item.value_json)
except json.JSONDecodeError:
logger.exception(f"Error decoding summary[{item.key!r}]")
skipped += 1
continue
if isinstance(value, str):
                value = value[:20] + ("..." if len(value) >= 20 else "")
summary_rows.append([item.key, value])
elif isinstance(value, numbers.Number):
value = round(value, 5) if isinstance(value, float) else value
summary_rows.append([item.key, str(value)])
else:
skipped += 1
if not summary_rows:
return None
if len(summary_rows) < len(sorted_summary_items) - skipped:
remaining = len(sorted_summary_items) - len(summary_rows) - skipped
summary_rows.append([f"+{remaining:,d}", "..."])
return printer.grid(summary_rows, "Run summary:")
@staticmethod
def _footer_internal_messages(
internal_messages_response: InternalMessagesResponse | None = None,
*,
settings: Settings,
printer: printer.Printer,
) -> None:
if settings.quiet or settings.silent:
return
if not internal_messages_response:
return
for message in internal_messages_response.messages.warning:
printer.display(message, level="warn")
# We define this outside of the run context to support restoring before init
def restore(
name: str,
run_path: str | None = None,
replace: bool = False,
root: str | None = None,
) -> None | TextIO:
"""Download the specified file from cloud storage.
    The file is placed into the current directory or the run directory.
    By default, the file is only downloaded if it doesn't already exist locally.
Args:
name: The name of the file.
        run_path: Optional path to a run to pull files from, e.g. `username/project_name/run_id`.
            If wandb.init has not been called, this is required.
replace: Whether to download the file even if it already exists locally
root: The directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
Returns:
None if it can't find the file, otherwise a file object open for reading.
Raises:
CommError: If W&B can't connect to the W&B backend.
        ValueError: If the file is not found or run_path cannot be determined.
"""
from wandb.apis import public
is_disabled = wandb.run is not None and wandb.run.disabled
run = None if is_disabled else wandb.run
if run_path is None:
if run is not None:
run_path = run.path
else:
raise ValueError(
"run_path required when calling wandb.restore before wandb.init"
)
if root is None:
if run is not None:
root = run.dir
api = public.Api()
api_run = api.run(run_path)
if root is None:
root = os.getcwd()
path = os.path.join(root, name)
if os.path.exists(path) and replace is False:
return open(path)
if is_disabled:
return None
files = api_run.files([name])
if len(files) == 0:
return None
# if the file does not exist, the file has an md5 of 0
if files[0].md5 == "0":
raise ValueError(f"File {name} not found in {run_path or root}.")
return files[0].download(root=root, replace=True)
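# Usage sketch (the run path is hypothetical):
#
#     f = wandb.restore("model.h5", run_path="my-entity/my-project/abc123")
#     if f is not None:
#         print(f.name)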
# propagate our doc string to the runs restore method
try:
Run.restore.__doc__ = restore.__doc__
except AttributeError:
pass
def finish(
exit_code: int | None = None,
quiet: bool | None = None,
) -> None:
"""Finish a run and upload any remaining data.
Marks the completion of a W&B run and ensures all data is synced to the server.
The run's final state is determined by its exit conditions and sync status.
Run States:
- Running: Active run that is logging data and/or sending heartbeats.
- Crashed: Run that stopped sending heartbeats unexpectedly.
- Finished: Run completed successfully (`exit_code=0`) with all data synced.
- Failed: Run completed with errors (`exit_code!=0`).
Args:
exit_code: Integer indicating the run's exit status. Use 0 for success,
any other value marks the run as failed.
quiet: Deprecated. Configure logging verbosity using `wandb.Settings(quiet=...)`.
"""
if wandb.run:
wandb.run.finish(exit_code=exit_code, quiet=quiet)
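# Usage sketch:
#
#     wandb.init(project="demo")
#     wandb.log({"x": 1})
#     wandb.finish()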
|
Run
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/runnables/graph.py
|
{
"start": 3372,
"end": 3775
}
|
class ____(Enum):
"""Enum for different curve styles supported by Mermaid."""
BASIS = "basis"
BUMP_X = "bumpX"
BUMP_Y = "bumpY"
CARDINAL = "cardinal"
CATMULL_ROM = "catmullRom"
LINEAR = "linear"
MONOTONE_X = "monotoneX"
MONOTONE_Y = "monotoneY"
NATURAL = "natural"
STEP = "step"
STEP_AFTER = "stepAfter"
STEP_BEFORE = "stepBefore"
@dataclass
|
CurveStyle
|
python
|
huggingface__transformers
|
src/transformers/cache_utils.py
|
{
"start": 24830,
"end": 26735
}
|
class ____(QuantizedLayer):
def __init__(
self,
nbits: int = 4,
axis_key: int = 0,
axis_value: int = 0,
q_group_size: int = 64,
residual_length: int = 128,
):
super().__init__(
nbits=nbits,
axis_key=axis_key,
axis_value=axis_value,
q_group_size=q_group_size,
residual_length=residual_length,
)
# We need to import quanto here to avoid circular imports due to optimum/quanto/models/transformers_models.py
if is_quanto_greater("0.2.5", accept_dev=True):
from optimum.quanto import MaxOptimizer, qint2, qint4
else:
raise ImportError(
"You need optimum-quanto package version to be greater or equal than 0.2.5 to use `QuantoQuantizedCache`. "
)
if self.nbits not in [2, 4]:
raise ValueError(f"`nbits` for `quanto` backend has to be one of [`2`, `4`] but got {self.nbits}")
if self.axis_key not in [0, -1]:
raise ValueError(f"`axis_key` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_key}")
if self.axis_value not in [0, -1]:
raise ValueError(
f"`axis_value` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_value}"
)
self.qtype = qint4 if self.nbits == 4 else qint2
self.optimizer = MaxOptimizer() # hardcode as it's the only one for per-channel quantization
def _quantize(self, tensor, axis):
from optimum.quanto import quantize_weight
scale, zeropoint = self.optimizer(tensor, self.qtype, axis, self.q_group_size)
qtensor = quantize_weight(tensor, self.qtype, axis, scale, zeropoint, self.q_group_size)
return qtensor
def _dequantize(self, qtensor):
return qtensor.dequantize()
|
QuantoQuantizedLayer
|
python
|
django__django
|
tests/auth_tests/test_management.py
|
{
"start": 53213,
"end": 59323
}
|
class ____(TransactionTestCase):
available_apps = [
"django.contrib.contenttypes",
"django.contrib.auth",
"auth_tests",
]
databases = {"default", "other"}
def setUp(self):
app_config = apps.get_app_config("auth_tests")
models.signals.post_migrate.connect(
self.assertOperationsInjected, sender=app_config
)
self.addCleanup(
models.signals.post_migrate.disconnect,
self.assertOperationsInjected,
sender=app_config,
)
def assertOperationsInjected(self, plan, **kwargs):
for migration, _backward in plan:
operations = iter(migration.operations)
for operation in operations:
if isinstance(operation, migrations.RenameModel):
next_operation = next(operations)
self.assertIsInstance(next_operation, RenamePermission)
self.assertEqual(next_operation.app_label, migration.app_label)
self.assertEqual(next_operation.old_model, operation.old_name)
self.assertEqual(next_operation.new_model, operation.new_name)
def test_permission_rename(self):
ct = ContentType.objects.create(app_label="auth_tests", model="oldmodel")
actions = ["add", "change", "delete", "view"]
for action in actions:
Permission.objects.create(
codename=f"{action}_oldmodel",
name=f"Can {action} old model",
content_type=ct,
)
call_command("migrate", "auth_tests", verbosity=0)
for action in actions:
self.assertFalse(
Permission.objects.filter(codename=f"{action}_oldmodel").exists()
)
self.assertTrue(
Permission.objects.filter(codename=f"{action}_newmodel").exists()
)
call_command(
"migrate",
"auth_tests",
"zero",
database="default",
interactive=False,
verbosity=0,
)
for action in actions:
self.assertTrue(
Permission.objects.filter(codename=f"{action}_oldmodel").exists()
)
self.assertFalse(
Permission.objects.filter(codename=f"{action}_newmodel").exists()
)
def test_permission_rename_other_db(self):
ct = ContentType.objects.using("default").create(
app_label="auth_tests", model="oldmodel"
)
permission = Permission.objects.using("default").create(
codename="add_oldmodel",
name="Can add old model",
content_type=ct,
)
# RenamePermission respects the database.
call_command("migrate", "auth_tests", verbosity=0, database="other")
permission.refresh_from_db()
self.assertEqual(permission.codename, "add_oldmodel")
self.assertFalse(
Permission.objects.using("other").filter(codename="add_oldmodel").exists()
)
self.assertTrue(
Permission.objects.using("other").filter(codename="add_newmodel").exists()
)
@mock.patch(
"django.db.router.allow_migrate_model",
return_value=False,
)
def test_rename_skipped_if_router_disallows(self, _):
ct = ContentType.objects.create(app_label="auth_tests", model="oldmodel")
Permission.objects.create(
codename="change_oldmodel",
name="Can change old model",
content_type=ct,
)
# The rename operation should not be there when disallowed by router.
app_config = apps.get_app_config("auth_tests")
models.signals.post_migrate.disconnect(
self.assertOperationsInjected, sender=app_config
)
call_command(
"migrate",
"auth_tests",
database="default",
interactive=False,
verbosity=0,
)
self.assertTrue(Permission.objects.filter(codename="change_oldmodel").exists())
self.assertFalse(Permission.objects.filter(codename="change_newmodel").exists())
call_command(
"migrate",
"auth_tests",
"zero",
database="default",
interactive=False,
verbosity=0,
)
def test_rename_backward_does_nothing_if_no_permissions(self):
Permission.objects.filter(content_type__app_label="auth_tests").delete()
call_command(
"migrate",
"auth_tests",
"zero",
database="default",
interactive=False,
verbosity=0,
)
self.assertFalse(
Permission.objects.filter(
codename__in=["change_oldmodel", "change_newmodel"]
).exists()
)
def test_rename_permission_conflict(self):
ct = ContentType.objects.create(app_label="auth_tests", model="oldmodel")
Permission.objects.create(
codename="change_newmodel",
name="Can change new model",
content_type=ct,
)
Permission.objects.create(
codename="change_oldmodel",
name="Can change old model",
content_type=ct,
)
call_command(
"migrate",
"auth_tests",
database="default",
interactive=False,
verbosity=0,
)
self.assertTrue(
Permission.objects.filter(
codename="change_oldmodel",
name="Can change old model",
).exists()
)
self.assertEqual(
Permission.objects.filter(
codename="change_newmodel",
name="Can change new model",
).count(),
1,
)
call_command(
"migrate",
"auth_tests",
"zero",
database="default",
interactive=False,
verbosity=0,
)
|
PermissionRenameOperationsTests
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_utils.py
|
{
"start": 17800,
"end": 20366
}
|
class ____(_fixtures.FixtureTest):
run_inserts = None
def _cases():
return testing.combinations(
(orm_util,), (Session,), argnames="ormutil"
)
@_cases()
def test_identity_key_1(self, ormutil):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
key = ormutil.identity_key(User, [1])
eq_(key, (User, (1,), None))
key = ormutil.identity_key(User, ident=[1])
eq_(key, (User, (1,), None))
@_cases()
def test_identity_key_scalar(self, ormutil):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
key = ormutil.identity_key(User, 1)
eq_(key, (User, (1,), None))
key = ormutil.identity_key(User, ident=1)
eq_(key, (User, (1,), None))
@_cases()
def test_identity_key_2(self, ormutil):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
s = fixture_session()
u = User(name="u1")
s.add(u)
s.flush()
key = ormutil.identity_key(instance=u)
eq_(key, (User, (u.id,), None))
@_cases()
@testing.combinations("dict", "row", "mapping", argnames="rowtype")
def test_identity_key_3(self, ormutil, rowtype):
"""test a real Row works with identity_key.
this was broken w/ 1.4 future mode as we are assuming a mapping
here. to prevent regressions, identity_key now accepts any of
dict, RowMapping, Row for the "row".
found_during_type_annotation
"""
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
if rowtype == "dict":
row = {users.c.id: 1, users.c.name: "Frank"}
elif rowtype in ("mapping", "row"):
row = result.result_tuple([users.c.id, users.c.name])((1, "Frank"))
if rowtype == "mapping":
row = row._mapping
key = ormutil.identity_key(User, row=row)
eq_(key, (User, (1,), None))
def test_identity_key_token(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
key = orm_util.identity_key(User, [1], identity_token="token")
eq_(key, (User, (1,), "token"))
key = orm_util.identity_key(User, ident=[1], identity_token="token")
eq_(key, (User, (1,), "token"))
|
IdentityKeyTest
|
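As a rough illustration of what an identity key buys you — assuming a toy User class and a plain dict rather than SQLAlchemy's actual session internals — the (class, primary-key tuple, token) triple acts as the lookup key of an identity map:

# Toy identity map keyed by (class, pk tuple, token); illustrative only.
class User:
    def __init__(self, id, name):
        self.id, self.name = id, name

def identity_key(cls, ident, token=None):
    # Normalize a scalar primary key to a one-element tuple, as the ORM does.
    if not isinstance(ident, (tuple, list)):
        ident = (ident,)
    return (cls, tuple(ident), token)

identity_map = {}
u = User(1, "u1")
identity_map[identity_key(User, u.id)] = u
assert identity_map[identity_key(User, (1,))] is u
assert identity_key(User, 1, token="token") == (User, (1,), "token")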
python
|
spack__spack
|
lib/spack/spack/util/windows_registry.py
|
{
"start": 5342,
"end": 6221
}
|
class ____(RegistryKey):
"""Subclass of RegistryKey to represent the prebaked, always open registry HKEY constants"""
def __init__(self, hkey_constant):
hkey_name = hkey_constant
        # This class is instantiated at module import time. On non-Windows
        # platforms, winreg will not have been imported at that point, so we
        # can't reference winreg yet; the handle is None for now to avoid
        # invalid references to the module. _handle, coupled with the hkey
        # property, provides a workaround to prevent null references.
super(_HKEY_CONSTANT, self).__init__(hkey_name, None)
def _get_hkey(self, key):
return getattr(winreg, key)
@property
def hkey(self):
if not self._handle:
self._handle = self._get_hkey(self.path)
return self._handle
|
_HKEY_CONSTANT
|
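The deferred hkey lookup above is an instance of the lazy-attribute pattern: postpone a platform-dependent resolution until first access, then cache it. A minimal standalone sketch — the LazyHandle class and its resolve callable are invented for illustration:

# Lazy-attribute sketch: the handle is resolved on first access, then cached.
class LazyHandle:
    def __init__(self, name, resolve):
        self._name = name
        self._resolve = resolve   # called lazily, e.g. getattr(winreg, name)
        self._handle = None

    @property
    def handle(self):
        if self._handle is None:
            self._handle = self._resolve(self._name)
        return self._handle

calls = []
h = LazyHandle("HKEY_LOCAL_MACHINE", lambda n: calls.append(n) or object())
assert not calls                          # nothing resolved at construction time
_ = h.handle
_ = h.handle
assert calls == ["HKEY_LOCAL_MACHINE"]    # resolved exactly once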
python
|
pytorch__pytorch
|
torch/distributed/elastic/agent/server/api.py
|
{
"start": 5112,
"end": 7684
}
|
class ____:
"""A worker instance.
Contrast this with ``WorkerSpec`` that represents the specifications of a
worker. A ``Worker`` is created from a ``WorkerSpec``. A ``Worker`` is to
a ``WorkerSpec`` as an object is to a class.
The ``id`` of the worker is interpreted
by the specific implementation of ``ElasticAgent``. For a local
agent, it could be the ``pid (int)`` of the worker, for a remote
agent it could be encoded as ``host:port (string)``.
Args:
id (Any): uniquely identifies a worker (interpreted by the agent)
local_rank (int): local rank of the worker
global_rank (int): global rank of the worker
role_rank (int): rank of the worker across all workers that have the same role
world_size (int): number of workers (globally)
role_world_size (int): number of workers that have the same role
"""
__slots__ = [
"id",
"local_rank",
"global_rank",
"role_rank",
"world_size",
"role_world_size",
]
def __init__(
self,
local_rank: int,
global_rank: int = -1,
role_rank: int = -1,
world_size: int = -1,
role_world_size: int = -1,
):
# unique identifier for this worker
self.id: Any = None
# rank of the worker among workers with the same role being monitored
# by the same ``agent`` instance.
self.local_rank: int = local_rank
# rank of the worker among all the workers across all roles
# across all ``agent`` instances.
# Global rank is not stable between re-rendezvous.
self.global_rank: int = global_rank
# rank of the worker among all the workers with the same role
# across all ``agent`` instances.
# Role rank is not stable between re-rendezvous.
self.role_rank: int = role_rank
# total number of workers (globally). Due to elasticity
# the world size may change between re-rendezvous.
self.world_size: int = world_size
# total number of workers that share the same role. Due to elasticity
# the role world size may change between re-rendezvous.
self.role_world_size: int = role_world_size
def __str__(self):
return (
f"local_rank={self.local_rank},global_rank={self.global_rank}"
f",role_rank={self.role_rank},world_size={self.world_size}"
f",role_world_size={self.role_world_size}"
)
def __repr__(self):
return str(self)
|
Worker
|
python
|
pypa__warehouse
|
tests/functional/manage/test_views.py
|
{
"start": 701,
"end": 4419
}
|
class ____:
def test_save_account(self, pyramid_services, user_service, db_request):
breach_service = pretend.stub()
organization_service = pretend.stub()
pyramid_services.register_service(user_service, IUserService, None)
pyramid_services.register_service(
breach_service, IPasswordBreachedService, None
)
pyramid_services.register_service(
organization_service, IOrganizationService, None
)
user = UserFactory.create(name="old name")
EmailFactory.create(primary=True, verified=True, public=True, user=user)
db_request.user = user
db_request.method = "POST"
db_request.path = "/manage/accounts/"
db_request.POST = MultiDict({"name": "new name", "public_email": ""})
views.ManageVerifiedAccountViews(db_request).save_account()
user = user_service.get_user(user.id)
assert user.name == "new name"
assert user.public_email is None
def test_changing_password_succeeds(self, webtest, socket_enabled):
"""A user can log in, and change their password."""
# create a User
user = UserFactory.create(
with_verified_primary_email=True,
with_terms_of_service_agreement=True,
clear_pwd="password",
)
UserUniqueLoginFactory.create(
user=user, ip_address=REMOTE_ADDR, status=UniqueLoginStatus.CONFIRMED
)
# visit login page
login_page = webtest.get("/account/login/", status=HTTPStatus.OK)
# Fill & submit the login form
login_form = login_page.forms["login-form"]
anonymous_csrf_token = login_form["csrf_token"].value
login_form["username"] = user.username
login_form["password"] = "password"
login_form["csrf_token"] = anonymous_csrf_token
two_factor_page = login_form.submit().follow(status=HTTPStatus.OK)
two_factor_form = two_factor_page.forms["totp-auth-form"]
two_factor_form["csrf_token"] = anonymous_csrf_token
# Generate the correct TOTP value from the known secret
two_factor_form["totp_value"] = (
_get_totp(user.totp_secret).generate(time.time()).decode()
)
logged_in = two_factor_form.submit().follow(status=HTTPStatus.OK)
assert logged_in.html.find(
"title", string="Warehouse · The Python Package Index"
)
# Now visit the change password page
change_password_page = logged_in.goto("/manage/account/", status=HTTPStatus.OK)
# Ensure that the CSRF token changes once logged in and a session is established
logged_in_csrf_token = change_password_page.html.find(
"input", {"name": "csrf_token"}
)["value"]
assert anonymous_csrf_token != logged_in_csrf_token
# Fill & submit the change password form
new_password = faker.Faker().password() # a secure-enough password for testing
change_password_form = change_password_page.forms["change-password-form"]
change_password_form["csrf_token"] = logged_in_csrf_token
change_password_form["password"] = "password"
change_password_form["new_password"] = new_password
change_password_form["password_confirm"] = new_password
change_password_form.submit().follow(status=HTTPStatus.OK)
# Request the JavaScript-enabled flash messages directly to get the message
resp = webtest.get("/_includes/unauthed/flash-messages/", status=HTTPStatus.OK)
success_message = resp.html.find("span", {"class": "notification-bar__message"})
assert success_message.text == "Password updated"
|
TestManageAccount
|
python
|
Delgan__loguru
|
tests/test_coroutine_sink.py
|
{
"start": 289,
"end": 15759
}
|
class ____:
async def __call__(self, msg):
await asyncio.sleep(0.01)
print(msg, end="")
def test_coroutine_function(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_async_callable_sink(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(AsyncWriter(), format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_concurrent_execution(capsys):
async def task(i):
logger.debug("=> {}", i)
async def main():
tasks = [task(i) for i in range(10)]
await asyncio.gather(*tasks)
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(main())
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("=> %d" % i for i in range(10))
def test_recursive_coroutine(capsys):
async def task(i):
if i == 0:
await logger.complete()
return
logger.info("{}!", i)
await task(i - 1)
logger.add(async_writer, format="{message}")
asyncio.run(task(9))
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("%d!" % i for i in range(1, 10))
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_using_another_event_loop(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
with new_event_loop_context() as loop:
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_run_multiple_different_loops(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
logger.add(async_writer, format="{message}", loop=None)
asyncio.run(worker(1))
asyncio.run(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_run_multiple_same_loop(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
with new_event_loop_context() as loop:
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
def test_using_sink_without_running_loop_not_none(capsys):
with new_event_loop_context() as loop:
logger.add(sys.stderr, format="=> {message}")
logger.add(async_writer, format="{message}", loop=loop)
logger.info("A message")
loop.run_until_complete(logger.complete())
out, err = capsys.readouterr()
assert err == "=> A message\n"
assert out == "A message\n"
def test_using_sink_without_running_loop_none(capsys):
with new_event_loop_context() as loop:
logger.add(sys.stderr, format="=> {message}")
logger.add(async_writer, format="{message}", loop=None)
logger.info("A message")
loop.run_until_complete(logger.complete())
out, err = capsys.readouterr()
assert err == "=> A message\n"
assert out == ""
@pytest.mark.skipif(sys.version_info >= (3, 16), reason="The 'set_event_loop' function is removed")
def test_global_loop_not_used(capsys):
with new_event_loop_context() as loop:
with set_event_loop_context(loop):
logger.add(sys.stderr, format="=> {message}")
logger.add(async_writer, format="{message}", loop=None)
logger.info("A message")
loop.run_until_complete(logger.complete())
out, err = capsys.readouterr()
assert err == "=> A message\n"
assert out == ""
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_complete_in_another_run(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
with new_event_loop_context() as loop:
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_tasks_cancelled_on_remove(capsys):
logger.add(async_writer, format="{message}", catch=False)
async def foo():
logger.info("A")
logger.info("B")
logger.info("C")
logger.remove()
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_remove_without_tasks(capsys):
logger.add(async_writer, format="{message}", catch=False)
logger.remove()
async def foo():
logger.info("!")
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_without_tasks(capsys):
logger.add(async_writer, catch=False)
async def worker():
await logger.complete()
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_stream_noop(capsys):
logger.add(sys.stderr, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_complete_file_noop(tmp_path):
filepath = tmp_path / "test.log"
logger.add(filepath, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert filepath.read_text() == "A\nB\nC\nD\n"
def test_complete_function_noop():
out = ""
def write(msg):
nonlocal out
out += msg
logger.add(write, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert out == "A\nB\nC\nD\n"
def test_complete_standard_noop(capsys):
logger.add(logging.StreamHandler(sys.stderr), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_exception_in_coroutine_caught(capsys):
async def sink(msg):
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_not_caught(capsys, caplog):
async def sink(msg):
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type is ValueError
assert str(exc_value) == "Oh no"
def test_exception_in_coroutine_during_complete_caught(capsys):
async def sink(msg):
await asyncio.sleep(0.1)
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_during_complete_not_caught(capsys, caplog):
async def sink(msg):
await asyncio.sleep(0.1)
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type is ValueError
assert str(exc_value) == "Oh no"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop(capsys):
with new_event_loop_context() as loop:
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_enqueue_coroutine_from_inside_coroutine_without_loop(capsys):
with new_event_loop_context() as loop:
async def worker():
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_custom_complete_function(capsys):
awaited = False
class Handler:
def write(self, message):
print(message, end="")
async def complete(self):
nonlocal awaited
awaited = True
async def worker():
logger.info("A")
await logger.complete()
logger.add(Handler(), catch=False, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
assert awaited
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop(capsys, loop_is_none):
with new_event_loop_context() as main_loop, new_event_loop_context() as second_loop:
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
main_loop.run_until_complete(worker_1())
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_complete_from_multiple_threads_loop_is_none(capsys):
async def worker(i):
for _ in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
logger.add(sink, catch=False, format="{message}")
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
def test_complete_from_multiple_threads_loop_is_not_none(capsys):
async def worker(i):
for _ in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
with new_event_loop_context() as loop:
logger.add(sink, catch=False, format="{message}", loop=loop)
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
def test_complete_and_sink_write_concurrency():
count = 1000
n = 0
async def sink(message):
nonlocal n
n += 1
async def some_task():
for _ in range(count):
logger.info("Message")
await asyncio.sleep(0)
async def another_task():
for _ in range(count):
await logger.complete()
await asyncio.sleep(0)
async def main():
logger.remove()
logger.add(sink, catch=False)
await asyncio.gather(some_task(), another_task())
asyncio.run(main())
assert n == count
def test_complete_and_contextualize_concurrency():
called = False
async def main():
logging_event = asyncio.Event()
contextualize_event = asyncio.Event()
async def sink(message):
nonlocal called
logging_event.set()
await contextualize_event.wait()
called = True
async def logging_task():
logger.info("Message")
await logger.complete()
async def contextualize_task():
with logger.contextualize():
contextualize_event.set()
await logging_event.wait()
logger.remove()
logger.add(sink, catch=False)
await asyncio.gather(logging_task(), contextualize_task())
asyncio.run(main())
assert called
async def async_subworker(logger_):
logger_.info("Child")
await logger_.complete()
async def async_mainworker(logger_):
logger_.info("Main")
await logger_.complete()
def subworker(logger_):
with new_event_loop_context() as loop:
loop.run_until_complete(async_subworker(logger_))
|
AsyncWriter
|
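The tests above revolve around one contract: messages handed to a coroutine sink are scheduled as tasks on an event loop, and logger.complete() awaits them. A minimal sketch of that schedule-and-drain pattern using only asyncio — the MiniAsyncLogger class is invented for illustration and is not Loguru's implementation:

# Schedule-and-drain sketch for a coroutine sink; illustrative, not Loguru internals.
import asyncio

class MiniAsyncLogger:
    def __init__(self, sink):
        self._sink = sink
        self._tasks = set()

    def log(self, message):
        # Each message becomes a task on the running loop, like a coroutine sink.
        task = asyncio.get_running_loop().create_task(self._sink(message))
        self._tasks.add(task)
        task.add_done_callback(self._tasks.discard)

    async def complete(self):
        # Drain: wait for every pending sink task, mirroring logger.complete().
        await asyncio.gather(*self._tasks)

async def main():
    out = []
    async def sink(msg):
        await asyncio.sleep(0.01)
        out.append(msg)
    logger = MiniAsyncLogger(sink)
    logger.log("A")
    logger.log("B")
    await logger.complete()
    assert sorted(out) == ["A", "B"]

asyncio.run(main())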
python
|
openai__openai-python
|
src/openai/_exceptions.py
|
{
"start": 3084,
"end": 3216
}
|
class ____(APIStatusError):
status_code: Literal[400] = 400 # pyright: ignore[reportIncompatibleVariableOverride]
|
BadRequestError
|
python
|
sympy__sympy
|
sympy/holonomic/recurrence.py
|
{
"start": 9036,
"end": 10344
}
|
class ____:
"""
A Holonomic Sequence is a type of sequence satisfying a linear homogeneous
    recurrence relation with polynomial coefficients. Alternatively, a sequence
    is holonomic if and only if its generating function is a holonomic function.
"""
def __init__(self, recurrence, u0=[]):
self.recurrence = recurrence
if not isinstance(u0, list):
self.u0 = [u0]
else:
self.u0 = u0
if len(self.u0) == 0:
self._have_init_cond = False
else:
self._have_init_cond = True
self.n = recurrence.parent.base.gens[0]
def __repr__(self):
str_sol = 'HolonomicSequence(%s, %s)' % ((self.recurrence).__repr__(), sstr(self.n))
if not self._have_init_cond:
return str_sol
else:
cond_str = ''
            for i, val in enumerate(self.u0):
                cond_str += ', u(%s) = %s' % (sstr(i), sstr(val))
sol = str_sol + cond_str
return sol
__str__ = __repr__
def __eq__(self, other):
if self.recurrence != other.recurrence or self.n != other.n:
return False
if self._have_init_cond and other._have_init_cond:
return self.u0 == other.u0
return True
|
HolonomicSequence
|
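For a concrete instance of such a sequence: u(n+1) = (n+1) * u(n) with u(0) = 1 is a recurrence with polynomial coefficients whose solution is the factorial sequence. A short sketch that unrolls it in plain Python, independent of sympy's HolonomicSequence class:

# Unroll u(n+1) = (n + 1) * u(n), u(0) = 1: the factorial sequence.
import math

def unroll(n_terms):
    u = [1]
    for n in range(n_terms - 1):
        u.append((n + 1) * u[n])
    return u

assert unroll(6) == [math.factorial(n) for n in range(6)]  # 1, 1, 2, 6, 24, 120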
python
|
falconry__falcon
|
falcon/media/multipart.py
|
{
"start": 20808,
"end": 23778
}
|
class ____:
"""Defines a set of configurable multipart form parser options.
An instance of this class is exposed via the
:attr:`MultipartFormHandler.parse_options
<falcon.media.MultipartFormHandler.parse_options>` attribute.
The handler's options are also passed down to every :class:`BodyPart`
it instantiates.
See also: :ref:`multipart_parser_conf`.
"""
default_charset: str
"""The default character encoding for
:meth:`text fields <BodyPart.get_text>` (default ``utf-8``).
"""
max_body_part_count: int
"""The maximum number of body parts in the form (default ``64``).
If the form contains more parts than this number, an instance of
:class:`.MultipartParseError` will be raised. If this option is set to 0,
no limit will be imposed by the parser.
"""
max_secure_filename_length: int | None
"""The maximum number characters for a secure filename (default ``None``).
The value of this option is passed as the `max_length` keyword argument to
:func:`~.secure_filename` when evaluating the
:attr:`BodyPart.secure_filename` property.
Note:
In Falcon 5.0, the default value of this option will change to a
reasonable finite number (e.g., 64 or 96) of characters.
.. versionadded:: 4.1
"""
max_body_part_buffer_size: int
"""The maximum number of bytes to buffer and return when the
:meth:`BodyPart.get_data` method is called (default ``1 MiB``).
If the body part size exceeds this value, an instance of
:class:`.MultipartParseError` will be raised.
"""
max_body_part_headers_size: int
"""The maximum size (in bytes) of the body part headers structure
(default ``8192``).
If the body part headers size exceeds this value, an instance of
:class:`.MultipartParseError` will be raised.
"""
media_handlers: Handlers
"""A dict-like object for configuring the media-types to handle.
By default, handlers are provided for the ``application/json`` and
``application/x-www-form-urlencoded`` media types.
"""
if TYPE_CHECKING:
_DEFAULT_HANDLERS: ClassVar[Handlers]
else:
_DEFAULT_HANDLERS = None
__slots__ = (
'default_charset',
'max_body_part_buffer_size',
'max_body_part_count',
'max_body_part_headers_size',
'max_secure_filename_length',
'media_handlers',
)
def __init__(self) -> None:
self.default_charset = 'utf-8'
self.max_body_part_buffer_size = 1024 * 1024
self.max_body_part_count = 64
self.max_body_part_headers_size = 8192
self.max_secure_filename_length = None
# NOTE(myusko,vytas): Here we create a copy of _DEFAULT_HANDLERS in
# order to prevent the modification of the class variable whenever
# parse_options.media_handlers are customized.
self.media_handlers = self._DEFAULT_HANDLERS.copy()
|
MultipartParseOptions
|
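The _DEFAULT_HANDLERS.copy() in the constructor above is the standard defense against shared mutable class state. A stripped-down sketch of why the copy matters — the Options class and its dict handlers are placeholders, not Falcon's types:

# Why per-instance copies of a class-level default matter (placeholder types).
class Options:
    _DEFAULT_HANDLERS = {"application/json": "json-handler"}

    def __init__(self):
        # Copy so customizing one instance never leaks into the class default.
        self.media_handlers = self._DEFAULT_HANDLERS.copy()

a, b = Options(), Options()
a.media_handlers["text/csv"] = "csv-handler"
assert "text/csv" not in b.media_handlers
assert "text/csv" not in Options._DEFAULT_HANDLERS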
python
|
doocs__leetcode
|
solution/2400-2499/2413.Smallest Even Multiple/Solution.py
|
{
"start": 0,
"end": 109
}
|
class ____:
def smallestEvenMultiple(self, n: int) -> int:
return n if n % 2 == 0 else n * 2
|
Solution
|
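The one-liner above is equivalent to taking the least common multiple of n and 2. A quick cross-check against the standard library (math.lcm requires Python 3.9+):

# Cross-check: the smallest even multiple of n is lcm(n, 2).
import math

for n in range(1, 151):
    expected = n if n % 2 == 0 else n * 2
    assert math.lcm(n, 2) == expected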
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pathspec/pathspec.py
|
{
"start": 207,
"end": 7027
}
|
class ____(object):
"""
The :class:`PathSpec` class is a wrapper around a list of compiled
:class:`.Pattern` instances.
"""
def __init__(self, patterns):
"""
Initializes the :class:`PathSpec` instance.
*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
yields each compiled pattern (:class:`.Pattern`).
"""
self.patterns = patterns if isinstance(patterns, Collection) else list(patterns)
"""
*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
contains the compiled patterns.
"""
def __eq__(self, other):
"""
Tests the equality of this path-spec with *other* (:class:`PathSpec`)
by comparing their :attr:`~PathSpec.patterns` attributes.
"""
if isinstance(other, PathSpec):
paired_patterns = izip_longest(self.patterns, other.patterns)
return all(a == b for a, b in paired_patterns)
else:
return NotImplemented
def __len__(self):
"""
Returns the number of compiled patterns this path-spec contains
(:class:`int`).
"""
return len(self.patterns)
def __add__(self, other):
"""
        Combines the :attr:`PathSpec.patterns` patterns from two
:class:`PathSpec` instances.
"""
if isinstance(other, PathSpec):
return PathSpec(self.patterns + other.patterns)
else:
return NotImplemented
def __iadd__(self, other):
"""
        Adds the :attr:`PathSpec.patterns` patterns from one :class:`PathSpec`
instance to this instance.
"""
if isinstance(other, PathSpec):
self.patterns += other.patterns
return self
else:
return NotImplemented
@classmethod
def from_lines(cls, pattern_factory, lines):
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
"""
if isinstance(pattern_factory, string_types):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if not util._is_iterable(lines):
raise TypeError("lines:{!r} is not an iterable.".format(lines))
lines = [pattern_factory(line) for line in lines if line]
return cls(lines)
def match_file(self, file, separators=None):
"""
Matches the file to this path-spec.
*file* (:class:`str` or :class:`~pathlib.PurePath`) is the file path
to be matched against :attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
norm_file = util.normalize_file(file, separators=separators)
return util.match_file(self.patterns, norm_file)
def match_entries(self, entries, separators=None):
"""
Matches the entries to this path-spec.
*entries* (:class:`~collections.abc.Iterable` of :class:`~util.TreeEntry`)
contains the entries to be matched against :attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched entries (:class:`~collections.abc.Iterable` of
:class:`~util.TreeEntry`).
"""
if not util._is_iterable(entries):
raise TypeError("entries:{!r} is not an iterable.".format(entries))
entry_map = util._normalize_entries(entries, separators=separators)
match_paths = util.match_files(self.patterns, iterkeys(entry_map))
for path in match_paths:
yield entry_map[path]
def match_files(self, files, separators=None):
"""
Matches the files to this path-spec.
        *files* (:class:`~collections.abc.Iterable` of :class:`str`; or
:class:`pathlib.PurePath`) contains the file paths to be matched
against :attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
if not util._is_iterable(files):
raise TypeError("files:{!r} is not an iterable.".format(files))
file_map = util.normalize_files(files, separators=separators)
matched_files = util.match_files(self.patterns, iterkeys(file_map))
for path in matched_files:
yield file_map[path]
def match_tree_entries(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`; or :class:`pathlib.PurePath`) is the root
directory to search.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree_entries` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. See
:func:`~pathspec.util.iter_tree_files` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
entries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)
return self.match_entries(entries)
def match_tree_files(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`; or :class:`pathlib.PurePath`) is the root
directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree_files` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolic links that resolve to directories. See
:func:`~pathspec.util.iter_tree_files` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
files = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)
return self.match_files(files)
# Alias `match_tree_files()` as `match_tree()`.
match_tree = match_tree_files
|
PathSpec
|
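For a feel of what a compiled path-spec does, here is a much smaller stand-in built on the standard library's fnmatch: compile a list of glob lines once, then test normalized paths against all of them. This is a sketch of the idea only; it implements neither gitignore semantics nor the pathspec API.

# Minimal path matching sketch with stdlib fnmatch (not gitignore semantics).
import fnmatch
import posixpath

def compile_lines(lines):
    return [line.strip() for line in lines if line.strip()]

def match_file(patterns, path):
    # Normalize to forward slashes, as path-specs conventionally expect.
    norm = posixpath.normpath(path.replace("\\", "/"))
    return any(fnmatch.fnmatch(norm, pat) for pat in patterns)

patterns = compile_lines(["*.pyc", "build/*"])
assert match_file(patterns, "pkg\\module.pyc")   # backslashes normalized
assert match_file(patterns, "build/lib.so")
assert not match_file(patterns, "src/module.py")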
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1053621,
"end": 1054357
}
|
class ____(sgqlc.types.relay.Connection):
"""The connection type for WorkflowRun."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("WorkflowRunEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("WorkflowRun"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
WorkflowRunConnection
|
python
|
getsentry__sentry
|
src/sentry/eventtypes/nel.py
|
{
"start": 33,
"end": 249
}
|
class ____(DefaultEvent):
key = "nel"
def extract_metadata(self, data):
metadata = super().extract_metadata(data)
metadata["uri"] = data.get("request").get("url")
return metadata
|
NelEvent
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_indexing.py
|
{
"start": 2269,
"end": 2790
}
|
class ____(Benchmark):
params = ['C', 'F']
param_names = ['order']
def setup(self, order):
shape = (64, 64, 64)
# emulate gh-30156: boolean assignment into a Fortran/C array
self.base = np.zeros(shape, dtype=np.uint32, order=order)
mask = np.random.RandomState(0).rand(*self.base.shape) > 0.5
self.mask = mask.copy(order)
self.value = np.uint32(7)
def time_boolean_assign_scalar(self, order):
self.base[self.mask] = self.value
|
BooleanAssignmentOrder
|
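To reproduce what this benchmark measures outside of asv, the same boolean-mask assignment can be timed directly with timeit; the shape and iteration count below are arbitrary choices for illustration.

# Time boolean-mask assignment for C- vs Fortran-ordered arrays (illustrative).
import timeit
import numpy as np

for order in ("C", "F"):
    base = np.zeros((64, 64, 64), dtype=np.uint32, order=order)
    mask = np.random.RandomState(0).rand(*base.shape) > 0.5
    mask = mask.copy(order)
    t = timeit.timeit(lambda: base.__setitem__(mask, np.uint32(7)), number=100)
    print(f"order={order}: {t:.4f}s for 100 assignments")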
python
|
django__django
|
django/forms/widgets.py
|
{
"start": 11238,
"end": 11804
}
|
class ____(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = "django/forms/widgets/input.html"
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop("type", self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context["widget"]["type"] = self.input_type
return context
|
Input
|
python
|
neetcode-gh__leetcode
|
python/0344-reverse-string.py
|
{
"start": 0,
"end": 280
}
|
class ____:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
l = 0
r = len(s) - 1
while l < r:
s[l],s[r] = s[r],s[l]
l += 1
r -= 1
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/selling-pieces-of-wood.py
|
{
"start": 52,
"end": 657
}
|
class ____(object):
def sellingWood(self, m, n, prices):
"""
:type m: int
:type n: int
:type prices: List[List[int]]
:rtype: int
"""
dp = [[0]*(n+1) for i in xrange(m+1)]
for h, w, p in prices:
dp[h][w] = p
for i in xrange(1, m+1):
for j in xrange(1, n+1):
for k in xrange(1, i//2+1):
dp[i][j] = max(dp[i][j], dp[k][j]+dp[i-k][j])
for k in xrange(1, j//2+1):
dp[i][j] = max(dp[i][j], dp[i][k]+dp[i][j-k])
return dp[m][n]
|
Solution
|
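The record above is Python 2 (xrange). Below is a Python 3 sketch of the same DP — dp[i][j] is the best revenue for an i-by-j piece, maximized over all horizontal and vertical first cuts — with a worked check using the known example where the answer is 19:

# Python 3 port sketch of the wood-cutting DP.
def selling_wood(m, n, prices):
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for h, w, p in prices:
        dp[h][w] = p
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            for k in range(1, i // 2 + 1):       # horizontal cuts
                dp[i][j] = max(dp[i][j], dp[k][j] + dp[i - k][j])
            for k in range(1, j // 2 + 1):       # vertical cuts
                dp[i][j] = max(dp[i][j], dp[i][k] + dp[i][j - k])
    return dp[m][n]

# 3x5 board: e.g. one 1x5 strip cut into 1x4 (2), plus 2x2 + 2x2 + 2x1 (7 + 7 + 3).
assert selling_wood(3, 5, [[1, 4, 2], [2, 2, 7], [2, 1, 3]]) == 19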
python
|
xlwings__xlwings
|
xlwings/__init__.py
|
{
"start": 564,
"end": 4533
}
|
class ____(XlwingsError):
pass
# API
from .main import (
App,
Book,
Chart,
Engine,
Name,
Picture,
Range,
RangeColumns,
RangeRows,
Shape,
Sheet,
apps,
books,
engines,
load,
sheets,
view,
)
from .utils import xlserial_to_datetime as to_datetime
__all__ = (
"App",
"Book",
"Chart",
"Engine",
"Name",
"Picture",
"Range",
"RangeColumns",
"RangeRows",
"Shape",
"Sheet",
"apps",
"books",
"engines",
"load",
"sheets",
"view",
"to_datetime",
)
# Populate engines list
has_pywin32 = False
if sys.platform.startswith("win"):
try:
from . import _xlwindows
engines.add(Engine(impl=_xlwindows.engine))
has_pywin32 = True
except ImportError:
pass
if sys.platform.startswith("darwin"):
try:
from . import _xlmac
engines.add(Engine(impl=_xlmac.engine))
except ImportError:
pass
try:
from .pro import _xlofficejs, _xlremote
engines.add(Engine(impl=_xlremote.engine))
engines.add(Engine(impl=_xlofficejs.engine))
__pro__ = True
except (ImportError, LicenseError, AttributeError):
__pro__ = False
try:
# Separately handled in case the Rust extension is missing
from .pro import _xlcalamine
engines.add(Engine(impl=_xlcalamine.engine))
except (ImportError, LicenseError, AttributeError):
pass
if "excel" in [engine.name for engine in engines]:
# An active engine only really makes sense for the interactive mode with a desktop
# installation of Excel. Still, you could activate an engine explicitly via
# xw.engines["engine_name"].activate() which might be useful for testing purposes.
engines.active = engines["excel"]
# UDFs
if sys.platform.startswith("win") and has_pywin32:
from .com_server import serve
from .udfs import (
get_udf_module,
import_udfs,
xlarg as arg,
xlfunc as func,
xlret as ret,
xlsub as script,
xlsub as sub,
)
# This generates the modules for early-binding under %TEMP%\gen_py\3.x
# generated via makepy.py -i, but using an old minor=2, as it still seems to
# generate the most recent version of it whereas it would fail if the minor is
# higher than what exists on the machine. Allowing it to fail silently, as this is
# only a hard requirement for ComRange in udf.py which is only used for async funcs,
# legacy dynamic arrays, and the 'caller' argument.
try:
from win32com.client import gencache
gencache.EnsureModule(
"{00020813-0000-0000-C000-000000000046}", lcid=0, major=1, minor=2
)
except: # noqa: E722
pass
elif __pro__:
from xlwings.server import arg, func, ret, script # noqa: F401
else:
def func(f=None, *args, **kwargs):
@wraps(f)
def inner(f):
return f
if f is None:
return inner
else:
return inner(f)
def sub(f=None, *args, **kwargs):
@wraps(f)
def inner(f):
return f
if f is None:
return inner
else:
return inner(f)
script = sub
def ret(*args, **kwargs):
def inner(f):
return f
return inner
def arg(*args, **kwargs):
def inner(f):
return f
return inner
def raise_missing_pywin32():
raise ImportError(
"Couldn't find 'pywin32'. Install it via"
"'pip install pywin32' or 'conda install pywin32'."
)
serve = raise_missing_pywin32
get_udf_module = raise_missing_pywin32
import_udfs = raise_missing_pywin32
# This follows the Office Script/Office.js convention to make the constants available
# in the top-level namespace. Should be done for all constants with xlwings 1.0.
from .constants import ObjectHandleIcons # noqa: F401
|
NoSuchObjectError
|
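The no-op func/sub fallbacks above use the optional-parentheses decorator idiom: the same name works both bare (@mark) and called with arguments (@mark(...)). A minimal standalone sketch of the idiom; the mark name and the level option are invented for illustration:

# The optional-parentheses decorator idiom: works as @mark and as @mark(...).
from functools import wraps

def mark(f=None, *args, **kwargs):
    def inner(func):
        @wraps(func)
        def wrapper(*a, **kw):
            return func(*a, **kw)
        return wrapper
    # Bare use (@mark): f is the function, decorate immediately.
    # Called use (@mark(...)): f is None, return the decorator itself.
    return inner if f is None else inner(f)

@mark
def one():
    return 1

@mark(level="debug")   # "level" is an illustrative, unused option
def two():
    return 2

assert one() == 1 and two() == 2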
python
|
huggingface__transformers
|
src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
{
"start": 1373,
"end": 2718
}
|
class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Spectrogram generation loss.
duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
Outputs of the duration predictor.
pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
Outputs of the pitch predictor.
energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
Outputs of the energy predictor.
"""
loss: Optional[torch.FloatTensor] = None
spectrogram: Optional[torch.FloatTensor] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
duration_outputs: Optional[torch.LongTensor] = None
pitch_outputs: Optional[torch.FloatTensor] = None
energy_outputs: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`FastSpeech2ConformerWithHifiGan`].
"""
)
|
FastSpeech2ConformerModelOutput
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_mcp_list_tools_param.py
|
{
"start": 271,
"end": 611
}
|
class ____(TypedDict, total=False):
input_schema: Required[object]
"""The JSON schema describing the tool's input."""
name: Required[str]
"""The name of the tool."""
annotations: Optional[object]
"""Additional annotations about the tool."""
description: Optional[str]
"""The description of the tool."""
|
Tool
|
python
|
spack__spack
|
lib/spack/spack/llnl/util/link_tree.py
|
{
"start": 1295,
"end": 11994
}
|
class ____(fs.BaseDirectoryVisitor):
"""
Visitor that produces actions:
- An ordered list of directories to create in dst
- A list of files to link in dst
- A list of merge conflicts in dst/
"""
def __init__(
self, ignore: Optional[Callable[[str], bool]] = None, normalize_paths: bool = False
):
self.ignore = ignore if ignore is not None else lambda f: False
# On case-insensitive filesystems, normalize paths to detect duplications
self.normalize_paths = normalize_paths
# When mapping <src root> to <dst root>/<projection>, we need to prepend the <projection>
# bit to the relative path in the destination dir.
self.projection: str = ""
# Two files f and g conflict if they are not os.path.samefile(f, g) and they are both
# projected to the same destination file. These conflicts are not necessarily fatal, and
        # can be resolved or ignored (for example, <prefix>/LICENSE or
        # <site-packages>/<namespace>/__init__.py conflicts can be ignored).
self.file_conflicts: List[MergeConflict] = []
# When we have to create a dir where a file is, or a file where a dir is, we have fatal
# errors, listed here.
self.fatal_conflicts: List[MergeConflict] = []
# What directories we have to make; this is an ordered dict, so that we have a fast lookup
# and can run mkdir in order.
self.directories: Dict[str, Tuple[str, str]] = {}
# If the visitor is configured to normalize paths, keep a map of
# normalized path to: original path, root directory + relative path
self._directories_normalized: Dict[str, Tuple[str, str, str]] = {}
# Files to link. Maps dst_rel to (src_root, src_rel). This is an ordered dict, where files
# are guaranteed to be grouped by src_root in the order they were visited.
self.files: Dict[str, Tuple[str, str]] = {}
# If the visitor is configured to normalize paths, keep a map of
# normalized path to: original path, root directory + relative path
self._files_normalized: Dict[str, Tuple[str, str, str]] = {}
def _in_directories(self, proj_rel_path: str) -> bool:
"""
Check if a path is already in the directory list
"""
if self.normalize_paths:
return proj_rel_path.lower() in self._directories_normalized
else:
return proj_rel_path in self.directories
def _directory(self, proj_rel_path: str) -> Tuple[str, str, str]:
"""
Get the directory that is mapped to a path
"""
if self.normalize_paths:
return self._directories_normalized[proj_rel_path.lower()]
else:
return (proj_rel_path, *self.directories[proj_rel_path])
def _del_directory(self, proj_rel_path: str):
"""
Remove a directory from the list of directories
"""
del self.directories[proj_rel_path]
if self.normalize_paths:
del self._directories_normalized[proj_rel_path.lower()]
def _add_directory(self, proj_rel_path: str, root: str, rel_path: str):
"""
Add a directory to the list of directories.
Also stores the normalized version for later lookups
"""
self.directories[proj_rel_path] = (root, rel_path)
if self.normalize_paths:
self._directories_normalized[proj_rel_path.lower()] = (proj_rel_path, root, rel_path)
def _in_files(self, proj_rel_path: str) -> bool:
"""
Check if a path is already in the files list
"""
if self.normalize_paths:
return proj_rel_path.lower() in self._files_normalized
else:
return proj_rel_path in self.files
def _file(self, proj_rel_path: str) -> Tuple[str, str, str]:
"""
Get the file that is mapped to a path
"""
if self.normalize_paths:
return self._files_normalized[proj_rel_path.lower()]
else:
return (proj_rel_path, *self.files[proj_rel_path])
def _del_file(self, proj_rel_path: str):
"""
Remove a file from the list of files
"""
del self.files[proj_rel_path]
if self.normalize_paths:
del self._files_normalized[proj_rel_path.lower()]
def _add_file(self, proj_rel_path: str, root: str, rel_path: str):
"""
Add a file to the list of files
Also stores the normalized version for later lookups
"""
self.files[proj_rel_path] = (root, rel_path)
if self.normalize_paths:
self._files_normalized[proj_rel_path.lower()] = (proj_rel_path, root, rel_path)
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
"""
Register a directory if dst / rel_path is not blocked by a file or ignored.
"""
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
# Don't recurse when dir is ignored.
return False
elif self._in_files(proj_rel_path):
# A file-dir conflict is fatal except if they're the same file (symlinked dir).
src_a = os.path.join(*self._file(proj_rel_path))
src_b = os.path.join(root, rel_path)
if not _samefile(src_a, src_b):
self.fatal_conflicts.append(
MergeConflict(dst=proj_rel_path, src_a=src_a, src_b=src_b)
)
return False
# Remove the link in favor of the dir.
existing_proj_rel_path, _, _ = self._file(proj_rel_path)
self._del_file(existing_proj_rel_path)
self._add_directory(proj_rel_path, root, rel_path)
return True
elif self._in_directories(proj_rel_path):
# No new directory, carry on.
return True
else:
# Register new directory.
self._add_directory(proj_rel_path, root, rel_path)
return True
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
"""
Replace symlinked dirs with actual directories when possible in low depths,
otherwise handle it as a file (i.e. we link to the symlink).
Transforming symlinks into dirs makes it more likely we can merge directories,
e.g. when <prefix>/lib -> <prefix>/subdir/lib.
We only do this when the symlink is pointing into a subdirectory from the
symlink's directory, to avoid potential infinite recursion; and only at a
constant level of nesting, to avoid potential exponential blowups in file
duplication.
"""
if self.ignore(rel_path):
return False
# Only follow symlinked dirs in <prefix>/**/**/*
if depth > 1:
handle_as_dir = False
else:
# Only follow symlinked dirs when pointing deeper
src = os.path.join(root, rel_path)
real_parent = os.path.realpath(os.path.dirname(src))
real_child = os.path.realpath(src)
handle_as_dir = real_child.startswith(real_parent)
if handle_as_dir:
return self.before_visit_dir(root, rel_path, depth)
self.visit_file(root, rel_path, depth, symlink=True)
return False
def visit_file(self, root: str, rel_path: str, depth: int, *, symlink: bool = False) -> None:
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
pass
elif self._in_directories(proj_rel_path):
# Can't create a file where a dir is, unless they are the same file (symlinked dir),
# in which case we simply drop the symlink in favor of the actual dir.
src_a = os.path.join(*self._directory(proj_rel_path))
src_b = os.path.join(root, rel_path)
if not symlink or not _samefile(src_a, src_b):
self.fatal_conflicts.append(
MergeConflict(dst=proj_rel_path, src_a=src_a, src_b=src_b)
)
elif self._in_files(proj_rel_path):
# When two files project to the same path, they conflict iff they are distinct.
# If they are the same (i.e. one links to the other), register regular files rather
# than symlinks. The reason is that in copy-type views, we need a copy of the actual
# file, not the symlink.
src_a = os.path.join(*self._file(proj_rel_path))
src_b = os.path.join(root, rel_path)
if not _samefile(src_a, src_b):
# Distinct files produce a conflict.
self.file_conflicts.append(
MergeConflict(dst=proj_rel_path, src_a=src_a, src_b=src_b)
)
return
if not symlink:
# Remove the link in favor of the actual file. The del is necessary to maintain the
# order of the files dict, which is grouped by root.
existing_proj_rel_path, _, _ = self._file(proj_rel_path)
self._del_file(existing_proj_rel_path)
self._add_file(proj_rel_path, root, rel_path)
else:
# Otherwise register this file to be linked.
self._add_file(proj_rel_path, root, rel_path)
def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
# Treat symlinked files as ordinary files (without "dereferencing")
self.visit_file(root, rel_path, depth, symlink=True)
def set_projection(self, projection: str) -> None:
self.projection = os.path.normpath(projection)
        # TODO: is this the right general check for an empty projection?
if self.projection == ".":
self.projection = ""
return
# If there is a projection, we'll also create the directories
# it consists of, and check whether that's causing conflicts.
path = ""
for part in self.projection.split(os.sep):
path = os.path.join(path, part)
if not self._in_files(path):
self._add_directory(path, "<projection>", path)
else:
# Can't create a dir where a file is.
_, src_a_root, src_a_relpath = self._file(path)
self.fatal_conflicts.append(
MergeConflict(
dst=path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join("<projection>", path),
)
)
|
SourceMergeVisitor
|
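The heart of the visitor above is bookkeeping: map each projected destination path to its source, and flag a conflict when a file and a directory (or two distinct files) collide. A toy sketch of that bookkeeping over plain path lists — the merge function and its inputs are invented for illustration:

# Toy merge bookkeeping: detect file-vs-dir collisions across source trees.
import posixpath

def merge(trees):
    files, dirs, conflicts = {}, {}, []
    for root, paths in trees.items():
        for rel in paths:
            # Register every ancestor of rel as a directory to create.
            parent = posixpath.dirname(rel)
            while parent:
                if parent in files:
                    conflicts.append((parent, files[parent], root))
                dirs.setdefault(parent, root)
                parent = posixpath.dirname(parent)
            if rel in dirs:
                conflicts.append((rel, dirs[rel], root))
            elif rel in files:
                conflicts.append((rel, files[rel], root))
            else:
                files[rel] = root
    return files, dirs, conflicts

trees = {"/pkg-a": ["bin/tool", "share/doc"], "/pkg-b": ["bin/tool/helper"]}
_, _, conflicts = merge(trees)
assert ("bin/tool", "/pkg-a", "/pkg-b") in conflicts   # file vs. directory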
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/command_palette_discovery.py
|
{
"start": 92,
"end": 797
}
|
class ____(Provider):
def goes_nowhere_does_nothing(self) -> None:
pass
async def discover(self) -> Hits:
for n in range(10):
command = f"This is a test of this code {n}"
yield DiscoveryHit(
command,
self.goes_nowhere_does_nothing,
command,
)
async def search(self, query: str) -> Hits:
matcher = self.matcher(query)
for n in range(10):
command = f"This should not appear {n}"
yield Hit(
n / 10,
matcher.highlight(command),
self.goes_nowhere_does_nothing,
command,
)
|
TestSource
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/events.py
|
{
"start": 86762,
"end": 89087
}
|
class ____(Request):
"""
For each task, get a list of metrics for which the requested event type was reported
:param tasks: Task IDs
:type tasks: Sequence[str]
:param event_type: Event type
:type event_type: EventTypeEnum
"""
_service = "events"
_action = "get_task_metrics"
_version = "2.13"
_schema = {
"definitions": {
"event_type_enum": {
"enum": [
"training_stats_scalar",
"training_stats_vector",
"training_debug_image",
"plot",
"log",
],
"type": "string",
}
},
"properties": {
"event_type": {
"$ref": "#/definitions/event_type_enum",
"description": "Event type",
},
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], event_type: Any = None, **kwargs: Any) -> None:
super(GetTaskMetricsRequest, self).__init__(**kwargs)
self.tasks = tasks
self.event_type = event_type
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("event_type")
def event_type(self) -> Any:
return self._property_event_type
@event_type.setter
def event_type(self, value: Any) -> None:
if value is None:
self._property_event_type = None
return
if isinstance(value, six.string_types):
try:
value = EventTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "event_type", enum.Enum)
self._property_event_type = value
|
GetTaskMetricsRequest
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py
|
{
"start": 26898,
"end": 28886
}
|
class ____(nn.Module):
def __init__(self, config: Phi4MultimodalAudioConfig):
super().__init__()
self.config = config
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.dropout_rate
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
self.k_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
self.v_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs,
):
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = simple_eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, _ = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output
|
Phi4MultimodalAudioAttention
|
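The projections-plus-attention structure above reduces to scaled dot-product attention. A NumPy sketch of the core computation for a single head — the shapes and the causal-mask convention here are generic illustrations, not the Phi-4 implementation:

# Single-head scaled dot-product attention in NumPy (generic illustration).
import numpy as np

def attention(q, k, v, causal=False):
    # q, k, v: (seq_len, head_dim); scores: (seq_len, seq_len)
    scores = q @ k.T / np.sqrt(q.shape[-1])
    if causal:
        # Mask out future positions with -inf before the softmax.
        scores = np.where(np.tril(np.ones_like(scores, dtype=bool)), scores, -np.inf)
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ v

rng = np.random.default_rng(0)
q, k, v = (rng.standard_normal((5, 8)) for _ in range(3))
out = attention(q, k, v, causal=True)
assert out.shape == (5, 8)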
python
|
GoogleCloudPlatform__python-docs-samples
|
dialogflow-cx/streaming_detect_intent_infinite.py
|
{
"start": 8661,
"end": 23975
}
|
class ____:
"""Manages the interaction with the Dialogflow CX Streaming API."""
def __init__(
self,
agent_name: str,
language_code: str,
single_utterance: bool,
model: str | None,
voice: str | None,
sample_rate: int,
dialogflow_timeout: float,
debug: bool,
) -> None:
"""Initializes the Dialogflow CX Streaming API client."""
try:
_, project, _, location, _, agent_id = agent_name.split("/")
except ValueError:
raise ValueError(
"Invalid agent name format. Expected format: projects/<project>/locations/<location>/agents/<agent_id>"
)
if location != "global":
client_options = ClientOptions(
api_endpoint=f"{location}-dialogflow.googleapis.com",
quota_project_id=project,
)
else:
client_options = ClientOptions(quota_project_id=project)
self.client = dialogflowcx_v3.SessionsAsyncClient(client_options=client_options)
self.agent_name = agent_name
self.language_code = language_code
self.single_utterance = single_utterance
self.model = model
self.session_id = str(uuid.uuid4())
self.dialogflow_timeout = dialogflow_timeout
self.debug = debug
self.sample_rate = sample_rate
self.voice = voice
if self.debug:
logger.setLevel(logging.DEBUG)
logger.debug("Debug logging enabled")
async def generate_streaming_detect_intent_requests(
self, audio_queue: asyncio.Queue
) -> AsyncGenerator[dialogflowcx_v3.StreamingDetectIntentRequest, None]:
"""Generates the requests for the streaming API."""
audio_config = dialogflowcx_v3.InputAudioConfig(
audio_encoding=dialogflowcx_v3.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
sample_rate_hertz=self.sample_rate,
model=self.model,
single_utterance=self.single_utterance,
)
query_input = dialogflowcx_v3.QueryInput(
language_code=self.language_code,
audio=dialogflowcx_v3.AudioInput(config=audio_config),
)
output_audio_config = dialogflowcx_v3.OutputAudioConfig(
audio_encoding=dialogflowcx_v3.OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16,
sample_rate_hertz=self.sample_rate,
synthesize_speech_config=(
dialogflowcx_v3.SynthesizeSpeechConfig(
voice=dialogflowcx_v3.VoiceSelectionParams(name=self.voice)
)
if self.voice
else None
),
)
# First request contains session ID, query input audio config, and output audio config
request = dialogflowcx_v3.StreamingDetectIntentRequest(
session=f"{self.agent_name}/sessions/{self.session_id}",
query_input=query_input,
enable_partial_response=True,
output_audio_config=output_audio_config,
)
if self.debug:
logger.debug(f"Sending initial request: {request}")
yield request
# Subsequent requests contain audio only
while True:
try:
chunk = await audio_queue.get()
if chunk is None:
logger.debug(
"[generate_streaming_detect_intent_requests] Received None chunk, signaling end of utterance"
)
break # Exit the generator
request = dialogflowcx_v3.StreamingDetectIntentRequest(
query_input=dialogflowcx_v3.QueryInput(
audio=dialogflowcx_v3.AudioInput(audio=chunk)
)
)
yield request
except asyncio.CancelledError:
logger.debug(
"[generate_streaming_detect_intent_requests] Audio queue processing was cancelled"
)
break
async def streaming_detect_intent(
self,
audio_queue: asyncio.Queue,
) -> AsyncGenerator[dialogflowcx_v3.StreamingDetectIntentResponse, None]:
"""Transcribes the audio into text and yields each response."""
requests_generator = self.generate_streaming_detect_intent_requests(audio_queue)
retry_policy = retries.AsyncRetry(
predicate=retries.if_exception_type(ServiceUnavailable),
initial=0.5,
maximum=60.0,
multiplier=2.0,
timeout=300.0,
on_error=lambda e: logger.warning(f"Retrying due to error: {e}"),
)
async def streaming_request_with_retry() -> (
AsyncGenerator[dialogflowcx_v3.StreamingDetectIntentResponse, None]
):
async def api_call():
logger.debug("Initiating streaming request")
return await self.client.streaming_detect_intent(
requests=requests_generator
)
response_stream = await retry_policy(api_call)()
return response_stream
try:
responses = await streaming_request_with_retry()
            # Iterate over the responses, applying a timeout to each one
response_iterator = responses.__aiter__() # Get the iterator
while True:
try:
response = await asyncio.wait_for(
response_iterator.__anext__(), timeout=self.dialogflow_timeout
)
if self.debug and response:
response_copy = MessageToDict(response._pb)
if response_copy.get("detectIntentResponse"):
response_copy["detectIntentResponse"][
"outputAudio"
] = "REMOVED"
logger.debug(f"Received response: {response_copy}")
yield response
except StopAsyncIteration:
logger.debug("End of response stream")
break
except asyncio.TimeoutError:
logger.warning("Timeout waiting for response from Dialogflow.")
continue # Continue to the next iteration, don't break
except GoogleAPIError as e: # Keep error handling
logger.error(f"Error: {e}")
if e.code == 500: # Consider making this more robust
logger.warning("Encountered a 500 error during iteration.")
except GoogleAPIError as e:
logger.error(f"Error: {e}")
if e.code == 500:
logger.warning("Encountered a 500 error during iteration.")
async def push_to_audio_queue(
audio_generator: AsyncGenerator, audio_queue: asyncio.Queue
) -> None:
"""Pushes audio chunks from a generator to an asyncio queue."""
try:
async for chunk in audio_generator:
await audio_queue.put(chunk)
except Exception as e:
logger.error(f"Error in push_to_audio_queue: {e}")
async def listen_print_loop(
responses: AsyncGenerator[dialogflowcx_v3.StreamingDetectIntentResponse, None],
audioIO: AudioIO,
audio_queue: asyncio.Queue,
dialogflow_timeout: float,
) -> bool:
"""Iterates through server responses and prints them."""
response_iterator = responses.__aiter__()
while True:
try:
response = await asyncio.wait_for(
response_iterator.__anext__(), timeout=dialogflow_timeout
)
if (
response
and response.detect_intent_response
and response.detect_intent_response.output_audio
):
audioIO.play_audio(response.detect_intent_response.output_audio)
if (
response
and response.detect_intent_response
and response.detect_intent_response.query_result
):
query_result = response.detect_intent_response.query_result
# Check for end_interaction in response messages
if query_result.response_messages:
for message in query_result.response_messages:
if message.text:
logger.info(f"Dialogflow output: {message.text.text[0]}")
if message._pb.HasField("end_interaction"):
logger.info("End interaction detected.")
return False # Signal to *not* restart the loop (exit)
if query_result.intent and query_result.intent.display_name:
logger.info(f"Detected intent: {query_result.intent.display_name}")
# ensure audio stream restarts
return True
elif response and response.recognition_result:
transcript = response.recognition_result.transcript
if transcript:
if response.recognition_result.is_final:
logger.info(f"Final transcript: {transcript}")
await audio_queue.put(None) # Signal end of input
else:
print(
colored(transcript, "yellow"),
end="\r",
)
else:
logger.debug("No transcript in recognition result.")
except StopAsyncIteration:
logger.debug("End of response stream in listen_print_loop")
break
except asyncio.TimeoutError:
logger.warning("Timeout waiting for response in listen_print_loop")
continue # Crucial: Continue, don't return, on timeout
except Exception as e:
logger.error(f"Error in listen_print_loop: {e}")
return False # Exit on any error within the loop
    return True  # Return True once the response stream is exhausted
async def handle_audio_input_output(
dialogflow_streaming: DialogflowCXStreaming,
audioIO: AudioIO,
audio_queue: asyncio.Queue,
) -> None:
"""Handles audio input and output concurrently."""
async def cancel_push_task(push_task: asyncio.Task | None) -> None:
"""Helper function to cancel push task safely."""
if push_task is not None and not push_task.done():
push_task.cancel()
try:
await push_task
except asyncio.CancelledError:
logger.debug("Push task cancelled successfully")
push_task = None
try:
push_task = asyncio.create_task(
push_to_audio_queue(audioIO.generator(), audio_queue)
)
while True: # restart streaming here.
responses = dialogflow_streaming.streaming_detect_intent(audio_queue)
should_continue = await listen_print_loop(
responses,
audioIO,
audio_queue,
dialogflow_streaming.dialogflow_timeout,
)
if not should_continue:
logger.debug(
"End interaction detected, exiting handle_audio_input_output"
)
await cancel_push_task(push_task)
break # exit while loop
logger.debug("Restarting audio streaming loop")
except asyncio.CancelledError:
logger.warning("Handling of audio input/output was cancelled.")
await cancel_push_task(push_task)
except Exception as e:
logger.error(f"An unexpected error occurred: {e}")
async def main(
agent_name: str,
language_code: str = DEFAULT_LANGUAGE_CODE,
single_utterance: bool = False,
model: str | None = None,
voice: str | None = None,
sample_rate: int = DEFAULT_SAMPLE_RATE,
dialogflow_timeout: float = DEFAULT_DIALOGFLOW_TIMEOUT,
debug: bool = False,
) -> None:
"""Start bidirectional streaming from microphone input to speech API"""
chunk_size = int(sample_rate * CHUNK_SECONDS)
audioIO = AudioIO(sample_rate, chunk_size)
dialogflow_streaming = DialogflowCXStreaming(
agent_name,
language_code,
single_utterance,
model,
voice,
sample_rate,
dialogflow_timeout,
debug,
)
logger.info(f"Chunk size: {audioIO.chunk_size}")
logger.info(f"Using input device: {audioIO.input_device_name}")
logger.info(f"Using output device: {audioIO.output_device_name}")
# Signal handler function
def signal_handler(sig: int, frame: any) -> None:
print(colored("\nExiting gracefully...", "yellow"))
audioIO.closed = True # Signal to stop the main loop
sys.exit(0)
# Set the signal handler for Ctrl+C (SIGINT)
signal.signal(signal.SIGINT, signal_handler)
with audioIO:
logger.info(f"NEW REQUEST: {get_current_time() / 1000}")
audio_queue = asyncio.Queue()
try:
# Apply overall timeout to the entire interaction
await asyncio.wait_for(
handle_audio_input_output(dialogflow_streaming, audioIO, audio_queue),
timeout=dialogflow_streaming.dialogflow_timeout,
)
except asyncio.TimeoutError:
logger.error(
f"Dialogflow interaction timed out after {dialogflow_streaming.dialogflow_timeout} seconds."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("agent_name", help="Agent Name")
parser.add_argument(
"--language_code",
type=str,
default=DEFAULT_LANGUAGE_CODE,
help="Specify the language code (default: en-US)",
)
parser.add_argument(
"--single_utterance",
action="store_true",
help="Enable single utterance mode (default: False)",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Specify the speech recognition model to use (default: None)",
)
parser.add_argument(
"--voice",
type=str,
default=None,
help="Specify the voice for output audio (default: None)",
)
parser.add_argument(
"--sample_rate",
type=int,
default=DEFAULT_SAMPLE_RATE,
help="Specify the sample rate in Hz (default: 16000)",
)
parser.add_argument(
"--dialogflow_timeout",
type=float,
default=DEFAULT_DIALOGFLOW_TIMEOUT,
help="Specify the Dialogflow API timeout in seconds (default: 60)",
)
parser.add_argument(
"--debug",
action="store_true",
help="Enable debug logging",
)
args = parser.parse_args()
asyncio.run(
main(
args.agent_name,
args.language_code,
args.single_utterance,
args.model,
args.voice,
args.sample_rate,
args.dialogflow_timeout,
args.debug,
)
)
# [END dialogflow_streaming_detect_intent_infinite]
|
DialogflowCXStreaming
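The request generator above drains an asyncio.Queue until it sees a None sentinel (pushed by listen_print_loop on a final transcript). A self-contained sketch of that pattern, with illustrative names:

import asyncio

async def drain_until_sentinel(queue: asyncio.Queue):
    # Yield chunks until a None sentinel signals end of utterance.
    while True:
        chunk = await queue.get()
        if chunk is None:
            break
        yield chunk

async def demo() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    for item in (b"audio-1", b"audio-2", None):
        await queue.put(item)
    async for chunk in drain_until_sentinel(queue):
        print(chunk)

asyncio.run(demo())  # prints b'audio-1' then b'audio-2'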
|
python
|
spyder-ide__spyder
|
spyder/plugins/variableexplorer/widgets/objecteditor.py
|
{
"start": 762,
"end": 5775
}
|
class ____(QObject):
def __init__(self):
QObject.__init__(self)
self.dialogs = {}
self.namespace = None
def set_namespace(self, namespace):
self.namespace = namespace
def create_dialog(self, dialog, refname, func):
self.dialogs[id(dialog)] = dialog, refname, func
dialog.accepted.connect(
lambda eid=id(dialog): self.editor_accepted(eid))
dialog.rejected.connect(
lambda eid=id(dialog): self.editor_rejected(eid))
dialog.show()
dialog.activateWindow()
dialog.raise_()
def editor_accepted(self, dialog_id):
dialog, refname, func = self.dialogs[dialog_id]
self.namespace[refname] = func(dialog)
self.dialogs.pop(dialog_id)
def editor_rejected(self, dialog_id):
self.dialogs.pop(dialog_id)
keeper = DialogKeeper()
def create_dialog(obj, obj_name):
"""Creates the editor dialog and returns a tuple (dialog, func) where func
is the function to be called with the dialog instance as argument, after
    quitting the dialog box.
The role of this intermediate function is to allow easy monkey-patching.
(uschmitt suggested this indirection here so that he can monkey patch
oedit to show eMZed related data)
"""
# Local import
conv_func = lambda data: data
readonly = not is_known_type(obj)
if isinstance(obj, np.ndarray) and np.ndarray is not FakeObject:
dialog = ArrayEditor()
if not dialog.setup_and_check(obj, title=obj_name,
readonly=readonly):
return
elif (isinstance(obj, PIL.Image.Image) and PIL.Image is not FakeObject
and np.ndarray is not FakeObject):
dialog = ArrayEditor()
data = np.array(obj)
if not dialog.setup_and_check(data, title=obj_name,
readonly=readonly):
return
conv_func = lambda data: PIL.Image.fromarray(data, mode=obj.mode)
elif (isinstance(obj, (pd.DataFrame, pd.Series)) and
pd.DataFrame is not FakeObject):
dialog = DataFrameEditor()
if not dialog.setup_and_check(obj):
return
elif isinstance(obj, str):
dialog = TextEditor(obj, title=obj_name, readonly=readonly)
else:
dialog = CollectionsEditor()
dialog.setup(obj, title=obj_name, readonly=readonly)
def end_func(dialog):
return conv_func(dialog.get_value())
return dialog, end_func
def oedit(obj, modal=True, namespace=None, app=None):
"""Edit the object 'obj' in a GUI-based editor and return the edited copy
(if Cancel is pressed, return None)
The object 'obj' is a container
Supported container types:
dict, list, set, tuple, str/unicode or numpy.array
(instantiate a new QApplication if necessary,
so it can be called directly from the interpreter)
"""
if modal:
obj_name = ''
else:
assert isinstance(obj, str)
obj_name = obj
if namespace is None:
namespace = globals()
keeper.set_namespace(namespace)
obj = namespace[obj_name]
# keep QApplication reference alive in the Python interpreter:
namespace['__qapp__'] = app
result = create_dialog(obj, obj_name)
if result is None:
return
dialog, end_func = result
if modal:
if dialog.exec_():
return end_func(dialog)
else:
keeper.create_dialog(dialog, obj_name, end_func)
import os
if os.name == 'nt' and app:
app.exec_()
#==============================================================================
# Tests
#==============================================================================
def test():
"""Run object editor test"""
# Local import
from spyder.utils.qthelpers import qapplication
app = qapplication() # analysis:ignore
data = np.random.randint(1, 256, size=(100, 100)).astype('uint8')
image = PIL.Image.fromarray(data)
example = {'str': 'kjkj kj k j j kj k jkj',
'list': [1, 3, 4, 'kjkj', None],
'set': {1, 2, 1, 3, None, 'A', 'B', 'C', True, False},
'dict': {'d': 1, 'a': np.random.rand(10, 10), 'b': [1, 2]},
'float': 1.2233,
'array': np.random.rand(10, 10),
'image': image,
'date': datetime.date(1945, 5, 8),
'datetime': datetime.datetime(1945, 5, 8),
}
image = oedit(image)
class Foobar(object):
def __init__(self):
self.text = "toto"
foobar = Foobar()
print(oedit(foobar, app=app)) # spyder: test-skip
print(oedit(example, app=app)) # spyder: test-skip
print(oedit(np.random.rand(10, 10), app=app)) # spyder: test-skip
print(oedit(oedit.__doc__, app=app)) # spyder: test-skip
print(example) # spyder: test-skip
if __name__ == "__main__":
test()
|
DialogKeeper
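A Qt-free sketch of the keeper idea: the dict keyed by id(dialog) keeps non-modal editors alive until they are accepted or rejected, and accepting writes the converted value back into the namespace. FakeDialog and the function names here are illustrative.

class FakeDialog:
    def get_value(self):
        return [1, 2, 3]

dialogs = {}
namespace = {}

def create(dialog, refname, func):
    # Hold a strong reference so the "dialog" is not garbage-collected.
    dialogs[id(dialog)] = (dialog, refname, func)

def accepted(dialog_id):
    dialog, refname, func = dialogs.pop(dialog_id)
    namespace[refname] = func(dialog)

d = FakeDialog()
create(d, "x", lambda dlg: dlg.get_value())
accepted(id(d))
print(namespace)  # {'x': [1, 2, 3]}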
|
python
|
spyder-ide__spyder
|
spyder/plugins/console/utils/ansihandler.py
|
{
"start": 197,
"end": 4173
}
|
class ____(object):
"""ANSI Escape sequences handler"""
if os.name == 'nt':
# Windows terminal colors:
ANSI_COLORS = ( # Normal, Bright/Light
('#000000', '#808080'), # 0: black
('#800000', '#ff0000'), # 1: red
('#008000', '#00ff00'), # 2: green
('#808000', '#ffff00'), # 3: yellow
('#000080', '#0000ff'), # 4: blue
('#800080', '#ff00ff'), # 5: magenta
('#008080', '#00ffff'), # 6: cyan
('#c0c0c0', '#ffffff'), # 7: white
)
elif os.name == 'mac':
# Terminal.app colors:
ANSI_COLORS = ( # Normal, Bright/Light
('#000000', '#818383'), # 0: black
('#C23621', '#FC391F'), # 1: red
('#25BC24', '#25BC24'), # 2: green
('#ADAD27', '#EAEC23'), # 3: yellow
('#492EE1', '#5833FF'), # 4: blue
('#D338D3', '#F935F8'), # 5: magenta
('#33BBC8', '#14F0F0'), # 6: cyan
('#CBCCCD', '#E9EBEB'), # 7: white
)
else:
# xterm colors:
ANSI_COLORS = ( # Normal, Bright/Light
('#000000', '#7F7F7F'), # 0: black
('#CD0000', '#ff0000'), # 1: red
('#00CD00', '#00ff00'), # 2: green
('#CDCD00', '#ffff00'), # 3: yellow
('#0000EE', '#5C5CFF'), # 4: blue
('#CD00CD', '#ff00ff'), # 5: magenta
('#00CDCD', '#00ffff'), # 6: cyan
('#E5E5E5', '#ffffff'), # 7: white
)
def __init__(self):
self.intensity = 0
self.italic = None
self.bold = None
self.underline = None
self.foreground_color = None
self.background_color = None
self.default_foreground_color = 30
self.default_background_color = 47
def set_code(self, code):
assert isinstance(code, int)
if code == 0:
# Reset all settings
self.reset()
elif code == 1:
# Text color intensity
self.intensity = 1
# The following line is commented because most terminals won't
# change the font weight, against ANSI standard recommendation:
# self.bold = True
elif code == 3:
# Italic on
self.italic = True
elif code == 4:
# Underline simple
self.underline = True
elif code == 22:
# Normal text color intensity
self.intensity = 0
self.bold = False
elif code == 23:
# No italic
self.italic = False
elif code == 24:
# No underline
self.underline = False
elif code >= 30 and code <= 37:
# Text color
self.foreground_color = code
elif code == 39:
# Default text color
self.foreground_color = self.default_foreground_color
elif code >= 40 and code <= 47:
# Background color
self.background_color = code
elif code == 49:
# Default background color
self.background_color = self.default_background_color
self.set_style()
def set_style(self):
"""
Set font style with the following attributes:
'foreground_color', 'background_color', 'italic',
'bold' and 'underline'
"""
raise NotImplementedError
def reset(self):
self.current_format = None
self.intensity = 0
self.italic = False
self.bold = False
self.underline = False
self.foreground_color = None
self.background_color = None
|
ANSIEscapeCodeHandler
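The handler consumes one integer SGR code at a time via set_code(); a sketch of extracting those codes from raw terminal output (the regex and helper are illustrative, not part of the handler):

import re

ANSI_SGR = re.compile(r"\x1b\[([0-9;]*)m")

def sgr_codes(text):
    # ESC[1;31m -> [1, 31]; a bare ESC[m is equivalent to ESC[0m.
    for match in ANSI_SGR.finditer(text):
        params = match.group(1) or "0"
        yield [int(p) for p in params.split(";")]

print(list(sgr_codes("\x1b[1;31mred bold\x1b[0m")))  # [[1, 31], [0]]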
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py
|
{
"start": 3385,
"end": 3941
}
|
class ____(Generic[T1, T2, *Ts1]): ...
ta1 = ClassTA()
reveal_type(ta1, expected_text="ClassTA[str, str, str, str]")
ta2 = ClassTA[int]()
reveal_type(ta2, expected_text="ClassTA[int, int, int, int]")
ta3 = ClassTA[int, float]()
reveal_type(ta3, expected_text="ClassTA[int, float, int, float]")
ta4 = ClassTA[int, float, *tuple[None, ...]]()
reveal_type(ta4, expected_text="ClassTA[int, float, *tuple[None, ...]]")
# This should generate an error because Ts1 depends on T2.
# It should also produce an error because T2 comes after a TypeVarTuple.
|
ClassTA
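A runnable sketch of the PEP 696 TypeVar defaults this sample exercises, assuming typing_extensions is available; a type checker fills in the default when a parameter is omitted.

from typing import Generic
from typing_extensions import TypeVar

# T defaults to str when not specified (PEP 696).
T = TypeVar("T", default=str)

class Box(Generic[T]):
    def __init__(self, value: T) -> None:
        self.value = value

b: Box = Box("hello")  # a checker treats the bare Box as Box[str]
c = Box[int](42)       # explicit specialization still works
print(b.value, c.value)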
|
python
|
optuna__optuna
|
optuna/_gp/acqf.py
|
{
"start": 6457,
"end": 6881
}
|
class ____(BaseAcquisitionFunc):
def __init__(
self,
gpr: GPRegressor,
search_space: SearchSpace,
beta: float,
) -> None:
self._gpr = gpr
self._beta = beta
super().__init__(gpr.length_scales, search_space)
def eval_acqf(self, x: torch.Tensor) -> torch.Tensor:
mean, var = self._gpr.posterior(x)
return mean + torch.sqrt(self._beta * var)
|
UCB
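Numerically, eval_acqf computes mean + sqrt(beta * var) per candidate point; a sketch with made-up posterior values standing in for gpr.posterior(x):

import torch

mean = torch.tensor([0.2, 0.5, 0.1])    # hypothetical posterior means
var = torch.tensor([0.04, 0.01, 0.09])  # hypothetical posterior variances
beta = 2.0

ucb = mean + torch.sqrt(beta * var)
print(ucb)  # tensor([0.4828, 0.6414, 0.5243])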
|
python
|
jina-ai__jina
|
jina/excepts.py
|
{
"start": 1744,
"end": 1879
}
|
class ____(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
|
BadImageNameError
|
python
|
pytorch__pytorch
|
test/inductor/test_static_cuda_launcher.py
|
{
"start": 715,
"end": 15596
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self.tmp_files = []
def tearDown(self):
super().tearDown()
for tmp_file in self.tmp_files:
try:
os.remove(tmp_file.name)
except OSError:
pass
def write_cubin_to_tmp(self, kernel: CompiledKernel) -> str:
"""
Only used for tests where we don't have a cubin path.
"""
if hasattr(kernel, "_cubin_path"):
return
# Just used by tests for now.
# TODO: derive cubin_path from wherever triton stores the cubin file on disk.
tmp_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
with tmp_file:
tmp_file.write(kernel.asm["cubin"])
self.tmp_files.append(tmp_file)
return tmp_file.name
def _make_launcher(
self,
compiled_kernel: CompiledKernel,
) -> StaticallyLaunchedCudaKernel:
"""
Compiles a Triton kernel with the provided *args,
writes its cubin to the temporary file, and returns the file path.
"""
cubin_file = self.write_cubin_to_tmp(compiled_kernel)
compiled_kernel._cubin_path = cubin_file
result = StaticallyLaunchedCudaKernel(compiled_kernel)
# Test reload cubin from raw here
old_cubin_path = result.cubin_path
assert old_cubin_path is not None
result.cubin_path = None
result.reload_cubin_from_raw(old_cubin_path)
device_interface = get_interface_for_device("cuda")
result.load_kernel(device_interface.current_device())
return result
@skipIfRocm
def test_basic(self):
@triton.jit
def simple_kernel(arg0, arg1):
x = tl.load(arg0)
y = arg1
tl.store(arg0, x + y)
arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
arg1 = 5
args = (arg0, arg1)
compiled_kernel = simple_kernel[(1,)](*args)
launcher = self._make_launcher(compiled_kernel)
self.assertEqual(arg0, torch.tensor([5], dtype=torch.int32, device="cuda"))
self.assertEqual(launcher.arg_tys, "Oi")
new_arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(1, 1, 1, stream, new_arg0, arg1)
self.assertEqual(new_arg0, arg0)
    # I wish I could macro all int types into a single unit test on a loop, but
    # 1. variables aren't allowed as type annotations in Python
# 2. triton relies on inspect.get_source to get the type annotations
# so I can't even use exec() to generate the test cases.
# So we'll just make a few kernels by hand
@skipIfRocm
def test_unsigned_integers(self):
@triton.jit
def unsigned_integers(
arg0, arg1: tl.uint8, arg2: tl.uint16, arg3: tl.uint32, arg4: tl.uint64
):
x = tl.load(arg0)
y = arg1 + arg2 + arg3 + arg4
tl.store(arg0, x + y)
arg0 = torch.zeros(1, dtype=torch.uint64, device="cuda")
# Using small numbers creates a Literal type which triton treats as a constant
args = (arg0, 50, 50, 50, 50)
compiled_kernel = unsigned_integers[1,](*args)
launcher = self._make_launcher(compiled_kernel)
self.assertEqual(arg0, torch.tensor([200], dtype=torch.uint64, device="cuda"))
self.assertEqual(launcher.arg_tys, "OBHIK")
new_arg0 = torch.zeros(1, dtype=torch.uint64, device="cuda")
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(1, 1, 1, stream, new_arg0, 50, 50, 50, 50)
self.assertEqual(new_arg0, arg0)
@skipIfRocm
def test_signed_integers(self):
@triton.jit
def signed_integers(
arg0, arg1: tl.int8, arg2: tl.int16, arg3: tl.int32, arg4: tl.int64
):
x = tl.load(arg0)
y = arg1 + arg2 + arg3 + arg4
tl.store(arg0, x + y)
arg0 = torch.zeros(1, dtype=torch.int64, device="cuda")
# Using small numbers creates a Literal type which triton treats as a constant
args = (arg0, 50, 50, 50, 50)
compiled_kernel = signed_integers[1,](*args)
launcher = self._make_launcher(compiled_kernel)
self.assertEqual(arg0, torch.tensor([200], dtype=torch.int64, device="cuda"))
self.assertEqual(launcher.arg_tys, "Obhil")
new_arg0 = torch.zeros(1, dtype=torch.int64, device="cuda")
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(1, 1, 1, stream, new_arg0, 50, 50, 50, 50)
self.assertEqual(new_arg0, arg0)
@skipIfRocm
def test_basic_1arg(self):
@triton.jit
def simple_kernel_1_arg(arg0):
x = tl.load(arg0)
tl.store(arg0, x + 1)
arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
compiled_kernel = simple_kernel_1_arg[1,](arg0)
launcher = self._make_launcher(compiled_kernel)
self.assertEqual(arg0, torch.tensor([1], dtype=torch.int32, device="cuda"))
self.assertEqual(launcher.arg_tys, "O")
new_arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(
1,
1,
1,
stream,
new_arg0,
)
self.assertEqual(new_arg0, arg0)
@skipIfRocm
def test_constexpr(self):
# Constexprs are compiled directly into the cubin file,
# so we never need to pass it to StaticCudaLauncher.
@triton.jit
def kernel_constexpr(arg0, CONSTANT: tl.constexpr):
x = tl.load(arg0)
tl.store(arg0, x + CONSTANT)
# Can't use make_launcher because constexpr needs to be constant
arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
compiled_kernel = kernel_constexpr[(1,)](arg0, CONSTANT=5)
launcher = self._make_launcher(compiled_kernel)
self.assertEqual(arg0, torch.tensor([5], dtype=torch.int32, device="cuda"))
self.assertEqual(launcher.arg_tys, "O")
new_arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(
1,
1,
1,
stream,
new_arg0,
)
self.assertEqual(new_arg0, arg0)
@skipIfRocm
def test_implied_constant(self):
"""xnumel is unused in this kernel, but isn't explicitly marked as a constexpr"""
        # This kernel was generated by inductor so it has a bunch of unused arguments. We don't change it.
@triton.jit
def triton_red_fused_any_isinf_0(
in_ptr0,
out_ptr0,
xnumel, # noqa: F841
r0_numel,
XBLOCK: tl.constexpr,
R0_BLOCK: tl.constexpr,
):
xnumel = 1 # noqa: F841
rnumel = r0_numel # noqa: F841
RBLOCK: tl.constexpr = R0_BLOCK # noqa: F841
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None] # noqa: F841
xmask = tl.full([XBLOCK, R0_BLOCK], True, tl.int1) # noqa: F841
r0_base = tl.arange(0, R0_BLOCK)[None, :]
rbase = r0_base # noqa: F841
_tmp3 = tl.full([XBLOCK, R0_BLOCK], False, tl.int1)
for r0_offset in range(0, r0_numel, R0_BLOCK):
r0_index = r0_offset + r0_base
r0_mask = r0_index < r0_numel
roffset = r0_offset # noqa: F841
rindex = r0_index # noqa: F841
r0_0 = r0_index
tmp0 = tl.load(
in_ptr0 + (r0_0), r0_mask, eviction_policy="evict_first", other=0.0
)
tmp1 = libdevice.isinf(tmp0).to(tl.int1)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, R0_BLOCK])
tmp4 = _tmp3 | tmp2
_tmp3 = tl.where(r0_mask, tmp4, _tmp3)
tmp3 = triton_helpers.any(_tmp3.to(tl.int8), 1)[:, None].to(tl.int1)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None)
arg0 = torch.tensor([0.0, 0.5, float("inf"), 5], device="cuda")
arg1 = torch.tensor([False], device="cuda")
arg2 = torch.tensor([False], device="cuda")
compiled_kernel = triton_red_fused_any_isinf_0[1,](
arg0, arg1, 1, 128, XBLOCK=1, R0_BLOCK=1
)
launcher = self._make_launcher(compiled_kernel)
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
# Don't pass in xnumel, as it is a constant
launcher.run(1, 1, 1, stream, arg0, arg2, 128)
self.assertEqual(arg1, arg2)
@skipIfRocm
def test_kernel_no_args(self):
# Just an easy way to test incompatible number of arguments
@triton.jit
def kernel_no_op():
pass
compiled_kernel = kernel_no_op[(1,)]()
launcher = self._make_launcher(compiled_kernel)
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(1, 1, 1, stream)
@skipIfRocm
def test_high_shared_mem(self):
@triton.jit
def simple_kernel(arg0, arg1):
x = tl.load(arg0)
y = arg1
tl.store(arg0, x + y)
arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
arg1 = 5
args = (arg0, arg1)
compiled_kernel = simple_kernel[(1,)](*args)
# Allocate 50 KB of memory
compiled_kernel.shared = 50000
launcher = self._make_launcher(compiled_kernel)
self.assertEqual(arg0, torch.tensor([5], dtype=torch.int32, device="cuda"))
self.assertEqual(launcher.arg_tys, "Oi")
new_arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.slow_launch_kernel = True
launcher.run(1, 1, 1, stream, new_arg0, arg1)
self.assertEqual(new_arg0, arg0)
@skipIfRocm
def test_too_high_shared_mem(self):
@triton.jit
def simple_kernel(arg0, arg1):
x = tl.load(arg0)
y = arg1
tl.store(arg0, x + y)
arg0 = torch.zeros(1, dtype=torch.int32, device="cuda")
arg1 = 5
args = (arg0, arg1)
compiled_kernel = simple_kernel[(1,)](*args)
# Allocate too much shared memory
compiled_kernel.shared = 99999999
self.assertRaisesRegex(
RuntimeError,
"out of resource: simple_kernel",
lambda: self._make_launcher(compiled_kernel),
)
@skipIfRocm
def test_kernel_empty_tensor(self):
# Triton kernel generated by torch.compile of the following:
# @torch.compile()
# def foo(x, y):
# return torch.cat(((x * 4), y + 10))
# Running with example input:
# torch._dynamo.decorators.mark_unbacked(t, 0)
# x = torch.rand(0, device="cuda")
# y = torch.rand(20, device="cuda")
@triton.jit
def triton_poi_fused_cat_0(
in_ptr0, in_ptr1, out_ptr0, ks0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0).to(tl.int64) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:].to(tl.int64)
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp3 = ks0
tmp4 = tmp0 < tmp3
tmp5 = tl.load(
in_ptr0 + (x0), xmask & tmp4, eviction_policy="evict_last", other=0.0
)
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp13 = tl.load(
in_ptr1 + (x0 + ((-1) * ks0)),
xmask & tmp10,
eviction_policy="evict_last",
other=0.0,
)
tmp14 = 10.0
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + (x0), tmp18, xmask)
arg0 = 0
arg1 = torch.randn(0, device="cuda")
arg2 = torch.randn(20, device="cuda")
buf0 = torch.empty(20, device="cuda")
buf1 = torch.empty(20, device="cuda")
xnumel = 20 + arg0
compiled_kernel = triton_poi_fused_cat_0[(1,)](
arg1, arg2, buf0, arg0, xnumel, XBLOCK=32
)
launcher = self._make_launcher(compiled_kernel)
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
launcher.run(1, 1, 1, stream, arg1, arg2, buf1, arg0, xnumel)
self.assertEqual(buf0, buf1)
@skipIfRocm
def test_kernel_many_args(self):
N = 200
# Make 200 arguments
args = [f"arg_{i}" for i in range(N)]
decl = ", ".join(args)
sums = [f" total += arg_{i}" for i in range(N)]
sums_str = "\n".join(sums)
template = f"""
from torch._inductor.runtime.triton_compat import tl, triton
@triton.jit
def kernel_many_args(out_tensor, {decl}):
out = tl.load(out_tensor)
total = out
{sums_str}
tl.store(out_tensor, total)
"""
result = PyCodeCache.load(template.lstrip())
kernel_args = tuple(random.random() for _ in range(N))
buf0 = torch.zeros(1, device="cuda")
compiled_kernel = result.kernel_many_args[1,](buf0, *kernel_args)
launcher = self._make_launcher(compiled_kernel)
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(device_interface.current_device())
buf1 = torch.zeros(1, device="cuda")
launcher.run(1, 1, 1, stream, buf1, *kernel_args)
self.assertEqual(buf0, buf1)
@requires_cuda_and_triton
@torch._inductor.config.patch(
{"use_static_cuda_launcher": True, "strict_static_cuda_launcher": True}
)
|
TestStaticCudaLauncher
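The arg_tys strings asserted above ("Oi", "OBHIK", "Obhil") encode one character per kernel argument; the mapping below is inferred from those assertions, not taken from the launcher's source:

ARG_TY_CODES = {
    "O": "tensor / device pointer",
    "b": "int8", "h": "int16", "i": "int32", "l": "int64",
    "B": "uint8", "H": "uint16", "I": "uint32", "K": "uint64",
}

def describe(arg_tys: str) -> list:
    return [ARG_TY_CODES[c] for c in arg_tys]

print(describe("Obhil"))
# ['tensor / device pointer', 'int8', 'int16', 'int32', 'int64']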
|
python
|
PrefectHQ__prefect
|
tests/test_task_worker.py
|
{
"start": 18884,
"end": 22301
}
|
class ____:
async def test_nested_task_run_via_task_worker(
self, prefect_client, events_pipeline
):
@task
def inner_task(x):
return x
@task
def outer_task(x):
return inner_task(x)
task_worker = TaskWorker(outer_task)
task_run_future = outer_task.apply_async((42,))
task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
await task_worker.execute_task_run(task_run)
await events_pipeline.process_events()
updated_task_run = await prefect_client.read_task_run(
task_run_future.task_run_id
)
assert updated_task_run.state.is_completed()
assert await updated_task_run.state.result() == 42
async def test_nested_flow_run_via_task_worker(
self, prefect_client, events_pipeline
):
@flow
def inner_flow(x):
return x
@task
def background_task(x):
return inner_flow(x)
task_worker = TaskWorker(background_task)
task_run_future = background_task.apply_async((42,))
task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
await task_worker.execute_task_run(task_run)
await events_pipeline.process_events()
updated_task_run = await prefect_client.read_task_run(
task_run_future.task_run_id
)
assert updated_task_run.state.is_completed()
assert await updated_task_run.state.result() == 42
async def test_nested_task_delay_serialization(
self, prefect_client, events_pipeline
):
"""
Test that calling task.delay() from within a task that was itself called with delay
works correctly. This tests the fix for the serialization issue where
TaskRunContext.serialize() with serialize_as_any=True would fail with:
TypeError: 'MockValSer' object cannot be converted to 'SchemaSerializer'
The error occurred when tasks were defined in separate modules, but we can
test the serialization path directly.
"""
@task
def inner_task(value: str) -> str:
return f"processed: {value}"
@task
def outer_task(value: str) -> str:
# Calling delay from within a task that was itself delayed
# This would trigger the serialization error if TaskRunContext.serialize()
# uses serialize_as_any=True globally
inner_task.delay(value)
# Just return a marker that we successfully called delay
return f"scheduled_{value}"
# Create worker with both tasks
task_worker = TaskWorker(outer_task, inner_task)
# Submit outer task using apply_async (equivalent to delay)
future = outer_task.apply_async(("test_value",))
task_run = await prefect_client.read_task_run(future.task_run_id)
# Execute - this would fail with serialization error before fix
await task_worker.execute_task_run(task_run)
await events_pipeline.process_events()
# Verify it completed successfully
updated_task_run = await prefect_client.read_task_run(future.task_run_id)
assert updated_task_run.state.is_completed()
result = await updated_task_run.state.result()
assert result == "scheduled_test_value"
|
TestTaskWorkerNestedTasks
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
|
{
"start": 1370,
"end": 10273
}
|
class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
def operator_and_matrix(self,
build_info,
dtype,
use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
if ensure_self_adjoint_and_pd:
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
else:
matrix = linear_operator_test_util.random_tril_matrix(
shape, dtype, force_well_conditioned=True, remove_upper=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
if ensure_self_adjoint_and_pd:
operator = LinearOperatorAdjoint(
linalg.LinearOperatorFullMatrix(
lin_op_matrix, is_positive_definite=True, is_self_adjoint=True))
else:
operator = LinearOperatorAdjoint(
linalg.LinearOperatorLowerTriangular(lin_op_matrix))
return operator, linalg.adjoint(matrix)
def test_base_operator_hint_used(self):
# The matrix values do not effect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
operator_adjoint = operator.adjoint()
self.assertIsInstance(operator_adjoint, LinearOperatorAdjoint)
self.assertTrue(operator_adjoint.is_positive_definite)
self.assertTrue(operator_adjoint.is_non_singular)
self.assertFalse(operator_adjoint.is_self_adjoint)
def test_adjoint_of_adjoint_is_operator(self):
# The matrix values do not effect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
operator_adjoint = operator.adjoint()
self.assertIsInstance(operator_adjoint, LinearOperatorAdjoint)
adjoint_of_op_adjoint = operator_adjoint.adjoint()
self.assertIsInstance(adjoint_of_op_adjoint,
linalg.LinearOperatorFullMatrix)
def test_supplied_hint_used(self):
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
operator_adjoint = LinearOperatorAdjoint(
operator,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator_adjoint.is_positive_definite)
self.assertTrue(operator_adjoint.is_non_singular)
self.assertFalse(operator_adjoint.is_self_adjoint)
def test_contradicting_hints_raise(self):
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=False)
with self.assertRaisesRegex(ValueError, "positive-definite"):
LinearOperatorAdjoint(operator, is_positive_definite=True)
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
with self.assertRaisesRegex(ValueError, "self-adjoint"):
LinearOperatorAdjoint(operator, is_self_adjoint=True)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, name="my_operator", is_non_singular=True)
operator = LinearOperatorAdjoint(operator)
self.assertEqual("my_operator_adjoint", operator.name)
def test_matmul_adjoint_operator(self):
matrix1 = np.random.randn(4, 4)
matrix2 = np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorFullMatrix(matrix1)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
np.matmul(matrix1, matrix2.T),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.T, matrix2),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.T, matrix2.T),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint=True,
adjoint_arg=True).to_dense()))
def test_matmul_adjoint_complex_operator(self):
matrix1 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
matrix2 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorFullMatrix(matrix1)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
np.matmul(matrix1,
matrix2.conj().T),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.conj().T, matrix2),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
np.matmul(matrix1.conj().T,
matrix2.conj().T),
self.evaluate(
full_matrix1.matmul(full_matrix2, adjoint=True,
adjoint_arg=True).to_dense()))
def test_matvec(self):
matrix = np.array([[1., 2.], [3., 4.]])
x = np.array([1., 2.])
operator = linalg.LinearOperatorFullMatrix(matrix)
self.assertAllClose(matrix.dot(x), self.evaluate(operator.matvec(x)))
self.assertAllClose(matrix.T.dot(x), self.evaluate(operator.H.matvec(x)))
def test_solve_adjoint_operator(self):
matrix1 = self.evaluate(
linear_operator_test_util.random_tril_matrix(
[4, 4], dtype=dtypes.float64, force_well_conditioned=True))
matrix2 = np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorLowerTriangular(
matrix1, is_non_singular=True)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
self.evaluate(linalg.triangular_solve(matrix1, matrix2.T)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
self.evaluate(linalg.triangular_solve(matrix1.T, matrix2, lower=False)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(matrix1.T, matrix2.T, lower=False)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint=True,
adjoint_arg=True).to_dense()))
def test_solve_adjoint_complex_operator(self):
matrix1 = self.evaluate(
linear_operator_test_util.random_tril_matrix(
[4, 4], dtype=dtypes.complex128, force_well_conditioned=True) +
1j * linear_operator_test_util.random_tril_matrix(
[4, 4], dtype=dtypes.complex128, force_well_conditioned=True))
matrix2 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
full_matrix1 = linalg.LinearOperatorLowerTriangular(
matrix1, is_non_singular=True)
full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)
self.assertAllClose(
self.evaluate(linalg.triangular_solve(matrix1,
matrix2.conj().T)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint_arg=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(matrix1.conj().T, matrix2, lower=False)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint=True).to_dense()))
self.assertAllClose(
self.evaluate(
linalg.triangular_solve(
matrix1.conj().T, matrix2.conj().T, lower=False)),
self.evaluate(
full_matrix1.solve(full_matrix2, adjoint=True,
adjoint_arg=True).to_dense()))
def test_solvevec(self):
matrix = np.array([[1., 2.], [3., 4.]])
inv_matrix = np.linalg.inv(matrix)
x = np.array([1., 2.])
operator = linalg.LinearOperatorFullMatrix(matrix)
self.assertAllClose(inv_matrix.dot(x), self.evaluate(operator.solvevec(x)))
self.assertAllClose(
inv_matrix.T.dot(x), self.evaluate(operator.H.solvevec(x)))
def test_tape_safe(self):
matrix = variables_module.Variable([[1., 2.], [3., 4.]])
operator = LinearOperatorAdjoint(linalg.LinearOperatorFullMatrix(matrix))
self.check_tape_safe(operator)
@test_util.run_all_in_graph_and_eager_modes
|
LinearOperatorAdjointTest
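The identities these tests assert, restated in plain NumPy (no LinearOperator abstraction): the adjoint acts as conj(A).T, adjointing twice returns the original, and solving via operator.H means solving against the adjoint matrix.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
B = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
x = rng.standard_normal(4)

adj = A.conj().T
np.testing.assert_allclose(adj.conj().T, A)  # adjoint of adjoint is A
# matmul with adjoint flags reduces to conjugate transposes:
np.testing.assert_allclose((A @ B).conj().T, B.conj().T @ A.conj().T)
# operator.H.solvevec(x) amounts to solving conj(A).T @ y = x:
y = np.linalg.solve(adj, x)
np.testing.assert_allclose(adj @ y, x)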
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/autoscaler.py
|
{
"start": 2938,
"end": 3868
}
|
class ____:
active_nodes: Dict[NodeType, int]
idle_nodes: Optional[Dict[NodeType, int]]
pending_nodes: List[Tuple[NodeIP, NodeType, NodeStatus]]
pending_launches: Dict[NodeType, int]
failed_nodes: List[Tuple[NodeIP, NodeType]]
node_availability_summary: NodeAvailabilitySummary = field(
default_factory=lambda: NodeAvailabilitySummary({})
)
    # A mapping from node id to a tuple of (node IP, reasons the node is not idle).
node_activities: Optional[Dict[str, Tuple[NodeIP, List[str]]]] = None
pending_resources: Dict[str, int] = field(default_factory=lambda: {})
# A mapping from node name (the same key as `usage_by_node`) to node type.
# Optional for deployment modes which have the concept of node types and
# backwards compatibility.
node_type_mapping: Optional[Dict[str, str]] = None
# Whether the autoscaler summary is v1 or v2.
legacy: bool = False
|
AutoscalerSummary
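The record above begins just after what is presumably a @dataclass decorator (cut off by the span). A stand-alone sketch of the same shape, with plain str aliases in place of NodeType/NodeIP/NodeStatus and without the NodeAvailabilitySummary dependency:

from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple

@dataclass
class SummarySketch:
    active_nodes: Dict[str, int]
    idle_nodes: Optional[Dict[str, int]]
    pending_nodes: List[Tuple[str, str, str]]  # (ip, node type, status)
    pending_launches: Dict[str, int]
    failed_nodes: List[Tuple[str, str]]
    pending_resources: Dict[str, int] = field(default_factory=dict)
    legacy: bool = False

s = SummarySketch(
    active_nodes={"cpu-node": 3},
    idle_nodes={"cpu-node": 1},
    pending_nodes=[("10.0.0.4", "gpu-node", "setting-up")],
    pending_launches={"gpu-node": 2},
    failed_nodes=[("10.0.0.9", "cpu-node")],
)
print(s.active_nodes, s.legacy)  # {'cpu-node': 3} False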
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 138567,
"end": 138828
}
|
class ____(VegaLiteSchema):
"""
BBox schema wrapper.
Bounding box https://tools.ietf.org/html/rfc7946#section-5
"""
_schema = {"$ref": "#/definitions/BBox"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
BBox
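Per RFC 7946 §5, a GeoJSON bounding box is [west, south, east, north], optionally extended with min/max elevation. A plain-Python illustration of values the wrapper above would validate:

bbox_2d = [-10.0, -10.0, 10.0, 10.0]              # [west, south, east, north]
bbox_3d = [-10.0, -10.0, 0.0, 10.0, 10.0, 100.0]  # with min/max elevation
for bbox in (bbox_2d, bbox_3d):
    assert len(bbox) in (4, 6)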
|
python
|
readthedocs__readthedocs.org
|
readthedocs/settings/base.py
|
{
"start": 696,
"end": 43766
}
|
class ____(Settings):
"""Community base settings, don't use this directly."""
# Django settings
SITE_ID = 1
ROOT_URLCONF = "readthedocs.urls"
LOGIN_REDIRECT_URL = "/dashboard/"
FORCE_WWW = False
SECRET_KEY = "replace-this-please" # noqa
ATOMIC_REQUESTS = True
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Debug settings
DEBUG = True
RTD_FORCE_SHOW_DEBUG_TOOLBAR = False
# Build FTD index for all versions
RTD_FILETREEDIFF_ALL = False
@property
def DEBUG_TOOLBAR_CONFIG(self):
def _show_debug_toolbar(request):
return (
request.environ.get("SERVER_NAME", None) != "testserver"
and self.SHOW_DEBUG_TOOLBAR
)
return {
"SHOW_TOOLBAR_CALLBACK": _show_debug_toolbar,
"DISABLE_PANELS": [
# Default ones
"debug_toolbar.panels.profiling.ProfilingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
                # Custom ones
                # We are disabling these because they take a lot of time to execute in the new dashboard.
                # We make intensive use of the ``include`` template tag there.
# It's a "known issue/bug" and there is no solution as far as we can tell.
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
],
}
@property
def SHOW_DEBUG_TOOLBAR(self):
"""
Show django-debug-toolbar on DEBUG or if it's forced by RTD_FORCE_SHOW_DEBUG_TOOLBAR.
This will show the debug toolbar on:
- Docker local instance
- web-extra production instance
"""
return self.DEBUG or self.RTD_FORCE_SHOW_DEBUG_TOOLBAR
# Domains and URLs
RTD_IS_PRODUCTION = False
PRODUCTION_DOMAIN = "readthedocs.org"
PUBLIC_DOMAIN = None
PUBLIC_DOMAIN_USES_HTTPS = False
PUBLIC_API_URL = "https://{}".format(PRODUCTION_DOMAIN)
RTD_INTERSPHINX_URL = "https://{}".format(PRODUCTION_DOMAIN)
RTD_EXTERNAL_VERSION_DOMAIN = "external-builds.readthedocs.io"
@property
def RTD_RESTRICTED_DOMAINS(self):
"""
Domains that are restricted for users to use as custom domains.
This is to avoid users hijacking our domains.
We return the last two parts of our public domains to cover all subdomains,
        e.g., if our domain is "app.readthedocs.org", we restrict all subdomains of "readthedocs.org".
If your domain is like "readthedocs.co.uk", you might want to override this property.
We recommend disallowing:
- Dashboard domain
- Public domain (from where documentation pages are served)
- External version domain (from where PR previews are served)
- Any public domains that point to the validation record (e.g., CNAME to readthedocs.io)
"""
domains = [
self.PRODUCTION_DOMAIN,
self.PUBLIC_DOMAIN,
self.RTD_EXTERNAL_VERSION_DOMAIN,
"rtfd.io",
"rtfd.org",
]
return [
".".join(domain.split(".")[-2:])
for domain in domains
]
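    # For example:
    #   ".".join("external-builds.readthedocs.io".split(".")[-2:])  # -> "readthedocs.io"
    #   ".".join("readthedocs.org".split(".")[-2:])                 # -> "readthedocs.org"
    # so every subdomain of each zone is restricted, not just the exact hostnames.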
# Doc Builder Backends
MKDOCS_BACKEND = "readthedocs.doc_builder.backends.mkdocs"
SPHINX_BACKEND = "readthedocs.doc_builder.backends.sphinx"
# slumber settings
SLUMBER_API_HOST = "https://readthedocs.org"
# Email
DEFAULT_FROM_EMAIL = "no-reply@readthedocs.org"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
SUPPORT_EMAIL = None
SUPPORT_FORM_ENDPOINT = None
# Sessions
SESSION_COOKIE_DOMAIN = "readthedocs.org"
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 30 * 24 * 60 * 60 # 30 days
SESSION_SAVE_EVERY_REQUEST = False
SESSION_COOKIE_SAMESITE = "Lax"
# CSRF
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_AGE = 30 * 24 * 60 * 60 # 30 days
# Security & X-Frame-Options Middleware
# https://docs.djangoproject.com/en/1.11/ref/middleware/#django.middleware.security.SecurityMiddleware
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_REFERRER_POLICY = "strict-origin-when-cross-origin"
X_FRAME_OPTIONS = "DENY"
# Pagination
# Only show 1 page on either side of the current page
PAGINATION_DEFAULT_WINDOW = 1
# Only show 1 page at the beginning and end
PAGINATION_DEFAULT_MARGIN = 1
# Read the Docs
READ_THE_DOCS_EXTENSIONS = ext
RTD_LATEST = "latest"
RTD_LATEST_VERBOSE_NAME = "latest"
RTD_STABLE = "stable"
RTD_STABLE_VERBOSE_NAME = "stable"
RTD_CLEAN_AFTER_BUILD = False
RTD_BUILD_HEALTHCHECK_TIMEOUT = 60 # seconds
RTD_BUILD_HEALTHCHECK_DELAY = 15 # seconds
RTD_MAX_CONCURRENT_BUILDS = 4
RTD_BUILDS_MAX_RETRIES = 25
RTD_BUILDS_RETRY_DELAY = 5 * 60 # seconds
RTD_BUILD_STATUS_API_NAME = "docs/readthedocs"
RTD_ANALYTICS_DEFAULT_RETENTION_DAYS = 30 * 3
RTD_AUDITLOGS_DEFAULT_RETENTION_DAYS = 30 * 3
# Number of days the validation process for a domain will be retried.
RTD_CUSTOM_DOMAINS_VALIDATION_PERIOD = 30
# Keep BuildData models on database during this time
RTD_TELEMETRY_DATA_RETENTION_DAYS = 30 * 6 # 180 days / 6 months
# Number of days an invitation is valid.
RTD_INVITATIONS_EXPIRATION_DAYS = 15
RTD_ENFORCE_BROWNOUTS_FOR_DEPRECATIONS = False
@property
def RTD_DEFAULT_FEATURES(self):
# Features listed here will be available to users that don't have a
# subscription or if their subscription doesn't include the feature.
# Depending on the feature type, the numeric value represents a
# number of days or limit of the feature.
from readthedocs.subscriptions import constants
from readthedocs.subscriptions.products import RTDProductFeature
return dict(
(
# Max number of domains allowed per project.
RTDProductFeature(type=constants.TYPE_CNAME, value=2).to_item(),
RTDProductFeature(type=constants.TYPE_EMBED_API).to_item(),
# Retention days for search analytics.
RTDProductFeature(
type=constants.TYPE_SEARCH_ANALYTICS,
value=self.RTD_ANALYTICS_DEFAULT_RETENTION_DAYS,
).to_item(),
# Retention days for page view analytics.
RTDProductFeature(
type=constants.TYPE_PAGEVIEW_ANALYTICS,
value=self.RTD_ANALYTICS_DEFAULT_RETENTION_DAYS,
).to_item(),
# Retention days for audit logs.
RTDProductFeature(
type=constants.TYPE_AUDIT_LOGS,
value=self.RTD_AUDITLOGS_DEFAULT_RETENTION_DAYS,
).to_item(),
# Max number of concurrent builds.
RTDProductFeature(
type=constants.TYPE_CONCURRENT_BUILDS,
value=self.RTD_MAX_CONCURRENT_BUILDS,
).to_item(),
# Max number of redirects allowed per project.
RTDProductFeature(
type=constants.TYPE_REDIRECTS_LIMIT, value=100
).to_item(),
)
)
# A dictionary of Stripe products mapped to a RTDProduct object.
# In .org we don't have subscriptions/products, default features are
# defined in RTD_DEFAULT_FEATURES.
RTD_PRODUCTS = {}
# Database and API hitting settings
DONT_HIT_DB = True
RTD_SAVE_BUILD_COMMANDS_TO_STORAGE = False
DATABASE_ROUTERS = ["readthedocs.core.db.MapAppsRouter"]
USER_MATURITY_DAYS = 7
# override classes
CLASS_OVERRIDES = {}
DOC_PATH_PREFIX = "_/"
RTD_EXT_THEME_DEV_SERVER = None
# Application classes
@property
def INSTALLED_APPS(self): # noqa
apps = [
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.humanize",
# readthedocs.core app needs to be before
# django.contrib.staticfiles to use our custom collectstatic
# command
"readthedocs.core",
"django.contrib.staticfiles",
# third party apps
"dj_pagination",
"taggit",
"django_gravatar",
"rest_framework",
"rest_framework.authtoken",
"rest_framework_api_key",
"generic_relations",
"corsheaders",
"annoying",
"django_extensions",
"crispy_forms",
"django_elasticsearch_dsl",
"django_filters",
"polymorphic",
"simple_history",
"djstripe",
"django_celery_beat",
"django_safemigrate.apps.SafeMigrateConfig",
"django_structlog",
# our apps
"readthedocs.projects",
"readthedocs.organizations",
"readthedocs.builds",
"readthedocs.doc_builder",
"readthedocs.oauth",
"readthedocs.redirects",
"readthedocs.sso",
"readthedocs.audit",
"readthedocs.rtd_tests",
"readthedocs.api.v2",
"readthedocs.api.v3",
"readthedocs.gold",
"readthedocs.payments",
"readthedocs.subscriptions",
"readthedocs.notifications",
"readthedocs.integrations",
"readthedocs.analytics",
"readthedocs.search",
"readthedocs.embed",
"readthedocs.telemetry",
"readthedocs.domains",
"readthedocs.invitations",
# allauth
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.github",
"readthedocs.allauth.providers.githubapp",
"allauth.socialaccount.providers.gitlab",
"allauth.socialaccount.providers.bitbucket_oauth2",
"allauth.mfa",
# Others
# NOTE: impersonate functionality is only enabled when ALLOW_ADMIN is True,
# but we still need to include it even when not enabled, since it has objects
# related to the user model that Django needs to know about when deleting users.
"impersonate",
]
if ext:
apps.append("readthedocsext.cdn")
apps.append("readthedocsext.donate")
apps.append("readthedocsext.spamfighting")
if self.SHOW_DEBUG_TOOLBAR:
apps.append("debug_toolbar")
if ext_theme:
apps.append("readthedocsext.theme")
return apps
@property
def CRISPY_TEMPLATE_PACK(self):
return "semantic-ui"
@property
def CRISPY_ALLOWED_TEMPLATE_PACKS(self):
return ("semantic-ui",)
@property
def USE_PROMOS(self): # noqa
return "readthedocsext.donate" in self.INSTALLED_APPS
@property
def MIDDLEWARE(self):
middlewares = [
"readthedocs.core.middleware.NullCharactersMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"allauth.account.middleware.AccountMiddleware",
"dj_pagination.middleware.PaginationMiddleware",
"csp.middleware.CSPMiddleware",
"readthedocs.core.middleware.UpdateCSPMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
"readthedocs.core.logs.ReadTheDocsRequestMiddleware",
"django_structlog.middlewares.RequestMiddleware",
]
if self.SHOW_DEBUG_TOOLBAR:
middlewares.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
if self.ALLOW_ADMIN:
middlewares.append("impersonate.middleware.ImpersonateMiddleware")
return middlewares
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {
"min_length": 9,
},
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Explicitly set the password hashers to the default ones,
# so we can change them in our test settings.
PASSWORD_HASHERS = PASSWORD_HASHERS
# Paths
SITE_ROOT = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
TEMPLATE_ROOT = os.path.join(SITE_ROOT, "readthedocs", "templates")
DOCROOT = os.path.join(SITE_ROOT, "user_builds")
LOGS_ROOT = os.path.join(SITE_ROOT, "logs")
PRODUCTION_ROOT = os.path.join(SITE_ROOT, "prod_artifacts")
PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, "media")
# Assets and media
STATIC_ROOT = os.path.join(SITE_ROOT, "static")
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(SITE_ROOT, "media/")
MEDIA_URL = "/media/"
ADMIN_MEDIA_PREFIX = "/media/admin/"
ADMIN_URL = "/admin"
STATICFILES_DIRS = [
os.path.join(SITE_ROOT, "media"),
]
STATICFILES_FINDERS = [
"readthedocs.core.static.SelectiveFileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"readthedocs.core.finders.DebugToolbarFinder",
]
PYTHON_MEDIA = False
# Content Security Policy
# https://django-csp.readthedocs.io/
CSP_FRAME_ANCESTORS = ("'none'",)
CSP_REPORT_URI = None
CSP_REPORT_ONLY = False
# Default to disallow everything, and then allow specific sources on each directive.
CSP_DEFAULT_SRC = ["'none'"]
CSP_IMG_SRC = [
"'self'",
# Some of our styles include images as data URLs.
"data:",
# We load avatars from GitHub, GitLab, and Bitbucket,
# and other services. They don't use a single specific domain,
# so we just allow any https domain here.
"https:",
]
CSP_BASE_URI = ["'self'"]
CSP_FRAME_SRC = [
# Stripe (used for Gold subscriptions)
"https://js.stripe.com/",
]
RTD_CSP_UPDATE_HEADERS = {}
@property
def CSP_CONNECT_SRC(self):
CSP_CONNECT_SRC = [
"'self'",
# Allow sentry to report errors.
"https://*.ingest.us.sentry.io",
# Allow fontawesome to load.
"https://ka-p.fontawesome.com",
"https://kit.fontawesome.com",
# Plausible analytics
"https://plausible.io/api/event",
]
CSP_CONNECT_SRC.append(f"ws://{self.PRODUCTION_DOMAIN}:10001/ws")
return CSP_CONNECT_SRC
@property
def CSP_SCRIPT_SRC(self):
CSP_SCRIPT_SRC = [
"'self'",
# Some of our JS deps are using eval.
"'unsafe-eval'",
# Allow fontawesome to load.
"https://kit.fontawesome.com",
# Stripe (used for Gold subscriptions)
"https://js.stripe.com/",
]
CSP_SCRIPT_SRC.append(self.STATIC_URL)
if self.RTD_EXT_THEME_DEV_SERVER:
CSP_SCRIPT_SRC.append(self.RTD_EXT_THEME_DEV_SERVER)
return CSP_SCRIPT_SRC
@property
def CSP_FONT_SRC(self):
CSP_FONT_SRC = [
"'self'",
# Allow fontawesome to load.
"data:",
"https://ka-p.fontawesome.com",
]
CSP_FONT_SRC.append(self.STATIC_URL)
if self.RTD_EXT_THEME_DEV_SERVER:
CSP_FONT_SRC.append(self.RTD_EXT_THEME_DEV_SERVER)
return CSP_FONT_SRC
@property
def CSP_STYLE_SRC(self):
CSP_STYLE_SRC = [
"'self'",
# We have lots of inline styles!
# TODO: we should remove this.
"'unsafe-inline'",
]
CSP_STYLE_SRC.append(self.STATIC_URL)
if self.RTD_EXT_THEME_DEV_SERVER:
CSP_STYLE_SRC.append(self.RTD_EXT_THEME_DEV_SERVER)
return CSP_STYLE_SRC
@property
def CSP_FORM_ACTION(self):
CSP_FORM_ACTION = [
"'self'",
# Chrome and Safari block form submissions if it redirects to a different domain.
# We redirect to external domains for some forms, like login.
"https://github.com",
"https://gitlab.com",
"https://bitbucket.org",
"https://id.atlassian.com",
"https://accounts.google.com",
# We also redirect to Stripe on subscription forms.
"https://billing.stripe.com",
"https://checkout.stripe.com",
]
# Allow our support form to submit to external domains.
if self.SUPPORT_FORM_ENDPOINT:
CSP_FORM_ACTION.append(self.SUPPORT_FORM_ENDPOINT)
return CSP_FORM_ACTION
# Django Storage subclass used to write build artifacts to cloud or local storage
# https://docs.readthedocs.io/page/development/settings.html#rtd-build-media-storage
RTD_BUILD_MEDIA_STORAGE = "readthedocs.builds.storage.BuildMediaFileSystemStorage"
RTD_BUILD_TOOLS_STORAGE = "readthedocs.builds.storage.BuildMediaFileSystemStorage"
RTD_BUILD_COMMANDS_STORAGE = (
"readthedocs.builds.storage.BuildMediaFileSystemStorage"
)
# This is for serving static files on proxito, not Django static files
# https://github.com/readthedocs/readthedocs.org/pull/9237
RTD_STATICFILES_STORAGE = "readthedocs.builds.storage.StaticFilesStorage"
@property
def TEMPLATES(self):
dirs = [self.TEMPLATE_ROOT]
if ext_theme:
dirs.insert(
0,
os.path.join(
os.path.dirname(readthedocsext.theme.__file__),
"templates",
),
)
# Disable ``cached.Loader`` on development
# https://docs.djangoproject.com/en/4.2/ref/templates/api/#django.template.loaders.cached.Loader
default_loaders = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
cached_loaders = [("django.template.loaders.cached.Loader", default_loaders)]
return [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": dirs,
"OPTIONS": {
"debug": self.DEBUG,
"loaders": default_loaders if self.DEBUG else cached_loaders,
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.request",
# Read the Docs processor
"readthedocs.core.context_processors.readthedocs_processor",
"readthedocs.core.context_processors.user_notifications",
],
},
},
]
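# Note on the loader switch above (descriptive only): with DEBUG on, the plain
# loaders re-read templates from disk on each load, while with DEBUG off the
# cached.Loader compiles each template once per process - which is exactly why
# it is disabled during development.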
# Cache
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"PREFIX": "docs",
}
}
CACHE_MIDDLEWARE_SECONDS = 60
# I18n
TIME_ZONE = "UTC"
USE_TZ = True
LANGUAGE_CODE = "en-us"
LANGUAGES = (
("ca", gettext("Catalan")),
("en", gettext("English")),
("es", gettext("Spanish")),
("pt-br", gettext("Brazilian Portuguese")),
("nb", gettext("Norwegian Bokmål")),
("fr", gettext("French")),
("ru", gettext("Russian")),
("de", gettext("German")),
("gl", gettext("Galician")),
("vi", gettext("Vietnamese")),
("zh-cn", gettext("Simplified Chinese")),
("zh-tw", gettext("Traditional Chinese")),
("ja", gettext("Japanese")),
("uk", gettext("Ukrainian")),
("it", gettext("Italian")),
("ko", gettext("Korean")),
)
LOCALE_PATHS = [
os.path.join(SITE_ROOT, "readthedocs", "locale"),
]
USE_I18N = True
USE_L10N = True
BUILD_TIME_LIMIT = 900 # seconds
@property
def BUILD_MEMORY_LIMIT(self):
"""
Set build memory limit dynamically, if in production, based on system memory.
We do this to avoid having separate build images. This assumes 1 build
process per server, which will be allowed to consume all available
memory.
"""
# Our normal default
default_memory_limit = "7g"
# Only run on our servers
if self.RTD_IS_PRODUCTION:
    limits = self._get_build_memory_limit()
    if limits is not None:
        _total_memory, memory_limit = limits
        memory_limit = f"{memory_limit}m"
    else:
        # Fall back to the default when the memory size can't be read.
        memory_limit = default_memory_limit
else:
    memory_limit = default_memory_limit
log.info(
"Using dynamic build limits.",
hostname=socket.gethostname(),
memory=memory_limit,
)
return memory_limit
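# Illustrative only (hypothetical numbers): on a builder where `free -m`
# reports ~16000 MB total, _get_build_memory_limit() (defined below) yields
# round(16000 - 1000, -2) == 15000, so the dynamic limit becomes "15000m".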
# Celery
CELERY_APP_NAME = "readthedocs"
CELERY_ALWAYS_EAGER = True
CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes
CELERY_SEND_TASK_ERROR_EMAILS = False
CELERY_IGNORE_RESULT = True
CELERYD_HIJACK_ROOT_LOGGER = False
# This stops us from pre-fetching a task that then sits around on the builder
CELERY_ACKS_LATE = True
# Don't queue a bunch of tasks in the workers
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_CREATE_MISSING_QUEUES = True
# https://github.com/readthedocs/readthedocs.org/issues/12317#issuecomment-3070950434
# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#visibility-timeout
BROKER_TRANSPORT_OPTIONS = {
'visibility_timeout': 18000, # 5 hours
}
CELERY_DEFAULT_QUEUE = "celery"
CELERYBEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
CELERYBEAT_SCHEDULE = {
"every-minute-finish-unhealthy-builds": {
"task": "readthedocs.projects.tasks.utils.finish_unhealthy_builds",
"schedule": crontab(minute="*"),
"options": {"queue": "web"},
},
"every-day-delete-old-search-queries": {
"task": "readthedocs.search.tasks.delete_old_search_queries_from_db",
"schedule": crontab(minute=0, hour=0),
"options": {"queue": "web"},
},
"every-day-delete-old-page-views": {
"task": "readthedocs.analytics.tasks.delete_old_page_counts",
"schedule": crontab(minute=27, hour="*/6"),
"options": {"queue": "web"},
},
"every-day-delete-old-buildata-models": {
"task": "readthedocs.telemetry.tasks.delete_old_build_data",
"schedule": crontab(minute=0, hour=2),
"options": {"queue": "web"},
},
"weekly-delete-old-personal-audit-logs": {
"task": "readthedocs.audit.tasks.delete_old_personal_audit_logs",
"schedule": crontab(day_of_week="wed", minute=0, hour=7),
"options": {"queue": "web"},
},
"every-day-resync-repositories-from-sso-organizations": {
"task": "readthedocs.oauth.tasks.sync_remote_repositories_from_sso_organizations",
"schedule": crontab(minute=0, hour=4),
"options": {"queue": "web"},
},
"quarter-archive-builds": {
"task": "readthedocs.builds.tasks.archive_builds_task",
"schedule": crontab(minute="*/15"),
"options": {"queue": "web"},
"kwargs": {
"days": 1,
"limit": 500,
"delete": True,
},
},
"every-30m-delete-inactive-external-versions": {
"task": "readthedocs.builds.tasks.delete_closed_external_versions",
# Increase the frequency because we have 255k closed versions and they keep growing.
# It's better to increase this frequency than the `limit=` of the task.
"schedule": crontab(minute="*/30", hour="*"),
"options": {"queue": "web"},
},
"every-day-resync-remote-repositories": {
"task": "readthedocs.oauth.tasks.sync_active_users_remote_repositories",
"schedule": crontab(minute=30, hour=2),
"options": {"queue": "web"},
},
"every-day-email-pending-custom-domains": {
"task": "readthedocs.domains.tasks.email_pending_custom_domains",
"schedule": crontab(minute=0, hour=3),
"options": {"queue": "web"},
},
"every-15m-delete-pidbox-objects": {
"task": "readthedocs.core.tasks.cleanup_pidbox_keys",
"schedule": crontab(minute="*/15"),
"options": {"queue": "web"},
},
"every-day-delete-old-revoked-build-api-keys": {
"task": "readthedocs.api.v2.tasks.delete_old_revoked_build_api_keys",
"schedule": crontab(minute=0, hour=4),
"options": {"queue": "web"},
},
}
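# For reference, the crontab() helpers above follow standard cron semantics:
# crontab(minute="*/15") fires every 15 minutes, crontab(minute=0, hour=2)
# fires once a day at 02:00, and crontab(day_of_week="wed", minute=0, hour=7)
# fires weekly on Wednesdays at 07:00.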
# Sentry
SENTRY_CELERY_IGNORE_EXPECTED = True
DJANGO_STRUCTLOG_CELERY_ENABLED = True
# Docker
DOCKER_ENABLE = False
DOCKER_SOCKET = "unix:///var/run/docker.sock"
# User used to create the container.
# In production we use the same user as the one defined by the
# ``USER docs`` instruction inside the Dockerfile.
# In development, we can use the "UID:GID" of the current user running the
# instance to avoid file permissions issues.
# https://docs.docker.com/engine/reference/run/#user
RTD_DOCKER_USER = "docs:docs"
RTD_DOCKER_SUPER_USER = "root:root"
RTD_DOCKER_WORKDIR = "/home/docs/"
RTD_DOCKER_COMPOSE = False
DOCKER_VERSION = "auto"
DOCKER_DEFAULT_VERSION = "ubuntu-22.04"
DOCKER_IMAGE = "{}:{}".format(
constants_docker.DOCKER_DEFAULT_IMAGE, DOCKER_DEFAULT_VERSION
)
# Additional binds for the build container
RTD_DOCKER_ADDITIONAL_BINDS = {}
RTD_DOCKER_BUILD_SETTINGS = constants_docker.RTD_DOCKER_BUILD_SETTINGS
# This is used for the image used to clone the users repo,
# since we can't read their config file image choice before cloning
RTD_DOCKER_CLONE_IMAGE = RTD_DOCKER_BUILD_SETTINGS["os"]["ubuntu-22.04"]
def _get_build_memory_limit(self):
"""
Return the build memory limit based on available system memory.
We subtract ~1000Mb for overhead of processes and base system, and set
the build time as proportional to the memory limit.
"""
try:
total_memory = int(
subprocess.check_output(
"free -m | awk '/^Mem:/{print $2}'",
shell=True,
)
)
return total_memory, round(total_memory - 1000, -2)
except ValueError:
    # On systems without a `free` command the shell pipeline returns a
    # non-numeric string and int() raises a ValueError.
    log.exception("Failed to get memory size, using default Docker limits.")
    return None
# Allauth
ACCOUNT_ADAPTER = "readthedocs.core.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = 'readthedocs.core.adapters.SocialAccountAdapter'
ACCOUNT_SIGNUP_FIELDS = ['username*', 'email*', 'password1*', 'password2*']
# By preventing enumeration, we will always send an email,
# even if the email is not registered, that's hurting
# our email reputation. We are okay with people knowing
# if an email is registered or not.
ACCOUNT_PREVENT_ENUMERATION = False
# Make email verification mandatory.
# Users won't be able to login until they verify the email address.
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGIN_METHODS = ["username", "email"]
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_STORE_TOKENS = True
_SOCIALACCOUNT_PROVIDERS = {
"github": {
"APPS": [
{
"name": "GitHub OAuth",
"client_id": "123",
"secret": "456",
"key": "",
"settings": {
"hidden": False,
"hidden_on_login": False,
"hidden_on_connect": False,
"priority": 10,
},
},
],
"SCOPE": [
"user:email",
"read:org",
"admin:repo_hook",
"repo:status",
],
},
"githubapp": {
"APPS": [
{
"name": "GitHub App",
"client_id": "123",
"secret": "456",
"key": "",
"settings": {
"hidden": False,
"hidden_on_login": False,
"hidden_on_connect": False,
"priority": 20,
},
},
],
# Scope is determined by the GitHub App permissions.
"SCOPE": [],
},
"gitlab": {
"APPS": [
{"client_id": "123", "secret": "456", "key": "", "settings": {"priority": 30}},
],
# GitLab returns the primary email only; we can trust it's verified.
"VERIFIED_EMAIL": True,
"SCOPE": [
"api",
"read_user",
],
},
"bitbucket_oauth2": {
"APPS": [
{"client_id": "123", "secret": "456", "key": "", "settings": {"priority": 40}},
],
# Bitbucket scope/permissions are determined by the OAuth consumer setup on bitbucket.org.
},
}
@property
def SOCIALACCOUNT_PROVIDERS(self):
"""
This is useful to override in a subclass, mainly to add the secrets when deploying.
Our ops repos have a complex (shared) inheritance structure, so it's easier this way.
"""
return self._SOCIALACCOUNT_PROVIDERS
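# A minimal sketch (hypothetical subclass and environment variable, not part
# of this settings file) of the override pattern described in the docstring
# above:
#
#     class ProductionSettings(CommunityBaseSettings):
#         @property
#         def SOCIALACCOUNT_PROVIDERS(self):
#             providers = super().SOCIALACCOUNT_PROVIDERS
#             providers["github"]["APPS"][0]["secret"] = os.environ["GITHUB_SECRET"]
#             return providers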
ACCOUNT_FORMS = {
"signup": "readthedocs.forms.SignupFormWithNewsletter",
}
GITHUB_APP_ID = 1234
GITHUB_APP_NAME = "readthedocs"
GITHUB_APP_PRIVATE_KEY = ""
GITHUB_APP_WEBHOOK_SECRET = ""
RTD_ALLOW_GITHUB_APP = True
@property
def GITHUB_APP_CLIENT_ID(self):
return self.SOCIALACCOUNT_PROVIDERS["githubapp"]["APPS"][0]["client_id"]
# CORS
# Don't allow sending cookies in cross-domain requests, this is so we can
# relax our CORS headers for more views, but at the same time not opening
# users to CSRF attacks.
CORS_ALLOW_CREDENTIALS = False
# Allow cross-site requests from any origin,
# all information from our allowed endpoints is public.
CORS_ALLOW_ALL_ORIGINS = True
CORS_ALLOW_HEADERS = list(default_headers) + [
"x-hoverxref-version",
]
# Additional protection to allow only idempotent methods.
CORS_ALLOW_METHODS = [
"GET",
"OPTIONS",
"HEAD",
]
# URLs to allow CORS to read from unauthed.
CORS_URLS_REGEX = re.compile(
r"""
^(
/api/v2/search
|/api/v2/docsearch
|/api/v2/embed
|/api/v3/embed
|/api/v2/sustainability
)
""",
re.VERBOSE,
)
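# Quick illustrative check of the verbose pattern above (behaviour sketch,
# not part of the settings): it anchors at the start of the path, so
# CORS_URLS_REGEX.match("/api/v2/search") matches while
# CORS_URLS_REGEX.match("/api/v3/projects/") does not.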
# RTD Settings
ALLOW_PRIVATE_REPOS = False
DEFAULT_PRIVACY_LEVEL = "public"
DEFAULT_VERSION_PRIVACY_LEVEL = "public"
ALLOW_ADMIN = True
# Organization settings
RTD_ALLOW_ORGANIZATIONS = False
RTD_ORG_DEFAULT_STRIPE_SUBSCRIPTION_PRICE = "trial-v2-monthly"
RTD_ORG_TRIAL_PERIOD_DAYS = 30
# Elasticsearch settings.
ELASTICSEARCH_DSL = {
"default": {
"hosts": "http://elastic:password@search:9200",
},
}
# Chunk size for elasticsearch reindex celery tasks
ES_TASK_CHUNK_SIZE = 500
# Info from Honza about this:
# The key to determine shard number is actually usually not the node count,
# but the size of your data.
# There are advantages to just having a single shard in an index since
# you don't have to do the distribute/collect steps when executing a search.
# If your data will allow it (not significantly larger than 40GB)
# I would recommend going to a single shard and one replica meaning
# any of the two nodes will be able to serve any search without talking to the other one.
# Scaling to more searches will then just mean adding a third node
# and a second replica resulting in immediate 50% bump in max search throughput.
ES_INDEXES = {
"project": {
"name": "project_index",
"settings": {"number_of_shards": 1, "number_of_replicas": 1},
},
"page": {
"name": "page_index",
"settings": {
"number_of_shards": 1,
"number_of_replicas": 1,
},
},
}
# ANALYZER = 'analysis': {
# 'analyzer': {
# 'default_icu': {
# 'type': 'custom',
# 'tokenizer': 'icu_tokenizer',
# 'filter': ['word_delimiter', 'icu_folding', 'icu_normalizer'],
# }
# }
# }
# Disable auto refresh for increasing index performance
ELASTICSEARCH_DSL_AUTO_REFRESH = False
ALLOWED_HOSTS = ["*"]
ABSOLUTE_URL_OVERRIDES = {"auth.user": lambda o: "/profiles/{}/".format(o.username)}
INTERNAL_IPS = ("127.0.0.1",)
# django-impersonate.
IMPERSONATE = {
# By default, only staff users can impersonate.
"REQUIRE_SUPERUSER": True,
}
# Taggit
# https://django-taggit.readthedocs.io
TAGGIT_TAGS_FROM_STRING = "readthedocs.projects.tag_utils.rtd_parse_tags"
# DJStripe values -- **CHANGE THESE IN PRODUCTION**
STRIPE_LIVE_MODE = False # Change to True in production
# These values shouldn't need to change.
DJSTRIPE_FOREIGN_KEY_TO_FIELD = "id"
DJSTRIPE_USE_NATIVE_JSONFIELD = (
True # We recommend setting to True for new installations
)
# Disable adding djstripe metadata to the Customer objects.
# We are managing the subscriber relationship by ourselves,
# since we have subscriptions attached to an organization or gold user
# we can't make use of the DJSTRIPE_SUBSCRIBER_MODEL setting.
DJSTRIPE_SUBSCRIBER_CUSTOMER_KEY = None
# Webhook URL for BotDog to post messages in Slack #sales channel:
# https://api.slack.com/apps/A01ML7J7N4T/incoming-webhooks
SLACK_WEBHOOK_RTD_NOTIFICATIONS_CHANNEL = None # https://hooks.slack.com/services/...
# Do Not Track support
DO_NOT_TRACK_ENABLED = False
# Advertising configuration defaults
ADSERVER_API_BASE = None
ADSERVER_API_KEY = None
ADSERVER_API_TIMEOUT = 0.35 # seconds
# Misc application settings
GLOBAL_ANALYTICS_CODE = None
DASHBOARD_ANALYTICS_CODE = None # For the dashboard, not docs
GRAVATAR_DEFAULT_IMAGE = (
"https://assets.readthedocs.org/static/images/silhouette.png" # NOQA
)
OAUTH_AVATAR_USER_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
OAUTH_AVATAR_ORG_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
REST_FRAMEWORK = {
"DEFAULT_FILTER_BACKENDS": (
"django_filters.rest_framework.DjangoFilterBackend",
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination", # NOQA
"DEFAULT_THROTTLE_RATES": {
"anon": "5/minute",
"user": "60/minute",
},
"PAGE_SIZE": 10,
"TEST_REQUEST_DEFAULT_FORMAT": "json",
}
REST_FLEX_FIELDS = {
"RECURSIVE_EXPANSION_PERMITTED": False,
}
SILENCED_SYSTEM_CHECKS = ["fields.W342"]
# Logging
LOG_FORMAT = "%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s"
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"default": {
"format": LOG_FORMAT,
"datefmt": "%d/%b/%Y %H:%M:%S",
},
# structlog
"plain_console": {
"()": structlog.stdlib.ProcessorFormatter,
"processors": [
structlog.stdlib.ProcessorFormatter.remove_processors_meta,
structlog.dev.ConsoleRenderer(colors=False),
],
# Allows adding extra data to log entries generated via the ``logging`` module
# See https://www.structlog.org/en/stable/standard-library.html#rendering-using-structlog-based-formatters-within-logging
"foreign_pre_chain": shared_processors,
},
"colored_console": {
"()": structlog.stdlib.ProcessorFormatter,
"processors": [
structlog.stdlib.ProcessorFormatter.remove_processors_meta,
structlog.dev.ConsoleRenderer(colors=True),
],
# Allows adding extra data to log entries generated via the ``logging`` module
# See https://www.structlog.org/en/stable/standard-library.html#rendering-using-structlog-based-formatters-within-logging
"foreign_pre_chain": shared_processors,
},
"key_value": {
"()": structlog.stdlib.ProcessorFormatter,
"processors": [
structlog.processors.TimeStamper(fmt="iso"),
structlog.stdlib.ProcessorFormatter.remove_processors_meta,
structlog.processors.KeyValueRenderer(
key_order=["timestamp", "level", "event", "logger"]
),
],
# Allows adding extra data to log entries generated via the ``logging`` module
# See https://www.structlog.org/en/stable/standard-library.html#rendering-using-structlog-based-formatters-within-logging
"foreign_pre_chain": shared_processors,
},
},
"handlers": {
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "plain_console",
},
"debug": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOGS_ROOT, "debug.log"),
"formatter": "key_value",
},
"null": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": { # root logger
"handlers": ["debug", "console"],
# Always send from the root, handlers can filter levels
"level": "INFO",
},
"docker.utils.config": {
"handlers": ["null"],
# Don't double log at the root logger for these.
"propagate": False,
},
"django_structlog.middlewares.request": {
"handlers": ["null"],
# Don't double log at the root logger for these.
"propagate": False,
},
"readthedocs": {
"handlers": ["debug", "console"],
"level": "DEBUG",
# Don't double log at the root logger for these.
"propagate": False,
},
"django.security.DisallowedHost": {
"handlers": ["null"],
"propagate": False,
},
"elastic_transport.transport": {
"handlers": ["null"],
"propagate": False,
},
"celery.worker.consumer.gossip": {
"handlers": ["null"],
"propagate": False,
},
},
}
# MailerLite API for newsletter signups
MAILERLITE_API_SUBSCRIBERS_URL = "https://api.mailerlite.com/api/v2/subscribers"
MAILERLITE_API_ONBOARDING_GROUP_ID = None
MAILERLITE_API_ONBOARDING_GROUP_URL = None
MAILERLITE_API_KEY = None
RTD_EMBED_API_EXTERNAL_DOMAINS = [
r"^docs\.python\.org$",
r"^docs\.scipy\.org$",
r"^docs\.sympy\.org$",
r"^numpy\.org$",
]
RTD_EMBED_API_PAGE_CACHE_TIMEOUT = 5 * 10
RTD_EMBED_API_DEFAULT_REQUEST_TIMEOUT = 1
RTD_EMBED_API_DOMAIN_RATE_LIMIT = 50
RTD_EMBED_API_DOMAIN_RATE_LIMIT_TIMEOUT = 60
RTD_SPAM_THRESHOLD_DONT_SHOW_ADS = 100
RTD_SPAM_THRESHOLD_DENY_ON_ROBOTS = 200
RTD_SPAM_THRESHOLD_DONT_SHOW_DASHBOARD = 300
RTD_SPAM_THRESHOLD_DONT_SERVE_DOCS = 500
RTD_SPAM_THRESHOLD_REMOVE_FROM_SEARCH_INDEX = 500
RTD_SPAM_THRESHOLD_DELETE_PROJECT = 1000
RTD_SPAM_MAX_SCORE = 9999
S3_PROVIDER = "AWS"
# Used by readthedocs.aws.security_token_service.
AWS_STS_ASSUME_ROLE_ARN = "arn:aws:iam::1234:role/SomeRole"
@property
def STORAGES(self):
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html
return {
"staticfiles": {
"BACKEND": "readthedocs.storage.s3_storage.S3StaticStorage"
},
"usercontent": {
"BACKEND": "django.core.files.storage.FileSystemStorage",
"OPTIONS": {
"location": Path(self.MEDIA_ROOT) / "usercontent",
"allow_overwrite": True,
}
},
}
@property
def USING_AWS(self):
"""Return True if we are using AWS as our storage/cloud provider."""
return self.S3_PROVIDER == "AWS"
|
CommunityBaseSettings
|
python
|
davidhalter__jedi
|
jedi/inference/names.py
|
{
"start": 21408,
"end": 21689
}
|
class ____:
def __init__(self, wrapped_name):
self._wrapped_name = wrapped_name
def __getattr__(self, name):
return getattr(self._wrapped_name, name)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_name)
|
NameWrapper
|
python
|
pyca__cryptography
|
tests/x509/test_x509_ext.py
|
{
"start": 160552,
"end": 169851
}
|
class ____:
def test_invalid_distribution_points(self):
with pytest.raises(TypeError):
x509.CRLDistributionPoints(
["notadistributionpoint"], # type:ignore[list-item]
)
def test_iter_len(self):
cdp = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://domain")],
None,
None,
None,
),
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
None,
),
]
)
assert len(cdp) == 2
assert list(cdp) == [
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://domain")],
None,
None,
None,
),
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
None,
),
]
def test_iter_input(self):
points = [
x509.DistributionPoint(
[x509.UniformResourceIdentifier("http://domain")],
None,
None,
None,
),
]
cdp = x509.CRLDistributionPoints(iter(points))
assert list(cdp) == points
def test_repr(self):
cdp = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
None,
),
]
)
assert repr(cdp) == (
"<CRLDistributionPoints([<DistributionPoint(full_name=[<Unifo"
"rmResourceIdentifier(value='ftp://domain')>], relative"
"_name=None, reasons=frozenset({<ReasonFlags.key_compromise: "
"'keyCompromise'>}), crl_issuer=None)>])>"
)
def test_eq(self):
cdp = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
cdp2 = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
assert cdp == cdp2
def test_ne(self):
cdp = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
cdp2 = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain2")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
cdp3 = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
cdp4 = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing2")],
),
]
)
assert cdp != cdp2
assert cdp != cdp3
assert cdp != cdp4
assert cdp != object()
def test_hash(self):
cdp = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
cdp2 = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
cdp3 = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset([x509.ReasonFlags.key_compromise]),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
assert hash(cdp) == hash(cdp2)
assert hash(cdp) != hash(cdp3)
def test_indexing(self):
ci = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing2")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing3")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing4")],
),
x509.DistributionPoint(
None,
None,
None,
[x509.UniformResourceIdentifier("uri://thing5")],
),
]
)
assert ci[-1] == ci[4]
assert ci[2:6:2] == [ci[2], ci[4]]
def test_public_bytes(self):
ext = x509.CRLDistributionPoints(
[
x509.DistributionPoint(
[x509.UniformResourceIdentifier("ftp://domain")],
None,
frozenset(
[
x509.ReasonFlags.key_compromise,
x509.ReasonFlags.ca_compromise,
]
),
[x509.UniformResourceIdentifier("uri://thing")],
),
]
)
assert (
ext.public_bytes()
== b"0'0%\xa0\x10\xa0\x0e\x86\x0cftp://domain\x81\x02\x05`\xa2\r"
b"\x86\x0buri://thing"
)
|
TestCRLDistributionPoints
|
python
|
pytorch__pytorch
|
torch/onnx/_internal/fx/passes/type_promotion.py
|
{
"start": 1630,
"end": 3477
}
|
class ____(abc.ABC):
"""Base class for type promotion rule per 'torch.ops.{namespace}.{op_name}'."""
def __init__(self, namespace: str, op_name: str) -> None:
self.namespace = namespace
self.op_name = op_name
# Make this abstract as well because subclass needs to override __eq__().
# A class that overrides __eq__() and does not define __hash__() will have its __hash__() implicitly set to None.
# Ref: https://docs.python.org/3/reference/datamodel.html#object.__hash__
@abc.abstractmethod
def __hash__(self) -> int: ...
@abc.abstractmethod
def __repr__(self) -> str: ...
@abc.abstractmethod
def __eq__(self, other: object) -> bool: ...
def is_valid(self) -> bool:
"""Check if the rule is valid."""
# This always returns a module. If the module does not exist it will be created.
module = getattr(torch.ops, self.namespace)
py_op = getattr(module, self.op_name, None)
if py_op is None:
logger.warning(
"Cannot find op: %s in module: %s", self.op_name, self.namespace
)
return False
if not isinstance(py_op, torch._ops.OpOverloadPacket):
logger.warning(
"Op: torch.ops.%s.%s is not an OpOverloadPacket, got: %s",
self.namespace,
self.op_name,
type(py_op),
)
return False
return True
@abc.abstractmethod
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
"""Preview type promotion results for provided set of args and kwargs.
Returns a TypePromotionSnapshot object that contains the promoted dtypes for
the arguments and the expected output dtype.
"""
...
|
TypePromotionRule
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 136459,
"end": 144983
}
|
class ____(Database):
field_types = {
'AUTO': 'INTEGER AUTO_INCREMENT',
'BIGAUTO': 'BIGINT AUTO_INCREMENT',
'BOOL': 'BOOL',
'DECIMAL': 'NUMERIC',
'DOUBLE': 'DOUBLE PRECISION',
'FLOAT': 'FLOAT',
'UUID': 'VARCHAR(40)',
'UUIDB': 'VARBINARY(16)'}
operations = {
'LIKE': 'LIKE BINARY',
'ILIKE': 'LIKE',
'REGEXP': 'REGEXP BINARY',
'IREGEXP': 'REGEXP',
'XOR': 'XOR'}
param = '%s'
quote = '``'
compound_select_parentheses = CSQ_PARENTHESES_UNNESTED
for_update = True
index_using_precedes_table = True
limit_max = 2 ** 64 - 1
safe_create_index = False
safe_drop_index = False
sql_mode = 'PIPES_AS_CONCAT'
def init(self, database, **kwargs):
params = {
'charset': 'utf8',
'sql_mode': self.sql_mode,
'use_unicode': True}
params.update(kwargs)
if 'password' in params and mysql_passwd:
params['passwd'] = params.pop('password')
super(MySQLDatabase, self).init(database, **params)
def _connect(self):
if mysql is None:
raise ImproperlyConfigured('MySQL driver not installed!')
conn = mysql.connect(db=self.database, autocommit=True,
**self.connect_params)
return conn
def _set_server_version(self, conn):
try:
version_raw = conn.server_version
except AttributeError:
version_raw = conn.get_server_info()
self.server_version = self._extract_server_version(version_raw)
def _extract_server_version(self, version):
if isinstance(version, tuple):
return version
version = version.lower()
if 'maria' in version:
match_obj = re.search(r'(1\d\.\d+\.\d+)', version)
else:
match_obj = re.search(r'(\d\.\d+\.\d+)', version)
if match_obj is not None:
return tuple(int(num) for num in match_obj.groups()[0].split('.'))
warnings.warn('Unable to determine MySQL version: "%s"' % version)
return (0, 0, 0) # Unable to determine version!
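# Illustrative examples (hypothetical version strings) for the parsing
# logic above:
#   _extract_server_version("5.7.42-log") -> (5, 7, 42)
#   _extract_server_version("10.6.12-MariaDB") -> (10, 6, 12)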
def is_connection_usable(self):
if self._state.closed:
return False
conn = self._state.conn
if hasattr(conn, 'ping'):
if self.server_version[0] == 8:
args = ()
else:
args = (False,)
try:
conn.ping(*args)
except Exception:
return False
return True
def default_values_insert(self, ctx):
return ctx.literal('() VALUES ()')
def begin(self, isolation_level=None):
if self.is_closed():
self.connect()
with __exception_wrapper__:
curs = self.cursor()
if isolation_level:
curs.execute('SET TRANSACTION ISOLATION LEVEL %s' %
isolation_level)
curs.execute('BEGIN')
def get_tables(self, schema=None):
query = ('SELECT table_name FROM information_schema.tables '
'WHERE table_schema = DATABASE() AND table_type != %s '
'ORDER BY table_name')
return [table for table, in self.execute_sql(query, ('VIEW',))]
def get_views(self, schema=None):
query = ('SELECT table_name, view_definition '
'FROM information_schema.views '
'WHERE table_schema = DATABASE() ORDER BY table_name')
cursor = self.execute_sql(query)
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_indexes(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
unique = set()
indexes = {}
for row in cursor.fetchall():
if not row[1]:
unique.add(row[2])
indexes.setdefault(row[2], [])
indexes[row[2]].append(row[4])
return [IndexMetadata(name, None, indexes[name], name in unique, table)
for name in indexes]
def get_columns(self, table, schema=None):
sql = """
SELECT column_name, is_nullable, data_type, column_default
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()
ORDER BY ordinal_position"""
cursor = self.execute_sql(sql, (table,))
pks = set(self.get_primary_keys(table))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
for name, null, dt, df in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
return [row[4] for row in
filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())]
def get_foreign_keys(self, table, schema=None):
query = """
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL"""
cursor = self.execute_sql(query, (table,))
return [
ForeignKeyMetadata(column, dest_table, dest_column, table)
for column, dest_table, dest_column in cursor.fetchall()]
def get_binary_type(self):
return mysql.Binary
def conflict_statement(self, on_conflict, query):
if not on_conflict._action: return
action = on_conflict._action.lower()
if action == 'replace':
return SQL('REPLACE')
elif action == 'ignore':
return SQL('INSERT IGNORE')
elif action != 'update':
raise ValueError('Un-supported action for conflict resolution. '
'MySQL supports REPLACE, IGNORE and UPDATE.')
def conflict_update(self, on_conflict, query):
if on_conflict._where or on_conflict._conflict_target or \
on_conflict._conflict_constraint:
raise ValueError('MySQL does not support the specification of '
'where clauses or conflict targets for conflict '
'resolution.')
updates = []
if on_conflict._preserve:
# Here we need to determine which function to use, which varies
# depending on the MySQL server version. MySQL and MariaDB prior to
# 10.3.3 use "VALUES", while MariaDB 10.3.3+ uses "VALUE".
version = self.server_version or (0,)
if version[0] == 10 and version >= (10, 3, 3):
VALUE_FN = fn.VALUE
else:
VALUE_FN = fn.VALUES
for column in on_conflict._preserve:
entity = ensure_entity(column)
expression = NodeList((
ensure_entity(column),
SQL('='),
VALUE_FN(entity)))
updates.append(expression)
if on_conflict._update:
for k, v in on_conflict._update.items():
if not isinstance(v, Node):
# Attempt to resolve string field-names to their respective
# field object, to apply data-type conversions.
if isinstance(k, basestring):
k = getattr(query.table, k)
if isinstance(k, Field):
v = k.to_value(v)
else:
v = Value(v, unpack=False)
updates.append(NodeList((ensure_entity(k), SQL('='), v)))
if updates:
return NodeList((SQL('ON DUPLICATE KEY UPDATE'),
CommaNodeList(updates)))
def extract_date(self, date_part, date_field):
return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field)))
def truncate_date(self, date_part, date_field):
return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part],
python_value=simple_date_time)
def to_timestamp(self, date_field):
return fn.UNIX_TIMESTAMP(date_field)
def from_timestamp(self, date_field):
return fn.FROM_UNIXTIME(date_field)
def random(self):
return fn.rand()
def get_noop_select(self, ctx):
return ctx.literal('DO 0')
# TRANSACTION CONTROL.
|
MySQLDatabase
|
python
|
bokeh__bokeh
|
src/bokeh/events.py
|
{
"start": 23678,
"end": 24157
}
|
class ____(PointEvent):
''' Announce the end of a rotate event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'rotateend'
|
RotateEnd
|
python
|
huggingface__transformers
|
src/transformers/models/mixtral/modular_mixtral.py
|
{
"start": 9441,
"end": 11085
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: MixtralConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MixtralAttention(config, layer_idx)
self.mlp = MixtralSparseMoeBlock(config)
self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
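# Descriptive note: the forward pass above is the standard pre-norm
# transformer block, i.e. x = x + SelfAttn(RMSNorm(x)) followed by
# x = x + SparseMoE(RMSNorm(x)).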
|
MixtralDecoderLayer
|
python
|
coleifer__peewee
|
tests/pwiz_integration.py
|
{
"start": 1172,
"end": 1522
}
|
class ____(object):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._buffer = StringIO()
return self
def __exit__(self, *args):
self.data = self._buffer.getvalue()
sys.stdout = self._stdout
EXPECTED = """
from peewee import *
database = SqliteDatabase('peewee_test.db')
|
capture_output
|
python
|
has2k1__plotnine
|
plotnine/composition/_plot_layout.py
|
{
"start": 277,
"end": 3732
}
|
class ____(ComposeAddable):
"""
Customise the layout of plots in a composition
"""
nrow: int | None = None
"""
Number of rows
"""
ncol: int | None = None
"""
Number of columns
"""
byrow: bool | None = None
"""
How to place plots into the grid.
If None or True, they are placed row by row, left to right.
If False, they are placed column by column, top to bottom.
"""
widths: Sequence[float] | None = None
"""
Relative widths of each column
"""
heights: Sequence[float] | None = None
"""
Relative heights of each row
"""
_cmp: Compose = field(init=False, repr=False)
"""
Composition that this layout is attached to
"""
def __radd__(self, cmp: Compose) -> Compose:
"""
Add plot layout to composition
"""
cmp.layout = self
return cmp
def _setup(self, cmp: Compose):
"""
Setup default parameters as they are expected by the layout manager
- Ensure nrow and ncol have values
- Ensure the widths & heights are set and normalised to mean=1
"""
from . import Beside, Stack
# setup nrow & ncol
if isinstance(cmp, Beside):
if self.ncol is None:
self.ncol = len(cmp)
elif self.ncol < len(cmp):
raise ValueError(
"Composition has more items than the layout columns."
)
if self.nrow is None:
self.nrow = 1
elif isinstance(cmp, Stack):
if self.nrow is None:
self.nrow = len(cmp)
elif self.nrow < len(cmp):
raise ValueError(
"Composition has more items than the layout rows."
)
if self.ncol is None:
self.ncol = 1
else:
from plotnine.facets.facet_wrap import wrap_dims
self.nrow, self.ncol = wrap_dims(len(cmp), self.nrow, self.ncol)
nrow, ncol = self.nrow, self.ncol
# byrow
if self.byrow is None:
self.byrow = True
# setup widths & heights
ws, hs = self.widths, self.heights
if ws is None:
ws = (1 / ncol,) * ncol
elif len(ws) != ncol:
ws = repeat(ws, ncol)
if hs is None:
hs = (1 / nrow,) * nrow
elif len(hs) != nrow:
hs = repeat(hs, nrow)
self.widths = normalise(ws)
self.heights = normalise(hs)
def update(self, other: plot_layout):
"""
Update this layout with the contents of other
"""
if other.widths:
self.widths = other.widths
if other.heights:
self.heights = other.heights
if other.ncol:
self.ncol = other.ncol
if other.nrow:
self.nrow = other.nrow
if other.byrow is not None:
self.byrow = other.byrow
def repeat(seq: Sequence[float], n: int) -> list[float]:
"""
Ensure returned sequence has n values, repeat as necessary
"""
return [val for _, val in zip(range(n), cycle(seq))]
def normalise(seq: Sequence[float]) -> list[float]:
"""
Normalise seq so that the mean is 1
"""
mean = sum(seq) / len(seq)
if mean == 0:
raise ValueError("Cannot rescale: mean is zero")
return [x / mean for x in seq]
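# Illustrative behaviour of the module-level helpers above:
#   repeat([1, 2], 5) -> [1, 2, 1, 2, 1]
#   normalise([1, 2, 3]) -> [0.5, 1.0, 1.5]  # rescaled so the mean is 1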
|
plot_layout
|
python
|
numba__numba
|
numba/core/typing/collections.py
|
{
"start": 1687,
"end": 2381
}
|
class ____(AbstractTemplate):
def generic(self, args, kws):
seq, idx, value = args
if isinstance(seq, types.MutableSequence):
idx = normalize_1d_index(idx)
if isinstance(idx, types.SliceType):
return signature(types.none, seq, idx, seq)
elif isinstance(idx, types.Integer):
if not self.context.can_convert(value, seq.dtype):
msg = "invalid setitem with value of {} to element of {}"
raise errors.TypingError(msg.format(types.unliteral(value), seq.dtype))
return signature(types.none, seq, idx, seq.dtype)
@infer_global(operator.delitem)
|
SetItemSequence
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/conjecture/junkdrawer.py
|
{
"start": 1534,
"end": 5671
}
|
class ____(Sequence[int]):
"""Class for storing a list of non-negative integers compactly.
We store them as the smallest size integer array we can get
away with. When we try to add an integer that is too large,
we upgrade the array to the smallest word size needed to store
the new value."""
ARRAY_CODES: ClassVar[list[str]] = ["B", "H", "I", "L", "Q", "O"]
NEXT_ARRAY_CODE: ClassVar[dict[str, str]] = dict(itertools.pairwise(ARRAY_CODES))
__slots__ = ("__underlying",)
def __init__(self, values: Sequence[int] = ()):
for code in self.ARRAY_CODES:
try:
underlying = self._array_or_list(code, values)
break
except OverflowError:
pass
else: # pragma: no cover
raise AssertionError(f"Could not create storage for {values!r}")
if isinstance(underlying, list):
for v in underlying:
if not isinstance(v, int) or v < 0:
raise ValueError(f"Could not create IntList for {values!r}")
self.__underlying: list[int] | ArrayType[int] = underlying
@classmethod
def of_length(cls, n: int) -> "IntList":
return cls(array.array("B", [0]) * n)
@staticmethod
def _array_or_list(
code: str, contents: Iterable[int]
) -> Union[list[int], "ArrayType[int]"]:
if code == "O":
return list(contents)
return array.array(code, contents)
def count(self, value: int) -> int:
return self.__underlying.count(value)
def __repr__(self) -> str:
return f"IntList({list(self.__underlying)!r})"
def __len__(self) -> int:
return len(self.__underlying)
@overload
def __getitem__(self, i: int) -> int: ... # pragma: no cover
@overload
def __getitem__(
self, i: slice
) -> "list[int] | ArrayType[int]": ... # pragma: no cover
def __getitem__(self, i: int | slice) -> "int | list[int] | ArrayType[int]":
return self.__underlying[i]
def __delitem__(self, i: int | slice) -> None:
del self.__underlying[i]
def insert(self, i: int, v: int) -> None:
self.__underlying.insert(i, v)
def __iter__(self) -> Iterator[int]:
return iter(self.__underlying)
def __eq__(self, other: object) -> bool:
if self is other:
return True
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying == other.__underlying
def __ne__(self, other: object) -> bool:
if self is other:
return False
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying != other.__underlying
def append(self, n: int) -> None:
# try the fast path of appending n first. If this overflows, use the
# __setitem__ path, which will upgrade the underlying array.
try:
self.__underlying.append(n)
except OverflowError:
i = len(self.__underlying)
self.__underlying.append(0)
self[i] = n
def __setitem__(self, i: int, n: int) -> None:
while True:
try:
self.__underlying[i] = n
return
except OverflowError:
assert n > 0
self.__upgrade()
def extend(self, ls: Iterable[int]) -> None:
for n in ls:
self.append(n)
def __upgrade(self) -> None:
assert isinstance(self.__underlying, array.array)
code = self.NEXT_ARRAY_CODE[self.__underlying.typecode]
self.__underlying = self._array_or_list(code, self.__underlying)
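# Sketch of the storage-upgrade behaviour described in the class docstring
# (assumed usage):
#   xs = IntList([1, 2, 3])   # fits in an unsigned-byte array ("B")
#   xs.append(10 ** 6)        # overflow forces an upgrade to a wider typecode
#   assert list(xs) == [1, 2, 3, 10 ** 6]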
def binary_search(lo: int, hi: int, f: Callable[[int], bool]) -> int:
"""Binary searches in [lo , hi) to find
n such that f(n) == f(lo) but f(n + 1) != f(lo).
It is implicitly assumed and will not be checked
that f(hi) != f(lo).
"""
reference = f(lo)
while lo + 1 < hi:
mid = (lo + hi) // 2
if f(mid) == reference:
lo = mid
else:
hi = mid
return lo
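# Illustrative use of binary_search above: find the largest n in [0, 10)
# with n * n <= 20; f flips exactly once on this range, as required:
#   binary_search(0, 10, lambda n: n * n <= 20) == 4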
|
IntList
|
python
|
numba__numba
|
numba/core/types/common.py
|
{
"start": 783,
"end": 3022
}
|
class ____(IterableType, ArrayCompatible):
"""
Type class for objects providing the buffer protocol.
Derived classes exist for more specific cases.
"""
mutable = True
slice_is_copy = False
aligned = True
# CS and FS are used not for inner-contiguous arrays but for strided ones
LAYOUTS = frozenset(['C', 'F', 'CS', 'FS', 'A'])
def __init__(self, dtype, ndim, layout, readonly=False, name=None):
from .misc import unliteral
if isinstance(dtype, Buffer):
msg = ("The dtype of a Buffer type cannot itself be a Buffer type, "
"this is unsupported behaviour."
"\nThe dtype requested for the unsupported Buffer was: {}.")
raise NumbaTypeError(msg.format(dtype))
if layout not in self.LAYOUTS:
raise NumbaValueError("Invalid layout '%s'" % layout)
self.dtype = unliteral(dtype)
self.ndim = ndim
self.layout = layout
if readonly:
self.mutable = False
if name is None:
type_name = self.__class__.__name__.lower()
if readonly:
type_name = "readonly %s" % type_name
name = "%s(%s, %sd, %s)" % (type_name, dtype, ndim, layout)
super(Buffer, self).__init__(name)
@property
def iterator_type(self):
from .iterators import ArrayIterator
return ArrayIterator(self)
@property
def as_array(self):
return self
def copy(self, dtype=None, ndim=None, layout=None):
if dtype is None:
dtype = self.dtype
if ndim is None:
ndim = self.ndim
if layout is None:
layout = self.layout
return self.__class__(dtype=dtype, ndim=ndim, layout=layout,
readonly=not self.mutable)
@property
def key(self):
return self.dtype, self.ndim, self.layout, self.mutable
@property
def is_c_contig(self):
return self.layout == 'C' or (self.ndim <= 1 and self.layout in 'CF')
@property
def is_f_contig(self):
return self.layout == 'F' or (self.ndim <= 1 and self.layout in 'CF')
@property
def is_contig(self):
return self.layout in 'CF'
|
Buffer
|
python
|
automl__auto-sklearn
|
autosklearn/evaluation/__init__.py
|
{
"start": 5064,
"end": 22256
}
|
class ____(AbstractTAFunc):
def __init__(
self,
backend: Backend,
autosklearn_seed: int,
resampling_strategy: Union[
str, BaseCrossValidator, _RepeatedSplits, BaseShuffleSplit
],
metrics: Sequence[Scorer],
cost_for_crash: float,
abort_on_first_run_crash: bool,
port: int,
pynisher_context: str,
multi_objectives: List[str],
initial_num_run: int = 1,
stats: Optional[Stats] = None,
run_obj: str = "quality",
par_factor: int = 1,
scoring_functions: Optional[List[Scorer]] = None,
output_y_hat_optimization: bool = True,
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
memory_limit: Optional[int] = None,
disable_file_output: bool = False,
init_params: Optional[Dict[str, Any]] = None,
budget_type: Optional[str] = None,
ta: Optional[Callable] = None, # Required by SMAC's parent class
**resampling_strategy_args: Any,
):
if resampling_strategy == "holdout":
eval_function = autosklearn.evaluation.train_evaluator.eval_holdout
elif resampling_strategy == "holdout-iterative-fit":
eval_function = (
autosklearn.evaluation.train_evaluator.eval_iterative_holdout
)
elif resampling_strategy == "cv-iterative-fit":
eval_function = autosklearn.evaluation.train_evaluator.eval_iterative_cv
elif resampling_strategy == "cv" or isinstance(
resampling_strategy, (BaseCrossValidator, _RepeatedSplits, BaseShuffleSplit)
):
eval_function = autosklearn.evaluation.train_evaluator.eval_cv
elif resampling_strategy == "partial-cv":
eval_function = autosklearn.evaluation.train_evaluator.eval_partial_cv
elif resampling_strategy == "partial-cv-iterative-fit":
eval_function = (
autosklearn.evaluation.train_evaluator.eval_partial_cv_iterative
)
elif resampling_strategy == "test":
eval_function = autosklearn.evaluation.test_evaluator.eval_t
output_y_hat_optimization = False
else:
raise ValueError("Unknown resampling strategy %s" % resampling_strategy)
self.worst_possible_result = cost_for_crash
eval_function = functools.partial(
fit_predict_try_except_decorator,
ta=eval_function,
cost_for_crash=self.worst_possible_result,
)
super().__init__(
ta=eval_function,
stats=stats,
run_obj=run_obj,
par_factor=par_factor,
cost_for_crash=self.worst_possible_result,
abort_on_first_run_crash=abort_on_first_run_crash,
multi_objectives=multi_objectives,
)
self.backend = backend
self.autosklearn_seed = autosklearn_seed
self.resampling_strategy = resampling_strategy
self.initial_num_run = initial_num_run
self.metrics = metrics
self.resampling_strategy = resampling_strategy
self.resampling_strategy_args = resampling_strategy_args
self.scoring_functions = scoring_functions
# TODO deactivate output_y_hat_optimization and let respective evaluator decide
self.output_y_hat_optimization = output_y_hat_optimization
self.include = include
self.exclude = exclude
self.disable_file_output = disable_file_output
self.init_params = init_params
self.budget_type = budget_type
if memory_limit is not None:
memory_limit = int(math.ceil(memory_limit))
self.memory_limit = memory_limit
dm = self.backend.load_datamanager()
self._get_test_loss = "X_test" in dm.data and "Y_test" in dm.data
self.port = port
self.pynisher_context = pynisher_context
if self.port is None:
self.logger: Union[
logging.Logger, PickableLoggerAdapter
] = logging.getLogger("TAE")
else:
self.logger = get_named_client_logger(
name="TAE",
port=self.port,
)
def run_wrapper(
self,
run_info: RunInfo,
) -> Tuple[RunInfo, RunValue]:
"""
Wrapper function for ExecuteTARun.run_wrapper() to cap the target algorithm
runtime if it would run over the total allowed runtime.
Parameters
----------
run_info : RunInfo
Object that contains enough information to execute a configuration run in
isolation.
Returns
-------
RunInfo:
an object containing the configuration launched
RunValue:
Contains information about the status/performance of config
"""
if self.budget_type is None:
if run_info.budget != 0:
raise ValueError(
"If budget_type is None, budget must be.0, but is %f"
% run_info.budget
)
else:
if run_info.budget == 0:
run_info = run_info._replace(budget=100)
elif run_info.budget <= 0 or run_info.budget > 100:
raise ValueError(
"Illegal value for budget, must be >0 and <=100, but is %f"
% run_info.budget
)
if self.budget_type not in ("subsample", "iterations", "mixed"):
raise ValueError(
"Illegal value for budget type, must be one of "
"('subsample', 'iterations', 'mixed'), but is : %s"
% self.budget_type
)
remaining_time = self.stats.get_remaing_time_budget()
if remaining_time - 5 < run_info.cutoff:
run_info = run_info._replace(cutoff=int(remaining_time - 5))
config_id = (
run_info.config
if isinstance(run_info.config, int)
else run_info.config.config_id
)
if run_info.cutoff < 1.0:
self.logger.info(
"Not starting configuration %d because time is up" % config_id
)
return run_info, RunValue(
status=StatusType.STOP,
cost=self.worst_possible_result,
time=0.0,
additional_info={},
starttime=time.time(),
endtime=time.time(),
)
elif run_info.cutoff != int(np.ceil(run_info.cutoff)) and not isinstance(
run_info.cutoff, int
):
run_info = run_info._replace(cutoff=int(np.ceil(run_info.cutoff)))
self.logger.info("Starting to evaluate configuration %d" % config_id)
return super().run_wrapper(run_info=run_info)
def run(
self,
config: Configuration,
instance: Optional[str] = None,
cutoff: Optional[float] = None,
seed: int = 12345,
budget: float = 0.0,
instance_specific: Optional[str] = None,
) -> Tuple[
StatusType,
float | list[float],
float,
Dict[str, Union[int, float, str, Dict, List, Tuple]],
]:
# Additional information of each of the tae executions
# Defined upfront for mypy
additional_run_info: TYPE_ADDITIONAL_INFO = {}
context = multiprocessing.get_context(self.pynisher_context)
preload_modules(context)
queue = context.Queue()
if not (instance_specific is None or instance_specific == "0"):
raise ValueError(instance_specific)
init_params = {"instance": instance}
if self.init_params is not None:
init_params.update(self.init_params)
if self.port is None:
logger: Union[logging.Logger, PickableLoggerAdapter] = logging.getLogger(
"pynisher"
)
else:
logger = get_named_client_logger(
name="pynisher",
port=self.port,
)
arguments = dict(
logger=logger,
wall_time_in_s=cutoff,
mem_in_mb=self.memory_limit,
capture_output=True,
context=context,
)
if isinstance(config, int):
num_run = self.initial_num_run
else:
num_run = config.config_id + self.initial_num_run
obj_kwargs = dict(
queue=queue,
config=config,
backend=self.backend,
port=self.port,
metrics=self.metrics,
seed=self.autosklearn_seed,
num_run=num_run,
scoring_functions=self.scoring_functions,
output_y_hat_optimization=self.output_y_hat_optimization,
include=self.include,
exclude=self.exclude,
disable_file_output=self.disable_file_output,
instance=instance,
init_params=init_params,
budget=budget,
budget_type=self.budget_type,
additional_components=autosklearn.pipeline.components.base._addons,
)
if self.resampling_strategy != "test":
obj_kwargs["resampling_strategy"] = self.resampling_strategy
obj_kwargs["resampling_strategy_args"] = self.resampling_strategy_args
try:
obj = pynisher.enforce_limits(**arguments)(self.ta)
obj(**obj_kwargs)
except Exception as e:
exception_traceback = traceback.format_exc()
error_message = repr(e)
additional_run_info.update(
{"traceback": exception_traceback, "error": error_message}
)
return (
StatusType.CRASHED,
self.worst_possible_result,
0.0,
additional_run_info,
)
if obj.exit_status in (
pynisher.TimeoutException,
pynisher.MemorylimitException,
):
# Even if pynisher thinks that a timeout or memout occurred,
# the target algorithm may still have written something into the
# queue - in that case we treat it as a successful run
try:
info = autosklearn.evaluation.util.read_queue(queue)
result = info[-1]["loss"]
status = info[-1]["status"]
additional_run_info = info[-1]["additional_run_info"]
if obj.stdout:
additional_run_info["subprocess_stdout"] = obj.stdout
if obj.stderr:
additional_run_info["subprocess_stderr"] = obj.stderr
if obj.exit_status is pynisher.TimeoutException:
additional_run_info["info"] = "Run stopped because of timeout."
elif obj.exit_status is pynisher.MemorylimitException:
additional_run_info["info"] = "Run stopped because of memout."
if status in [StatusType.SUCCESS, StatusType.DONOTADVANCE]:
cost = result
else:
cost = self.worst_possible_result
except Empty:
info = None
if obj.exit_status is pynisher.TimeoutException:
status = StatusType.TIMEOUT
additional_run_info = {"error": "Timeout"}
elif obj.exit_status is pynisher.MemorylimitException:
status = StatusType.MEMOUT
additional_run_info = {
"error": "Memout (used more than {} MB).".format(
self.memory_limit
)
}
else:
raise ValueError(obj.exit_status)
cost = self.worst_possible_result
elif obj.exit_status is TAEAbortException:
info = None
status = StatusType.ABORT
cost = self.worst_possible_result
additional_run_info = {
"error": "Your configuration of " "auto-sklearn does not work!",
"exit_status": _encode_exit_status(obj.exit_status),
"subprocess_stdout": obj.stdout,
"subprocess_stderr": obj.stderr,
}
else:
try:
info = autosklearn.evaluation.util.read_queue(queue)
result = info[-1]["loss"]
status = info[-1]["status"]
additional_run_info = info[-1]["additional_run_info"]
if obj.exit_status == 0:
cost = result
else:
status = StatusType.CRASHED
cost = self.worst_possible_result
additional_run_info["info"] = (
"Run treated as crashed "
"because the pynisher exit "
"status %s is unknown." % str(obj.exit_status)
)
additional_run_info["exit_status"] = _encode_exit_status(
obj.exit_status
)
additional_run_info["subprocess_stdout"] = obj.stdout
additional_run_info["subprocess_stderr"] = obj.stderr
except Empty:
info = None
additional_run_info = {
"error": "Result queue is empty",
"exit_status": _encode_exit_status(obj.exit_status),
"subprocess_stdout": obj.stdout,
"subprocess_stderr": obj.stderr,
"exitcode": obj.exitcode,
}
status = StatusType.CRASHED
cost = self.worst_possible_result
if (
self.budget_type is None or budget == 0
) and status == StatusType.DONOTADVANCE:
status = StatusType.SUCCESS
if not isinstance(additional_run_info, dict):
additional_run_info = {"message": additional_run_info}
if (
info is not None
and self.resampling_strategy
in ("holdout-iterative-fit", "cv-iterative-fit")
and status != StatusType.CRASHED
):
learning_curve = autosklearn.evaluation.util.extract_learning_curve(info)
learning_curve_runtime = autosklearn.evaluation.util.extract_learning_curve(
info, "duration"
)
if len(learning_curve) > 1:
additional_run_info["learning_curve"] = learning_curve
additional_run_info["learning_curve_runtime"] = learning_curve_runtime
train_learning_curve = autosklearn.evaluation.util.extract_learning_curve(
info, "train_loss"
)
if len(train_learning_curve) > 1:
additional_run_info["train_learning_curve"] = train_learning_curve
additional_run_info["learning_curve_runtime"] = learning_curve_runtime
if self._get_test_loss:
test_learning_curve = (
autosklearn.evaluation.util.extract_learning_curve(
info,
"test_loss",
)
)
if len(test_learning_curve) > 1:
additional_run_info["test_learning_curve"] = test_learning_curve
additional_run_info[
"learning_curve_runtime"
] = learning_curve_runtime
if isinstance(config, int):
origin = "DUMMY"
config_id = config
else:
origin = getattr(config, "origin", "UNKNOWN")
config_id = config.config_id
additional_run_info["configuration_origin"] = origin
runtime = float(obj.wall_clock_time)
autosklearn.evaluation.util.empty_queue(queue)
self.logger.info("Finished evaluating configuration %d" % config_id)
# Do some sanity checking (for multi objective)
if len(self.multi_objectives) > 1:
error = (
f"Returned costs {cost} does not match the number of objectives"
f" {len(self.multi_objectives)}."
)
# If dict convert to array
# Make sure the ordering is correct
if isinstance(cost, dict):
ordered_cost = []
for name in self.multi_objectives:
if name not in cost:
raise RuntimeError(
f"Objective {name} was not found "
f"in the returned costs ({cost})"
)
ordered_cost.append(cost[name])
cost = ordered_cost
if isinstance(cost, list):
if len(cost) != len(self.multi_objectives):
raise RuntimeError(error)
if isinstance(cost, float):
raise RuntimeError(error)
return status, cost, runtime, additional_run_info
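# Illustrative multi-objective cost handling from the sanity check above
# (assumed values): with multi_objectives == ["cost", "time"], a returned
# dict {"time": 1.5, "cost": 0.2} is reordered to the list [0.2, 1.5].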
|
ExecuteTaFuncWithQueue
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/math_ops_test.py
|
{
"start": 46855,
"end": 48720
}
|
class ____(test_util.TensorFlowTestCase):
def testErrorReceivedIfDtypeMismatchFromOp(self):
if context.executing_eagerly():
error = errors_impl.InvalidArgumentError
error_message = (
r"cannot compute Add(V2)? as input #1\(zero-based\) was expected to "
r"be a int32 tensor but is a float tensor \[Op:Add(V2)?\]")
else:
error = TypeError
error_message = (
"Input 'y' of 'Add(V2)?' Op has type float32 that does not "
"match type int32 of argument 'x'.")
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + 1.0
self.evaluate(a)
def testRHSDispatchingAndErrorRaising(self):
if context.executing_eagerly():
error = ValueError
error_message = (
r"Attempt to convert a value .* with an unsupported type")
else:
error = TypeError
error_message = (r"Failed to convert elements of .* to Tensor")
class RHSReturnsTrue:
def __radd__(self, other):
return True
a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsTrue()
self.assertEqual(a, True)
class RHSRaisesError:
def __radd__(self, other):
raise TypeError("RHS not implemented")
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + RHSRaisesError()
self.evaluate(a)
class RHSReturnsNotImplemented:
def __radd__(self, other):
return NotImplemented
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsNotImplemented()
self.evaluate(a)
class RHSNotImplemented:
pass
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + RHSNotImplemented()
self.evaluate(a)
|
BinaryOpsTest
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_int.py
|
{
"start": 34932,
"end": 35024
}
|
class ____(IntStrDigitLimitsTests):
int_class = IntSubclass
|
IntSubclassStrDigitLimitsTests
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/automation_condition_scenario.py
|
{
"start": 1558,
"end": 5030
}
|
class ____(ScenarioState):
automation_condition: Optional[dg.AutomationCondition] = None
condition_cursor: Optional[AutomationConditionCursor] = None
requested_asset_partitions: Optional[Sequence[AssetKeyPartitionKey]] = None
ensure_empty_result: bool = True
request_backfills: bool = False
def _get_request_subsets_by_key(
self, asset_graph_view: AssetGraphView
) -> Mapping[dg.AssetKey, EntitySubset]:
if self.requested_asset_partitions is None:
return {}
ap_by_key = defaultdict(set)
for ap in self.requested_asset_partitions:
ap_by_key[ap.asset_key].add(ap)
return {
asset_key: asset_graph_view.get_asset_subset_from_asset_partitions(asset_key, aps)
for asset_key, aps in ap_by_key.items()
}
async def evaluate(
self, asset: CoercibleToAssetKey
) -> tuple["AutomationConditionScenarioState", dg.AutomationResult]:
asset_key = AssetKey.from_coercible(asset)
# ensure that the top level condition never returns any asset partitions, as otherwise the
# next evaluation will assume that those asset partitions were requested by the machinery
asset_condition = (
AndAutomationCondition(
operands=[check.not_none(self.automation_condition), FalseAutomationCondition()]
)
if self.ensure_empty_result
else check.not_none(self.automation_condition)
)
asset_graph = self.scenario_spec.with_asset_properties(
keys=[asset],
auto_materialize_policy=AutoMaterializePolicy.from_automation_condition(
asset_condition
),
).asset_graph
with freeze_time(self.current_time):
cursor = AssetDaemonCursor.empty().with_updates(
0, 0, [], [self.condition_cursor] if self.condition_cursor else [], asset_graph
)
evaluator = AutomationConditionEvaluator(
asset_graph=asset_graph,
instance=self.instance,
entity_keys=asset_graph.get_all_asset_keys(),
cursor=cursor,
logger=self.logger,
emit_backfills=False,
evaluation_id=cursor.evaluation_id,
)
evaluator.request_subsets_by_key = self._get_request_subsets_by_key(
evaluator.asset_graph_view
) # type: ignore
context = AutomationContext.create(key=asset_key, evaluator=evaluator)
full_result = await asset_condition.evaluate(context) # type: ignore
new_state = dataclasses.replace(self, condition_cursor=full_result.get_new_cursor())
result = full_result.child_results[0] if self.ensure_empty_result else full_result
return new_state, result
def without_cursor(self) -> "AutomationConditionScenarioState":
"""Removes the previous evaluation state from the state. This is useful for testing
re-evaluating this data "from scratch" after much computation has occurred.
"""
return dataclasses.replace(self, condition_cursor=None)
def with_requested_asset_partitions(
self, requested_asset_partitions: Sequence[AssetKeyPartitionKey]
) -> "AutomationConditionScenarioState":
return dataclasses.replace(self, requested_asset_partitions=requested_asset_partitions)
|
AutomationConditionScenarioState
|
python
|
django__django
|
django/template/defaulttags.py
|
{
"start": 18090,
"end": 29381
}
|
class ____(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<%s>" % self.__class__.__name__
def render(self, context):
values = {key: val.resolve(context) for key, val in self.extra_context.items()}
with context.push(**values):
return self.nodelist.render(context)
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept
    # variables as arguments.
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in ("on", "off"):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(("endautoescape",))
parser.delete_first_token()
return AutoEscapeControlNode((arg == "on"), nodelist)
@register.tag
def comment(parser, token):
"""
Ignore everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past("endcomment")
return CommentNode()
@register.tag
def cycle(parser, token):
"""
Cycle among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% for o in some_list %}
{% cycle 'row1' 'row2' as rowcolors silent %}
<tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
{% endfor %}
"""
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates.
#
# It keeps the last node in the parser to be able to reset it with
# {% resetcycle %}.
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, "_named_cycle_nodes"):
raise TemplateSyntaxError(
"No named cycles in template. '%s' is not defined" % name
)
if name not in parser._named_cycle_nodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._named_cycle_nodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError(
"Only 'silent' flag is allowed after cycle's name, not '%s'."
% args[-1]
)
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent)
if not hasattr(parser, "_named_cycle_nodes"):
parser._named_cycle_nodes = {}
parser._named_cycle_nodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values)
parser._last_cycle_node = node
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Output a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag("filter")
def do_filter(parser, token):
"""
Filter the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept
    # variables as arguments.
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
filter_name = getattr(func, "_filter_name", None)
if filter_name in ("escape", "safe"):
raise TemplateSyntaxError(
'"filter %s" is not permitted. Use the "autoescape" tag instead.'
% filter_name
)
nodelist = parser.parse(("endfilter",))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
"""
Output the first variable passed that is not False.
Output nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 as myvar %}
This is equivalent to::
{% if var1 %}
{{ var1 }}
{% elif var2 %}
{{ var2 }}
{% elif var3 %}
{{ var3 }}
{% endif %}
but much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to disable auto-escaping of variables you can use::
{% autoescape off %}
{% firstof var1 var2 var3 "<strong>fallback value</strong>" %}
{% autoescape %}
Or if only some variables should be escaped, you can use::
{% firstof var1 var2|safe var3 "<strong>fallback</strong>"|safe %}
"""
bits = token.split_contents()[1:]
asvar = None
if not bits:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
if len(bits) >= 2 and bits[-2] == "as":
asvar = bits[-1]
bits = bits[:-2]
return FirstOfNode([parser.compile_filter(bit) for bit in bits], asvar)
@register.tag("for")
def do_for(parser, token):
"""
Loop over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
    </ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
{% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
======================= ==============================================
Variable Description
======================= ==============================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the
loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
======================= ==============================================
"""
bits = token.split_contents()
if len(bits) < 4:
raise TemplateSyntaxError(
"'for' statements should have at least four words: %s" % token.contents
)
is_reversed = bits[-1] == "reversed"
in_index = -3 if is_reversed else -2
if bits[in_index] != "in":
raise TemplateSyntaxError(
"'for' statements should use the format"
" 'for x in y': %s" % token.contents
)
invalid_chars = frozenset((" ", '"', "'", FILTER_SEPARATOR))
loopvars = re.split(r" *, *", " ".join(bits[1:in_index]))
for var in loopvars:
if not var or not invalid_chars.isdisjoint(var):
raise TemplateSyntaxError(
"'for' tag received an invalid argument: %s" % token.contents
)
sequence = parser.compile_filter(bits[in_index + 1])
nodelist_loop = parser.parse(
(
"empty",
"endfor",
)
)
token = parser.next_token()
if token.contents == "empty":
nodelist_empty = parser.parse(("endfor",))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
|
WithNode
|
python
|
PyCQA__pylint
|
pylint/testutils/functional/test_file.py
|
{
"start": 1315,
"end": 4346
}
|
class ____:
"""A single functional test case file with options."""
_CONVERTERS: dict[str, Callable[[str], tuple[int, ...] | list[str]]] = {
"min_pyver": parse_python_version,
"max_pyver": parse_python_version,
"min_pyver_end_position": parse_python_version,
"requires": lambda s: [i.strip() for i in s.split(",")],
"except_implementations": lambda s: [i.strip() for i in s.split(",")],
"exclude_platforms": lambda s: [i.strip() for i in s.split(",")],
}
def __init__(self, directory: str, filename: str) -> None:
self._directory = directory
self.base = filename.replace(".py", "")
# TODO:4.0: Deprecate FunctionalTestFile.options and related code
# We should just parse these options like a normal configuration file.
self.options: TestFileOptions = {
"min_pyver": (2, 5),
"max_pyver": (4, 0),
"min_pyver_end_position": (3, 8),
"requires": [],
"except_implementations": [],
"exclude_platforms": [],
"exclude_from_minimal_messages_config": False,
}
self._parse_options()
def __repr__(self) -> str:
return f"FunctionalTest:{self.base}"
def _parse_options(self) -> None:
cp = configparser.ConfigParser()
cp.add_section("testoptions")
try:
cp.read(self.option_file)
except NoFileError:
pass
for name, value in cp.items("testoptions"):
conv = self._CONVERTERS.get(name, lambda v: v)
assert (
name in POSSIBLE_TEST_OPTIONS
), f"[testoptions]' can only contains one of {POSSIBLE_TEST_OPTIONS} and had '{name}'"
self.options[name] = conv(value) # type: ignore[literal-required]
@property
def option_file(self) -> str:
return self._file_type(".rc")
@property
def module(self) -> str:
package = basename(self._directory)
return ".".join([package, self.base])
@property
def expected_output(self) -> str:
files = [
p.stem
for p in Path(self._directory).glob(f"{split(self.base)[-1]}.[0-9]*.txt")
]
output_options = [
(int(version[0]), int(version[1:]))
for s in files
            if (version := s.rpartition(".")[2]).isdigit()
]
for opt in sorted(output_options, reverse=True):
if _CURRENT_VERSION >= opt:
str_opt = "".join([str(s) for s in opt])
return join(self._directory, f"{self.base}.{str_opt}.txt")
return join(self._directory, self.base + ".txt")
@property
def source(self) -> str:
return self._file_type(".py")
def _file_type(self, ext: str, check_exists: bool = True) -> str:
name = join(self._directory, self.base + ext)
if not check_exists or exists(name):
return name
raise NoFileError(f"Cannot find '{name}'.")
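    # A worked sketch of expected_output's version selection (filenames are
    # hypothetical): with "recursion.38.txt" and "recursion.310.txt" on disk,
    # the stems parse to (3, 8) and (3, 10); running under Python 3.11 the
    # options are scanned in descending order, (3, 10) is the first one
    # <= _CURRENT_VERSION, and "recursion.310.txt" wins, with
    # "recursion.txt" as the fallback when no versioned file applies.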
|
FunctionalTestFile
|
python
|
pytorch__pytorch
|
test/onnx/model_defs/mnist.py
|
{
"start": 56,
"end": 680
}
|
class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
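    # Shape walkthrough, assuming the standard 1x28x28 MNIST input (the input
    # size is an assumption, not enforced by this module):
    #   conv1 (k=5): 1x28x28 -> 10x24x24; max_pool2d(2) -> 10x12x12
    #   conv2 (k=5): 10x12x12 -> 20x8x8;  max_pool2d(2) -> 20x4x4
    #   flatten: 20 * 4 * 4 = 320, matching x.view(-1, 320)
    #   fc1: 320 -> 50, fc2: 50 -> 10 log-probabilities, one per digit class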
|
MNIST
|
python
|
huggingface__transformers
|
src/transformers/models/mobilevit/modeling_mobilevit.py
|
{
"start": 28459,
"end": 30310
}
|
class ____(nn.Module):
"""
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
in_channels = config.neck_hidden_sizes[-2]
out_channels = config.aspp_out_channels
if len(config.atrous_rates) != 3:
raise ValueError("Expected 3 values for atrous_rates")
self.convs = nn.ModuleList()
in_projection = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
use_activation="relu",
)
self.convs.append(in_projection)
self.convs.extend(
[
MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
dilation=rate,
use_activation="relu",
)
for rate in config.atrous_rates
]
)
pool_layer = MobileViTASPPPooling(config, in_channels, out_channels)
self.convs.append(pool_layer)
self.project = MobileViTConvLayer(
config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
)
self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
def forward(self, features: torch.Tensor) -> torch.Tensor:
pyramid = []
for conv in self.convs:
pyramid.append(conv(features))
pyramid = torch.cat(pyramid, dim=1)
pooled_features = self.project(pyramid)
pooled_features = self.dropout(pooled_features)
return pooled_features
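    # Channel bookkeeping for the concatenation above: the 1x1 in-projection,
    # the three atrous 3x3 convs, and the pooling branch each emit
    # out_channels, so torch.cat(..., dim=1) yields 5 * out_channels
    # channels, exactly the in_channels the final 1x1 `project` layer expects.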
|
MobileViTASPP
|
python
|
walkccc__LeetCode
|
solutions/889. Construct Binary Tree from Preorder and Postorder Traversal/889.py
|
{
"start": 0,
"end": 897
}
|
class ____:
def constructFromPrePost(
self,
pre: list[int],
post: list[int],
) -> TreeNode | None:
postToIndex = {num: i for i, num in enumerate(post)}
def build(preStart: int, preEnd: int, postStart: int, postEnd: int) -> TreeNode | None:
if preStart > preEnd:
return None
if preStart == preEnd:
return TreeNode(pre[preStart])
rootVal = pre[preStart]
leftRootVal = pre[preStart + 1]
leftRootPostIndex = postToIndex[leftRootVal]
leftSize = leftRootPostIndex - postStart + 1
root = TreeNode(rootVal)
root.left = build(preStart + 1, preStart + leftSize,
postStart, leftRootPostIndex)
root.right = build(preStart + leftSize + 1, preEnd,
leftRootPostIndex + 1, postEnd - 1)
return root
return build(0, len(pre) - 1, 0, len(post) - 1)
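  # Worked example (illustrative input): pre = [1, 2, 4, 5, 3, 6, 7],
  # post = [4, 5, 2, 6, 7, 3, 1]. At the top call rootVal = 1 and
  # leftRootVal = 2; postToIndex[2] == 2, so leftSize = 2 - 0 + 1 = 3.
  # The left subtree is built from pre[1..3] / post[0..2], the right from
  # pre[4..6] / post[3..5], and the hash map keeps the whole build O(n).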
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/seer/anomaly_detection/types.py
|
{
"start": 701,
"end": 835
}
|
class ____(TypedDict):
time_period: int
sensitivity: str
direction: str
expected_seasonality: str
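# A minimal conforming payload, as a sketch (the concrete values are
# illustrative assumptions; the vocabularies Seer accepts are not shown in
# this snippet):
#
#     config: AnomalyDetectionConfig = {
#         "time_period": 60,
#         "sensitivity": "medium",
#         "direction": "both",
#         "expected_seasonality": "auto",
#     }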
|
AnomalyDetectionConfig
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 65986,
"end": 66888
}
|
class ____(Query):
union_all = __add__ = __compound_select__('UNION ALL')
union = __or__ = __compound_select__('UNION')
intersect = __and__ = __compound_select__('INTERSECT')
except_ = __sub__ = __compound_select__('EXCEPT')
__radd__ = __compound_select__('UNION ALL', inverted=True)
__ror__ = __compound_select__('UNION', inverted=True)
__rand__ = __compound_select__('INTERSECT', inverted=True)
__rsub__ = __compound_select__('EXCEPT', inverted=True)
def select_from(self, *columns):
if not columns:
raise ValueError('select_from() must specify one or more columns.')
query = (Select((self,), columns)
.bind(self._database))
if getattr(self, 'model', None) is not None:
# Bind to the sub-select's model type, if defined.
query = query.objects(self.model)
return query
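    # A usage sketch (model and column names are hypothetical, not part of
    # this snippet): wrap a compound select so it can be queried again.
    #
    #     lhs = User.select(User.username)
    #     rhs = Admin.select(Admin.username)
    #     q = (lhs | rhs).select_from(SQL('username')).order_by(SQL('username'))
    #
    # Here `|` builds the UNION via __or__ above, and select_from() binds the
    # wrapping Select to the same database as the compound query.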
|
SelectQuery
|
python
|
getsentry__sentry
|
src/sentry/new_migrations/monkey/executor.py
|
{
"start": 674,
"end": 2089
}
|
class ____(Exception):
"""
Raised when migration operation is missing information needed for selecting
correct database connection.
"""
def _check_bitfield_flags(name: str, old: list[str], new: list[str]) -> None:
deleted = set(old) - set(new)
if deleted:
raise ValueError(
f"migration `{name}` alters a BitField in an unsafe way!\n\n"
f'the following flags were removed: {", ".join(sorted(deleted))}\n\n'
f"unused flags must remain to preserve padding for future flags"
)
should_match_old = new[: len(old)]
inserted = set(should_match_old) - set(old)
if inserted:
raise ValueError(
f"migration `{name}` alters a BitField in an unsafe way!\n\n"
f'the following flags were inserted between old flags: {", ".join(sorted(inserted))}\n\n'
f"new flags must be added at the end or flags will change meaning"
)
if old != should_match_old:
diff = "\n".join(
difflib.unified_diff(old, should_match_old, fromfile="old", tofile="new", lineterm="")
)
raise ValueError(
f"migration `{name}` alters a BitField in an unsafe way!\n\n"
f"the following old flags were reordered:\n\n"
f"{diff}\n\n"
f"flags must retain historical order or flags will change meaning"
)
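# A quick sketch of what the checks above accept and reject (flag names are
# illustrative):
#   old = ["a", "b"], new = ["a", "b", "c"]  -> OK: appended at the end
#   old = ["a", "b"], new = ["a"]            -> ValueError: "b" was removed
#   old = ["a", "b"], new = ["a", "c", "b"]  -> ValueError: "c" inserted mid-list
#   old = ["a", "b"], new = ["b", "a", "c"]  -> ValueError: old flags reordered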
|
MissingDatabaseRoutingInfo
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_base.py
|
{
"start": 700,
"end": 36761
}
|
class ____(BaseModel):
"""Base Pydantic model for an Azure PostgreSQL-backed vector store.
This class encapsulates configuration (connection pool, table/column
names, embedding type/dimension, index configuration and metadata
column) and performs runtime verification that the target table
exists with expected columns and index configuration. If the table
does not exist, ``verify_and_init_store`` will create it.
"""
connection_pool: AzurePGConnectionPool
schema_name: str = "public"
table_name: str = "vector_store"
id_column: str = "id"
content_column: str = "content"
embedding_column: str = "embedding"
embedding_type: VectorType | None = None
embedding_dimension: PositiveInt | None = None
embedding_index: Algorithm | None = None
metadata_column: str | None = "metadata"
model_config = ConfigDict(
arbitrary_types_allowed=True, # Allow arbitrary types like Embeddings and AzurePGConnectionPool
)
@model_validator(mode="after")
def verify_and_init_store(self) -> Self:
"""Validate the store configuration and initialize DB schema and index.
This validator runs after Pydantic model initialization. It queries
the database to detect an existing table and its columns/indexes,
performs type and dimension checks for the embedding column, and
sets inferred properties (like embedding_type and embedding_dimension)
when they are not explicitly provided. If the table does not exist,
it will create the table with sensible defaults.
Returns:
Self: The same model instance, possibly updated with inferred values.
"""
# verify that metadata_column is not empty if provided
if self.metadata_column is not None and len(self.metadata_column) == 0:
raise ValueError("'metadata_column' cannot be empty if provided.")
_logger.debug(
"checking if table '%s.%s' exists with the required columns",
self.schema_name,
self.table_name,
)
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select a.attname as column_name,
format_type(a.atttypid, a.atttypmod) as column_type
from pg_attribute a
join pg_class c on a.attrelid = c.oid
join pg_namespace n on c.relnamespace = n.oid
where a.attnum > 0
and not a.attisdropped
and n.nspname = %(schema_name)s
and c.relname = %(table_name)s
order by a.attnum asc
"""
),
{"schema_name": self.schema_name, "table_name": self.table_name},
)
resultset = cursor.fetchall()
existing_columns: dict[str, str] = {
row["column_name"]: row["column_type"] for row in resultset
}
# if table exists, verify that required columns exist and have correct types
if len(existing_columns) > 0:
_logger.debug(
"table '%s.%s' exists with the following column mapping: %s",
self.schema_name,
self.table_name,
existing_columns,
)
id_column_type = existing_columns.get(self.id_column)
if id_column_type != "uuid":
raise ValueError(
f"Table '{self.schema_name}.{self.table_name}' must have a column '{self.id_column}' of type 'uuid'."
)
content_column_type = existing_columns.get(self.content_column)
if content_column_type is None or (
content_column_type != "text"
and not content_column_type.startswith("varchar")
):
raise ValueError(
f"Table '{self.schema_name}.{self.table_name}' must have a column '{self.content_column}' of type 'text' or 'varchar'."
)
embedding_column_type = existing_columns.get(self.embedding_column)
pattern = re.compile(r"(?P<type>\w+)(?:\((?P<dim>\d+)\))?")
m = pattern.match(embedding_column_type if embedding_column_type else "")
parsed_type: str | None = m.group("type") if m else None
parsed_dim: PositiveInt | None = (
PositiveInt(m.group("dim")) if m and m.group("dim") else None
)
vector_types = [t.value for t in VectorType.__members__.values()]
if parsed_type not in vector_types:
raise ValueError(
f"Column '{self.embedding_column}' in table '{self.schema_name}.{self.table_name}' must be one of the following types: {vector_types}."
)
elif (
self.embedding_type is not None
and parsed_type != self.embedding_type.value
):
raise ValueError(
f"Column '{self.embedding_column}' in table '{self.schema_name}.{self.table_name}' has type '{parsed_type}', but the specified embedding_type is '{self.embedding_type.value}'. They must match."
)
elif self.embedding_type is None:
_logger.info(
"embedding_type is not specified, but the column '%s' in table '%s.%s' has type '%s'. Overriding embedding_type accordingly.",
self.embedding_column,
self.schema_name,
self.table_name,
parsed_type,
)
self.embedding_type = VectorType(parsed_type)
if parsed_dim is not None and self.embedding_dimension is None:
_logger.info(
"embedding_dimension is not specified, but the column '%s' in table '%s.%s' has a dimension of %d. Overriding embedding_dimension accordingly.",
self.embedding_column,
self.schema_name,
self.table_name,
parsed_dim,
)
self.embedding_dimension = parsed_dim
elif (
parsed_dim is not None
and self.embedding_dimension is not None
and parsed_dim != self.embedding_dimension
):
raise ValueError(
f"Column '{self.embedding_column}' in table '{self.schema_name}.{self.table_name}' has a dimension of {parsed_dim}, but the specified embedding_dimension is {self.embedding_dimension}. They must match."
)
if self.metadata_column is not None:
existing_type = existing_columns.get(self.metadata_column)
if existing_type is None:
raise ValueError(
f"Column '{self.metadata_column}' does not exist in table '{self.schema_name}.{self.table_name}'."
)
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
_logger.debug(
"checking if table '%s.%s' has a vector index on column '%s'",
self.schema_name,
self.table_name,
self.embedding_column,
)
cursor.execute(
sql.SQL(
"""
with cte as (
select n.nspname as schema_name,
ct.relname as table_name,
ci.relname as index_name,
a.amname as index_type,
pg_get_indexdef(
ci.oid, -- index OID
generate_series(1, array_length(ii.indkey, 1)), -- column no
true -- pretty print
) as index_column,
o.opcname as index_opclass,
ci.reloptions as index_opts
from pg_class ci
join pg_index ii on ii.indexrelid = ci.oid
join pg_am a on a.oid = ci.relam
join pg_class ct on ct.oid = ii.indrelid
join pg_namespace n on n.oid = ci.relnamespace
join pg_opclass o on o.oid = any(ii.indclass)
where ci.relkind = 'i'
and ct.relkind = 'r'
and ii.indisvalid
and ii.indisready
) select schema_name, table_name, index_name, index_type,
index_column, index_opclass, index_opts
from cte
where schema_name = %(schema_name)s
and table_name = %(table_name)s
and index_column like %(embedding_column)s
and (
index_opclass like '%%vector%%'
or index_opclass like '%%halfvec%%'
or index_opclass like '%%sparsevec%%'
or index_opclass like '%%bit%%'
)
order by schema_name, table_name, index_name
"""
),
{
"schema_name": self.schema_name,
"table_name": self.table_name,
"embedding_column": f"%{self.embedding_column}%",
},
)
resultset = cursor.fetchall()
if len(resultset) > 0:
_logger.debug(
"table '%s.%s' has %d vector index(es): %s",
self.schema_name,
self.table_name,
len(resultset),
resultset,
)
if self.embedding_index is None:
_logger.info(
"embedding_index is not specified, using the first found index: %s",
resultset[0],
)
index_type = resultset[0]["index_type"]
index_opclass = VectorOpClass(resultset[0]["index_opclass"])
index_opts = {
opts.split("=")[0]: opts.split("=")[1]
for opts in resultset[0]["index_opts"]
}
index = (
DiskANN(op_class=index_opclass, **index_opts)
if index_type == "diskann"
else (
HNSW(op_class=index_opclass, **index_opts)
if index_type == "hnsw"
else IVFFlat(op_class=index_opclass, **index_opts)
)
)
self.embedding_index = index
else:
_logger.info(
"embedding_index is specified as '%s'; will try to find a matching index.",
self.embedding_index,
)
                    _logger.debug("candidate vector indexes: %s", resultset)
index_opclass = self.embedding_index.op_class.value # type: ignore[assignment]
if isinstance(self.embedding_index, DiskANN):
index_type = "diskann"
elif isinstance(self.embedding_index, HNSW):
index_type = "hnsw"
else:
index_type = "ivfflat"
for row in resultset:
if (
row["index_type"] == index_type
and row["index_opclass"] == index_opclass
):
_logger.info(
"found a matching index: %s. overriding embedding_index.",
row,
)
index_opts = {
opts.split("=")[0]: opts.split("=")[1]
for opts in row["index_opts"]
}
index = (
DiskANN(op_class=index_opclass, **index_opts)
if index_type == "diskann"
else (
HNSW(op_class=index_opclass, **index_opts)
if index_type == "hnsw"
else IVFFlat(op_class=index_opclass, **index_opts)
)
)
self.embedding_index = index
break
elif self.embedding_index is None:
_logger.info(
"embedding_index is not specified, and no vector index found in table '%s.%s'. defaulting to 'DiskANN' with 'vector_cosine_ops' opclass.",
self.schema_name,
self.table_name,
)
self.embedding_index = DiskANN(op_class=VectorOpClass.vector_cosine_ops)
# if table does not exist, create it
else:
_logger.debug(
"table '%s.%s' does not exist, creating it with the required columns",
self.schema_name,
self.table_name,
)
if self.embedding_type is None:
_logger.warning(
"Embedding type is not specified, defaulting to 'vector'."
)
self.embedding_type = VectorType.vector
if self.embedding_dimension is None:
_logger.warning(
"Embedding dimension is not specified, defaulting to 1536."
)
self.embedding_dimension = PositiveInt(1_536)
if self.embedding_index is None:
_logger.warning(
"Embedding index is not specified, defaulting to 'DiskANN' with 'vector_cosine_ops' opclass."
)
self.embedding_index = DiskANN(op_class=VectorOpClass.vector_cosine_ops)
with self.connection_pool.connection() as conn, conn.cursor() as cursor:
cursor.execute(
sql.SQL(
"""
create table {table_name} (
{id_column} uuid primary key,
{content_column} text,
{embedding_column} {embedding_type}({embedding_dimension}),
{metadata_column} jsonb
)
"""
).format(
table_name=sql.Identifier(self.schema_name, self.table_name),
id_column=sql.Identifier(self.id_column),
content_column=sql.Identifier(self.content_column),
embedding_column=sql.Identifier(self.embedding_column),
embedding_type=sql.Identifier(self.embedding_type.value),
embedding_dimension=sql.Literal(self.embedding_dimension),
metadata_column=sql.Identifier(self.metadata_column),
)
)
return self
def _delete_rows_from_table(
self, ids: list[str] | None = None, **kwargs: Any
) -> bool | None:
"""Delete rows from the table by their IDs or truncate the table.
Args:
ids (list[str] | None): List of IDs to delete. If None, truncates the table.
**kwargs: Additional options, such as 'restart' and 'cascade' for truncation.
Returns:
bool | None: True if successful, False if an exception occurred, None otherwise.
"""
with self.connection_pool.connection() as conn:
conn.autocommit = False
try:
with conn.transaction() as _tx, conn.cursor() as cursor:
if ids is None:
restart = bool(kwargs.pop("restart", None))
cascade = bool(kwargs.pop("cascade", None))
cursor.execute(
sql.SQL(
"""
truncate table {table_name} {restart} {cascade}
"""
).format(
table_name=sql.Identifier(
self.schema_name, self.table_name
),
restart=sql.SQL(
"restart identity"
if restart
else "continue identity"
),
cascade=sql.SQL("cascade" if cascade else "restrict"),
)
)
else:
cursor.execute(
sql.SQL(
"""
delete from {table_name}
where {id_column} = any(%(id)s)
"""
).format(
table_name=sql.Identifier(
self.schema_name, self.table_name
),
id_column=sql.Identifier(self.id_column),
),
{"id": ids},
)
except Exception:
return False
else:
return True
def _similarity_search_by_vector_with_distance(
self, embedding: list[float], k: int = 4, **kwargs: Any
) -> list[tuple[dict, float, np.ndarray | None]]:
"""Perform a similarity search using a vector embedding and return results with distances.
Args:
embedding (list[float]): The query embedding vector.
k (int): Number of top results to return.
**kwargs: Additional options such as 'return_embeddings', 'top_m', and 'filter_expression'.
Returns:
list[tuple[dict, float, np.ndarray | None]]: List of tuples containing document dict, distance, and optionally the embedding.
"""
assert self.embedding_index is not None, (
"embedding_index should have already been set"
)
return_embeddings = bool(kwargs.pop("return_embeddings", None))
top_m = int(kwargs.pop("top_m", 5 * k))
filter_expression: sql.SQL = kwargs.pop("filter_expression", sql.SQL("true"))
with self.connection_pool.connection() as conn:
register_vector(conn)
with conn.cursor(row_factory=dict_row) as cursor:
metadata_column: list[str]
if isinstance(self.metadata_column, list):
metadata_column = [
col if isinstance(col, str) else col[0]
for col in self.metadata_column
]
elif isinstance(self.metadata_column, str):
metadata_column = [self.metadata_column]
else:
metadata_column = []
# do reranking for the following cases:
# - binary or scalar quantizations (for HNSW and IVFFlat), or
# - product quantization (for DiskANN)
if (
self.embedding_index.op_class == VectorOpClass.bit_hamming_ops
or self.embedding_index.op_class == VectorOpClass.bit_jaccard_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_cosine_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_ip_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_l1_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_l2_ops
or (
isinstance(self.embedding_index, DiskANN)
and self.embedding_index.product_quantized
)
):
sql_query = sql.SQL(
"""
select {outer_columns},
{embedding_column} {op} %(query)s as distance,
{maybe_embedding_column}
from (
select {inner_columns}
from {table_name}
where {filter_expression}
order by {expression} asc
limit %(top_m)s
) i
order by {embedding_column} {op} %(query)s asc
limit %(top_k)s
"""
).format(
outer_columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
*metadata_column,
],
)
),
embedding_column=sql.Identifier(self.embedding_column),
op=(
sql.SQL(
VectorOpClass.vector_cosine_ops.to_operator()
) # TODO(arda): Think of getting this from outside
if (
self.embedding_index.op_class
in (
VectorOpClass.bit_hamming_ops,
VectorOpClass.bit_jaccard_ops,
)
)
else sql.SQL(self.embedding_index.op_class.to_operator())
),
maybe_embedding_column=(
sql.Identifier(self.embedding_column)
if return_embeddings
else sql.SQL(" as ").join(
(sql.NULL, sql.Identifier(self.embedding_column))
)
),
inner_columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
self.embedding_column,
*metadata_column,
],
)
),
table_name=sql.Identifier(self.schema_name, self.table_name),
filter_expression=filter_expression,
expression=(
sql.SQL(
"binary_quantize({embedding_column})::bit({embedding_dim}) {op} binary_quantize({query})"
).format(
embedding_column=sql.Identifier(self.embedding_column),
embedding_dim=sql.Literal(self.embedding_dimension),
op=sql.SQL(self.embedding_index.op_class.to_operator()),
query=sql.Placeholder("query"),
)
if self.embedding_index.op_class
in (
VectorOpClass.bit_hamming_ops,
VectorOpClass.bit_jaccard_ops,
)
else (
sql.SQL(
"{embedding_column}::halfvec({embedding_dim}) {op} {query}::halfvec({embedding_dim})"
).format(
embedding_column=sql.Identifier(
self.embedding_column
),
embedding_dim=sql.Literal(self.embedding_dimension),
op=sql.SQL(
self.embedding_index.op_class.to_operator()
),
query=sql.Placeholder("query"),
)
if self.embedding_index.op_class
in (
VectorOpClass.halfvec_cosine_ops,
VectorOpClass.halfvec_ip_ops,
VectorOpClass.halfvec_l1_ops,
VectorOpClass.halfvec_l2_ops,
)
else sql.SQL("{embedding_column} {op} {query}").format(
embedding_column=sql.Identifier(
self.embedding_column
),
op=sql.SQL(
self.embedding_index.op_class.to_operator()
),
query=sql.Placeholder("query"),
)
)
),
)
# otherwise (i.e., no quantization), do not do reranking
else:
sql_query = sql.SQL(
"""
select {outer_columns},
{embedding_column} {op} %(query)s as distance,
{maybe_embedding_column}
from {table_name}
where {filter_expression}
order by {embedding_column} {op} %(query)s asc
limit %(top_k)s
"""
).format(
outer_columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
*metadata_column,
],
)
),
embedding_column=sql.Identifier(self.embedding_column),
op=sql.SQL(self.embedding_index.op_class.to_operator()),
maybe_embedding_column=(
sql.Identifier(self.embedding_column)
if return_embeddings
else sql.SQL(" as ").join(
(sql.NULL, sql.Identifier(self.embedding_column))
)
),
table_name=sql.Identifier(self.schema_name, self.table_name),
filter_expression=filter_expression,
)
cursor.execute(
sql_query,
{
"query": np.array(embedding, dtype=np.float32),
"top_m": top_m,
"top_k": k,
},
)
resultset = cursor.fetchall()
return [
(
{
"id": result[self.id_column],
"content": result[self.content_column],
"metadata": (
result[metadata_column[0]]
if isinstance(self.metadata_column, str)
else {col: result[col] for col in metadata_column}
),
},
result["distance"],
result.get(self.embedding_column), # type: ignore[return-value]
)
for result in resultset
]
def _get_by_ids(self, ids: Sequence[str], /) -> list[dict[str, Any]]:
"""Retrieve documents from the table by their IDs.
Args:
ids (Sequence[str]): List of IDs to retrieve.
Returns:
list[dict[str, Any]]: List of document dictionaries with id, content, embedding, and metadata.
"""
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
metadata_column: list[str]
if isinstance(self.metadata_column, list):
metadata_column = [
col if isinstance(col, str) else col[0]
for col in self.metadata_column
]
elif isinstance(self.metadata_column, str):
metadata_column = [self.metadata_column]
else:
metadata_column = []
if ids is not None:
where_clause = sql.SQL(" where {id_column} = any(%(id)s)").format(
id_column=sql.Identifier(self.id_column)
)
else:
where_clause = sql.SQL("")
get_sql = sql.SQL(
"""
select {columns}
from {table_name}
{where_clause}
"""
).format(
columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
self.embedding_column,
*metadata_column,
],
)
),
table_name=sql.Identifier(self.schema_name, self.table_name),
where_clause=where_clause,
)
if ids is not None:
cursor.execute(get_sql, {"id": ids})
else:
cursor.execute(get_sql)
resultset = cursor.fetchall()
documents = [
{
"id": result[self.id_column],
"content": result[self.content_column],
"embedding": result[self.embedding_column],
"metadata": (
result[metadata_column[0]]
if isinstance(self.metadata_column, str)
else {col: result[col] for col in metadata_column}
),
}
for result in resultset
]
return documents
def _full_text_search(
self,
query_str: str,
k: int = 4,
language: str = "english",
**kwargs: Any,
) -> list[tuple[dict, float, None]]:
"""Run a Postgres full-text search using plainto_tsquery and return ranked results.
Args:
query_str: The free-text query string to search for.
k: Maximum number of results to return.
language: The text search configuration/language to use (e.g. 'english').
**kwargs: Reserved for future options; currently ignored.
Returns:
List of tuples (document_dict, rank, None). Document dict contains id, content, and metadata.
"""
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
# normalize metadata column(s)
metadata_columns: list[str]
if isinstance(self.metadata_column, list):
metadata_columns = [
col if isinstance(col, str) else col[0]
for col in self.metadata_column
]
elif isinstance(self.metadata_column, str):
metadata_columns = [self.metadata_column]
else:
metadata_columns = []
sql_query = sql.SQL(
"""
SELECT {id_col}, {content_col},
rank() OVER (
ORDER BY ts_rank_cd(
to_tsvector({lang}, {content_col}),
plainto_tsquery({lang}, %(q)s)
) DESC
) AS rank
FROM {table}
WHERE plainto_tsquery({lang}, %(q)s) @@ to_tsvector({lang}, {content_col})
ORDER BY rank
LIMIT %(top_k)s
"""
).format(
id_col=sql.Identifier(self.id_column),
content_col=sql.Identifier(self.content_column),
lang=sql.Literal(language),
table=sql.Identifier(self.schema_name, self.table_name),
)
cursor.execute(sql_query, {"q": query_str, "top_k": k})
rows = cursor.fetchall()
results: list[tuple[dict, float, None]] = []
for row in rows:
doc = {
"id": row[self.id_column],
"content": row[self.content_column],
"metadata": (
row[metadata_columns[0]]
if isinstance(self.metadata_column, str)
else {col: row[col] for col in metadata_columns}
),
}
rank_val = float(row["rank"]) if row.get("rank") is not None else 0.0
results.append((doc, rank_val, None))
return results
def _dedup_results(
self, results: list[tuple[dict, float, Any]]
) -> list[tuple[dict, float, Any]]:
"""Deduplicate search results by document id, preserving order.
Accepts a list of tuples (document_dict, score, optional_embedding) where
document_dict contains at least the id column (self.id_column) or 'id'.
Returns a filtered list keeping the first occurrence of each id.
"""
seen_ids: set = set()
deduped: list[tuple[dict, float, Any]] = []
for doc, score, emb in results:
# robustly get id value using configured id_column or fallback to 'id'
doc_id = doc.get(self.id_column) if isinstance(doc, dict) else None
if doc_id is None:
doc_id = doc.get("id") if isinstance(doc, dict) else None
# If there's no id, treat the row as unique and keep it
if doc_id is None:
deduped.append((doc, score, emb))
continue
if doc_id not in seen_ids:
deduped.append((doc, score, emb))
seen_ids.add(doc_id)
return deduped
|
BaseAzurePGVectorStore
|
python
|
sympy__sympy
|
sympy/plotting/pygletplot/plot_modes.py
|
{
"start": 915,
"end": 1494
}
|
class ____(PlotSurface):
i_vars, d_vars = 'xy', 'z'
intervals = [[-1, 1, 40], [-1, 1, 40]]
aliases = ['cartesian', 'monge']
is_default = True
def _get_sympy_evaluator(self):
fz = self.d_vars[0]
x = self.u_interval.v
y = self.v_interval.v
@float_vec3
def e(_x, _y):
return (_x, _y, fz.subs(x, _x).subs(y, _y))
return e
def _get_lambda_evaluator(self):
fz = self.d_vars[0]
x = self.u_interval.v
y = self.v_interval.v
return lambdify([x, y], [x, y, fz])
|
Cartesian3D
|
python
|
huggingface__transformers
|
src/transformers/models/chinese_clip/modeling_chinese_clip.py
|
{
"start": 39309,
"end": 41449
}
|
class ____(ChineseCLIPPreTrainedModel):
config: ChineseCLIPVisionConfig
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["ChineseCLIPVisionEmbeddings", "ChineseCLIPVisionAttention"]
def __init__(self, config: ChineseCLIPVisionConfig):
super().__init__(config)
self.vision_model = ChineseCLIPVisionTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import CLIPProcessor, ChineseCLIPVisionModel
>>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
>>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
>>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
return self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
@auto_docstring
|
ChineseCLIPVisionModel
|
python
|
automl__auto-sklearn
|
test/test_evaluation/test_train_evaluator.py
|
{
"start": 2346,
"end": 108172
}
|
class ____(BaseEvaluatorTest, unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
"""
Creates a backend mock
"""
tmp_dir_name = self.id()
self.ev_path = os.path.join(this_directory, ".tmp_evaluations", tmp_dir_name)
if os.path.exists(self.ev_path):
shutil.rmtree(self.ev_path)
os.makedirs(self.ev_path, exist_ok=False)
dummy_model_files = [os.path.join(self.ev_path, str(n)) for n in range(100)]
dummy_pred_files = [os.path.join(self.ev_path, str(n)) for n in range(100, 200)]
dummy_cv_model_files = [
os.path.join(self.ev_path, str(n)) for n in range(200, 300)
]
backend_mock = unittest.mock.Mock()
backend_mock.temporary_directory = tempfile.gettempdir()
backend_mock.get_model_dir.return_value = self.ev_path
backend_mock.get_cv_model_dir.return_value = self.ev_path
backend_mock.get_model_path.side_effect = dummy_model_files
backend_mock.get_cv_model_path.side_effect = dummy_cv_model_files
backend_mock.get_prediction_output_path.side_effect = dummy_pred_files
self.backend_mock = backend_mock
self.tmp_dir = os.path.join(self.ev_path, "tmp_dir")
self.port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
def tearDown(self):
if os.path.exists(self.ev_path):
shutil.rmtree(self.ev_path)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_holdout(self, pipeline_mock):
# Binary iris, contains 69 train samples, 25 validation samples,
# 6 test samples
D = get_binary_classification_datamanager()
D.name = "test"
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_additional_run_info.return_value = None
pipeline_mock.get_max_iter.return_value = 1
pipeline_mock.get_current_iter.return_value = 1
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
configuration=configuration,
resampling_strategy="holdout",
resampling_strategy_args={"train_size": 0.66},
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
port=self.port,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.fit_predict_and_loss()
return_value = read_queue(evaluator.queue)
self.assertEqual(len(return_value), 1)
result = return_value[0]["loss"]
self.assertEqual(len(return_value[0]), 3)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(result, 0.45833333333333337)
self.assertEqual(pipeline_mock.fit.call_count, 1)
        # three calls because of train, holdout and test set
self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 24)
self.assertEqual(
evaluator.file_output.call_args[0][1].shape[0], D.data["Y_test"].shape[0]
)
self.assertEqual(evaluator.model.fit.call_count, 1)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_iterative_holdout(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = "test"
class SideEffect(object):
def __init__(self):
self.fully_fitted_call_count = 0
def configuration_fully_fitted(self):
self.fully_fitted_call_count += 1
# Is called twice as often as call to fit because we also check
# if we need to add a special indicator to show that this is the
# final call to iterative fit
return self.fully_fitted_call_count > 18
Xt_fixture = "Xt_fixture"
pipeline_mock.estimator_supports_iterative_fit.return_value = True
pipeline_mock.configuration_fully_fitted.side_effect = (
SideEffect().configuration_fully_fitted
)
pipeline_mock.fit_transformer.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.get_additional_run_info.return_value = None
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_max_iter.return_value = 512
pipeline_mock.get_current_iter.side_effect = (
2,
4,
8,
16,
32,
64,
128,
256,
512,
)
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout",
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
budget=0.0,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
class LossSideEffect(object):
def __init__(self):
                # The 3 below relates to the train, test and opt sets
self.losses = [
{"accuracy": value}
for value in chain.from_iterable(
[i] * 3 for i in [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
)
]
self.iteration = 0
def side_effect(self, *args, **kwargs):
self.iteration += 1
return self.losses[self.iteration - 1]
evaluator._loss = unittest.mock.Mock()
evaluator._loss.side_effect = LossSideEffect().side_effect
evaluator.fit_predict_and_loss(iterative=True)
self.assertEqual(evaluator.file_output.call_count, 9)
for i in range(1, 10):
return_value = evaluator.queue.get(timeout=1)
result = return_value["loss"]
self.assertAlmostEqual(result, 1.0 - (0.1 * (i - 1)))
if i < 9:
self.assertEqual(return_value["status"], StatusType.DONOTADVANCE)
self.assertEqual(len(return_value), 3)
else:
self.assertEqual(return_value["status"], StatusType.SUCCESS)
self.assertEqual(len(return_value), 4)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 9)
self.assertEqual(
[cal[1]["n_iter"] for cal in pipeline_mock.iterative_fit.call_args_list],
[2, 2, 4, 8, 16, 32, 64, 128, 256],
)
# 9 per split type
self.assertEqual(evaluator.model.predict_proba.call_count, 27)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 23)
self.assertEqual(
evaluator.file_output.call_args[0][1].shape[0], D.data["Y_test"].shape[0]
)
self.assertEqual(evaluator.file_output.call_count, 9)
self.assertEqual(evaluator.model.fit.call_count, 0)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
    def test_iterative_holdout_interruption(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = "test"
class SideEffect(object):
def __init__(self):
self.fully_fitted_call_count = 0
def configuration_fully_fitted(self):
self.fully_fitted_call_count += 1
# Is called twice as often as call to fit because we also check
# if we need to add a special indicator to show that this is the
# final call to iterative fit
if self.fully_fitted_call_count == 5:
raise ValueError("fixture")
return self.fully_fitted_call_count > 10
Xt_fixture = "Xt_fixture"
pipeline_mock.estimator_supports_iterative_fit.return_value = True
pipeline_mock.configuration_fully_fitted.side_effect = (
SideEffect().configuration_fully_fitted
)
pipeline_mock.fit_transformer.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_additional_run_info.return_value = None
pipeline_mock.get_max_iter.return_value = 512
pipeline_mock.get_current_iter.side_effect = (
2,
4,
8,
16,
32,
64,
128,
256,
512,
)
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout-iterative-fit",
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
budget=0.0,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
class LossSideEffect(object):
def __init__(self):
self.losses = [
{"accuracy": value}
for value in [
0.8,
0.8,
0.8,
0.8,
0.6,
0.6,
0.6,
0.6,
0.4,
0.4,
0.4,
0.4,
0.2,
0.2,
0.2,
0.2,
0.0,
0.0,
0.0,
0.0,
]
]
self.iteration = 0
def side_effect(self, *args, **kwargs):
self.iteration += 1
return self.losses[self.iteration - 1]
evaluator._loss = unittest.mock.Mock()
evaluator._loss.side_effect = LossSideEffect().side_effect
self.assertRaisesRegex(
ValueError,
"fixture",
evaluator.fit_predict_and_loss,
iterative=True,
)
self.assertEqual(evaluator.file_output.call_count, 2)
for i in range(1, 3):
return_value = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(return_value["loss"], 1.0 - (0.2 * i))
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 2)
# 6 calls because of train, holdout and test set
# and a total of two calls each because of two iterations of fitting
self.assertEqual(evaluator.model.predict_proba.call_count, 6)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 23)
self.assertEqual(
evaluator.file_output.call_args[0][1].shape[0], D.data["Y_test"].shape[0]
)
self.assertEqual(evaluator.file_output.call_count, 2)
self.assertEqual(evaluator.model.fit.call_count, 0)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_iterative_holdout_not_iterative(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = "test"
Xt_fixture = "Xt_fixture"
pipeline_mock.estimator_supports_iterative_fit.return_value = False
pipeline_mock.fit_transformer.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_additional_run_info.return_value = None
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout-iterative-fit",
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.fit_predict_and_loss(iterative=True)
self.assertEqual(evaluator.file_output.call_count, 1)
return_value = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(return_value["loss"], 0.47826086956521741)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 0)
# 3 calls for train, opt and test
self.assertEqual(evaluator.model.predict_proba.call_count, 3)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 23)
self.assertEqual(
evaluator.file_output.call_args[0][1].shape[0], D.data["Y_test"].shape[0]
)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(evaluator.model.fit.call_count, 1)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_cv(self, pipeline_mock):
D = get_binary_classification_datamanager()
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_additional_run_info.return_value = None
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="cv",
resampling_strategy_args={"folds": 5},
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.fit_predict_and_loss()
return_value = read_queue(evaluator.queue)
self.assertEqual(len(return_value), 1)
result = return_value[0]["loss"]
self.assertEqual(len(return_value[0]), 3)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(result, 0.463768115942029)
self.assertEqual(pipeline_mock.fit.call_count, 5)
# 15 calls because of the training (5), holdout (5) and test set (5)
self.assertEqual(pipeline_mock.predict_proba.call_count, 15)
self.assertEqual(
evaluator.file_output.call_args[0][0].shape[0], D.data["Y_train"].shape[0]
)
self.assertEqual(
evaluator.file_output.call_args[0][1].shape[0], D.data["Y_test"].shape[0]
)
        # The model prior to fitting is saved; this cannot be tested directly
        # because of the way the mock module is used. Instead, we test whether
        # the if-block in which the model assignment happens is reached.
self.assertTrue(evaluator._added_empty_model)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_partial_cv(self, pipeline_mock):
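        # partial-cv evaluates only the requested fold: no file output is
        # produced and a single result is written to the queue.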
D = get_binary_classification_datamanager()
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_additional_run_info.return_value = None
pipeline_mock.get_max_iter.return_value = 1
pipeline_mock.get_current_iter.return_value = 1
D.name = "test"
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="partial-cv",
resampling_strategy_args={"folds": 5},
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.partial_fit_predict_and_loss(fold=1)
return_value = evaluator.queue.get(timeout=1)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(evaluator.file_output.call_count, 0)
self.assertEqual(return_value["loss"], 0.5)
self.assertEqual(pipeline_mock.fit.call_count, 1)
self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
        # The model prior to fitting is saved; this cannot be tested directly
        # because of the way the mock module is used. Instead, we test whether
        # the if-block in which the model assignment happens is reached.
self.assertTrue(hasattr(evaluator, "model"))
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_iterative_partial_cv(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = "test"
        class SideEffect:
def __init__(self):
self.fully_fitted_call_count = 0
def configuration_fully_fitted(self):
self.fully_fitted_call_count += 1
                # Called twice as often as iterative_fit because we also check
                # whether we need to add a special indicator showing that this
                # is the final call to iterative fit
return self.fully_fitted_call_count > 18
Xt_fixture = "Xt_fixture"
pipeline_mock.estimator_supports_iterative_fit.return_value = True
pipeline_mock.configuration_fully_fitted.side_effect = (
SideEffect().configuration_fully_fitted
)
pipeline_mock.fit_transformer.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size=None: np.tile(
[0.6, 0.4], (len(X), 1)
)
pipeline_mock.get_additional_run_info.return_value = None
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
pipeline_mock.get_max_iter.return_value = 512
pipeline_mock.get_current_iter.side_effect = (
2,
4,
8,
16,
32,
64,
128,
256,
512,
)
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(
temporary_directory=self.tmp_dir,
output_directory=None,
prefix="auto-sklearn",
)
backend_api.load_datamanager = lambda: D
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_api,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="partial-cv-iterative-fit",
resampling_strategy_args={"folds": 5},
scoring_functions=None,
output_y_hat_optimization=True,
metrics=[accuracy],
budget=0.0,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator._loss = unittest.mock.Mock()
evaluator._loss.side_effect = LossSideEffect().side_effect
evaluator.partial_fit_predict_and_loss(fold=1, iterative=True)
# No file output here!
self.assertEqual(evaluator.file_output.call_count, 0)
for i in range(1, 10):
return_value = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(return_value["loss"], 1.0 - (0.1 * (i - 1)))
if i < 9:
self.assertEqual(return_value["status"], StatusType.DONOTADVANCE)
else:
self.assertEqual(return_value["status"], StatusType.SUCCESS)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 9)
self.assertEqual(
[cal[1]["n_iter"] for cal in pipeline_mock.iterative_fit.call_args_list],
[2, 2, 4, 8, 16, 32, 64, 128, 256],
)
self.assertTrue(hasattr(evaluator, "model"))
self.assertEqual(pipeline_mock.iterative_fit.call_count, 9)
self.assertEqual(pipeline_mock.predict_proba.call_count, 27)
@unittest.mock.patch.object(TrainEvaluator, "_loss")
@unittest.mock.patch.object(TrainEvaluator, "_get_model")
    def test_file_output(self, model_mock, loss_mock):
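        # file_output must save the model (and the cv_model once self.models
        # is set) via the backend, and must reject predictions that contain
        # NaNs for the optimization set.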
D = get_regression_datamanager()
D.name = "test"
self.backend_mock.load_datamanager.return_value = D
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
loss_mock.return_value = None
model_mock.return_value = None
evaluator = TrainEvaluator(
self.backend_mock,
queue=queue_,
port=self.port,
configuration=configuration,
resampling_strategy="cv",
resampling_strategy_args={"folds": 5},
scoring_functions=SCORER_LIST,
output_y_hat_optimization=True,
metrics=[accuracy],
additional_components=dict(),
)
self.backend_mock.get_model_dir.return_value = True
evaluator.model = "model"
evaluator.Y_optimization = D.data["Y_train"]
return_value = evaluator.file_output(D.data["Y_train"], D.data["Y_test"])
self.assertEqual(return_value, (None, {}))
self.assertEqual(self.backend_mock.save_additional_data.call_count, 2)
self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count, 1)
self.assertEqual(
self.backend_mock.save_numrun_to_dir.call_args_list[-1][1].keys(),
{
"seed",
"idx",
"budget",
"model",
"cv_model",
"valid_predictions", # TODO remove once backend updated
"ensemble_predictions",
"test_predictions",
},
)
self.assertIsNotNone(
self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]["model"]
)
self.assertIsNone(
self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]["cv_model"]
)
evaluator.models = ["model2", "model2"]
return_value = evaluator.file_output(D.data["Y_train"], D.data["Y_test"])
self.assertEqual(return_value, (None, {}))
self.assertEqual(self.backend_mock.save_additional_data.call_count, 4)
self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count, 2)
self.assertEqual(
self.backend_mock.save_numrun_to_dir.call_args_list[-1][1].keys(),
{
"seed",
"idx",
"budget",
"model",
"cv_model",
"valid_predictions", # TODO remove once backend updated
"ensemble_predictions",
"test_predictions",
},
)
self.assertIsNotNone(
self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]["model"]
)
self.assertIsNotNone(
self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]["cv_model"]
)
D.data["Y_train"][0] = np.NaN
return_value = evaluator.file_output(D.data["Y_train"], D.data["Y_test"])
self.assertEqual(
return_value,
(
1.0,
{"error": "Model predictions for optimization set contains NaNs."},
),
)
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_subsample_indices_classification(self, mock, backend_mock):
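        # Stratified subsampling must be nested: indices drawn for a smaller
        # fraction are contained in those drawn for any larger fraction.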
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
D = get_binary_classification_datamanager()
backend_mock.load_datamanager.return_value = D
backend_mock.temporary_directory = tempfile.gettempdir()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="cv",
resampling_strategy_args={"folds": 10},
metrics=[accuracy],
additional_components=dict(),
)
train_indices = np.arange(69, dtype=int)
train_indices1 = subsample_indices(
train_indices, 0.1449, evaluator.task_type, evaluator.Y_train
)
evaluator.subsample = 20
train_indices2 = subsample_indices(
train_indices, 0.2898, evaluator.task_type, evaluator.Y_train
)
evaluator.subsample = 30
train_indices3 = subsample_indices(
train_indices, 0.4347, evaluator.task_type, evaluator.Y_train
)
evaluator.subsample = 67
train_indices4 = subsample_indices(
train_indices, 0.971, evaluator.task_type, evaluator.Y_train
)
# Common cases
for ti in train_indices1:
self.assertIn(ti, train_indices2)
for ti in train_indices2:
self.assertIn(ti, train_indices3)
for ti in train_indices3:
self.assertIn(ti, train_indices4)
# Corner cases
self.assertRaisesRegex(
ValueError,
"train_size=0.0 should be either positive and smaller than the "
r"number of samples 69 or a float in the \(0, 1\) range",
subsample_indices,
train_indices,
0.0,
evaluator.task_type,
evaluator.Y_train,
)
        # With a ratio equal to or greater than 1.0 it should return the
        # indices unshuffled
train_indices5 = subsample_indices(
train_indices, 1.0, evaluator.task_type, evaluator.Y_train
)
self.assertTrue(np.all(train_indices5 == train_indices))
evaluator.subsample = 68
self.assertRaisesRegex(
ValueError,
"The test_size = 1 should be greater or equal to the number of "
"classes = 2",
subsample_indices,
train_indices,
0.9999,
evaluator.task_type,
evaluator.Y_train,
)
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_subsample_indices_regression(self, mock, backend_mock):
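        # Same nesting property as in the classification case, but without
        # stratification; also checks the boundary fractions 0.0, 1.0 and >1.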
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
backend_mock.temporary_directory = tempfile.gettempdir()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="cv",
resampling_strategy_args={"folds": 10},
metrics=[accuracy],
additional_components=dict(),
)
train_indices = np.arange(69, dtype=int)
train_indices3 = subsample_indices(
train_indices,
subsample=0.4347,
task_type=evaluator.task_type,
Y_train=evaluator.Y_train,
)
evaluator.subsample = 67
train_indices4 = subsample_indices(
train_indices,
subsample=0.4347,
task_type=evaluator.task_type,
Y_train=evaluator.Y_train,
)
# Common cases
for ti in train_indices3:
self.assertIn(ti, train_indices4)
# Corner cases
self.assertRaisesRegex(
ValueError,
"train_size=0.0 should be either positive and smaller than the "
r"number of samples 69 or a float in the \(0, 1\) range",
subsample_indices,
train_indices,
0.0,
evaluator.task_type,
evaluator.Y_train,
)
self.assertRaisesRegex(
ValueError,
"Subsample must not be larger than 1, but is 1.000100",
subsample_indices,
train_indices,
1.0001,
evaluator.task_type,
evaluator.Y_train,
)
        # With a ratio equal to or greater than 1.0 it should return the
        # indices unshuffled
train_indices6 = subsample_indices(
train_indices, 1.0, evaluator.task_type, evaluator.Y_train
)
np.testing.assert_allclose(train_indices6, train_indices)
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_predict_proba_binary_classification(self, mock):
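        # The mocked pipeline always predicts [0.1, 0.9]; the ensemble
        # predictions saved via the backend must preserve these probabilities.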
D = get_binary_classification_datamanager()
self.backend_mock.load_datamanager.return_value = D
mock.predict_proba.side_effect = lambda y, batch_size=None: np.array(
[[0.1, 0.9]] * y.shape[0]
)
mock.side_effect = lambda **kwargs: mock
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
self.backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="cv",
resampling_strategy_args={"folds": 10},
output_y_hat_optimization=False,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.fit_predict_and_loss()
Y_optimization_pred = self.backend_mock.save_numrun_to_dir.call_args_list[0][1][
"ensemble_predictions"
]
for i in range(7):
self.assertEqual(0.9, Y_optimization_pred[i][1])
@unittest.mock.patch.object(TrainEvaluator, "file_output")
@unittest.mock.patch.object(TrainEvaluator, "_partial_fit_and_predict_standard")
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_fit_predict_and_loss_standard_additional_run_info(
self,
mock,
backend_mock,
_partial_fit_and_predict_mock,
file_output_mock,
):
D = get_binary_classification_datamanager()
backend_mock.load_datamanager.return_value = D
backend_mock.temporary_directory = tempfile.gettempdir()
mock.side_effect = lambda **kwargs: mock
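        # (train, optimization, test) predictions plus additional run info:
        # the 69 training samples are split 46/23 and the test set has 6.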
_partial_fit_and_predict_mock.return_value = (
np.array([[0.1, 0.9]] * 46),
np.array([[0.1, 0.9]] * 23),
np.array([[0.1, 0.9]] * 6),
{"a": 5},
)
file_output_mock.return_value = (None, {})
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout",
output_y_hat_optimization=False,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.model = unittest.mock.Mock()
evaluator.model.estimator_supports_iterative_fit.return_value = False
evaluator.X_targets[0] = np.array([1, 0] * 23)
evaluator.Y_targets[0] = np.array([1] * 23)
evaluator.Y_train_targets = np.array([1] * 69)
return_value = evaluator.fit_predict_and_loss(iterative=False)
self.assertIsNone(return_value)
element = queue_.get()
self.assertEqual(element["status"], StatusType.SUCCESS)
self.assertEqual(element["additional_run_info"]["a"], 5)
self.assertEqual(_partial_fit_and_predict_mock.call_count, 1)
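        # For the 2-fold CV below, every fold returns additional run info;
        # run info from fold >= 1 cannot be handled and must raise.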
        class SideEffect:
def __init__(self):
self.n_call = 0
def __call__(self, *args, **kwargs):
if self.n_call == 0:
self.n_call += 1
return (
np.array([[0.1, 0.9]] * 34),
np.array([[0.1, 0.9]] * 35),
np.array([[0.1, 0.9]] * 6),
{"a": 5},
)
else:
return (
np.array([[0.1, 0.9]] * 34),
np.array([[0.1, 0.9]] * 34),
np.array([[0.1, 0.9]] * 6),
{"a": 5},
)
_partial_fit_and_predict_mock.side_effect = SideEffect()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="cv",
resampling_strategy_args={"folds": 2},
output_y_hat_optimization=False,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.Y_targets[0] = np.array([1] * 35)
evaluator.Y_targets[1] = np.array([1] * 34)
evaluator.Y_train_targets = np.array([1] * 69)
self.assertRaisesRegex(
TAEAbortException,
"Found additional run info \"{'a': 5}\" in fold 1, "
"but cannot handle additional run info if fold >= 1.",
evaluator.fit_predict_and_loss,
iterative=False,
)
@unittest.mock.patch.object(TrainEvaluator, "_loss")
@unittest.mock.patch.object(TrainEvaluator, "finish_up")
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_fit_predict_and_loss_iterative_additional_run_info(
self,
mock,
backend_mock,
finish_up_mock,
loss_mock,
):
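        # Pretend the model is fully fitted after a single iteration so that
        # finish_up is reached exactly once with the model's run info.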
class Counter:
counter = 0
def __call__(self):
self.counter += 1
                return self.counter > 1
mock.estimator_supports_iterative_fit.return_value = True
mock.fit_transformer.return_value = ("Xt", {})
mock.configuration_fully_fitted.side_effect = Counter()
mock.get_current_iter.side_effect = Counter()
mock.get_max_iter.return_value = 1
mock.get_additional_run_info.return_value = 14678
loss_mock.return_value = {"accuracy": 0.5}
D = get_binary_classification_datamanager()
backend_mock.load_datamanager.return_value = D
backend_mock.temporary_directory = tempfile.gettempdir()
mock.side_effect = lambda **kwargs: mock
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout",
output_y_hat_optimization=False,
metrics=[accuracy],
budget=0.0,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1))
evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1))
return_value = evaluator.fit_predict_and_loss(iterative=True)
self.assertIsNone(return_value)
self.assertEqual(finish_up_mock.call_count, 1)
self.assertEqual(finish_up_mock.call_args[1]["additional_run_info"], 14678)
@unittest.mock.patch.object(TrainEvaluator, "_loss")
@unittest.mock.patch.object(TrainEvaluator, "finish_up")
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_fit_predict_and_loss_iterative_noniterativemodel_additional_run_info(
self,
mock,
backend_mock,
finish_up_mock,
loss_mock,
):
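        # Even with iterative=True, a model that does not support iterative
        # fitting is fitted once and its run info is forwarded to finish_up.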
mock.estimator_supports_iterative_fit.return_value = False
mock.fit_transformer.return_value = ("Xt", {})
mock.get_additional_run_info.return_value = 14678
loss_mock.return_value = {"accuracy": 0.5}
D = get_binary_classification_datamanager()
backend_mock.load_datamanager.return_value = D
backend_mock.temporary_directory = tempfile.gettempdir()
mock.side_effect = lambda **kwargs: mock
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout",
output_y_hat_optimization=False,
metrics=[accuracy],
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1))
evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1))
return_value = evaluator.fit_predict_and_loss(iterative=True)
self.assertIsNone(return_value)
self.assertEqual(finish_up_mock.call_count, 1)
self.assertEqual(finish_up_mock.call_args[1]["additional_run_info"], 14678)
@unittest.mock.patch("autosklearn.evaluation.train_evaluator.concat_data")
@unittest.mock.patch.object(TrainEvaluator, "_loss")
@unittest.mock.patch.object(TrainEvaluator, "finish_up")
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_fit_predict_and_loss_budget_additional_run_info(
self,
mock,
backend_mock,
finish_up_mock,
loss_mock,
_,
):
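        # budget_type='iterations' with an iterative model: fitting is mocked
        # to finish after one iteration and the additional run info must
        # reach finish_up.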
class Counter:
counter = 0
def __call__(self):
self.counter += 1
                return self.counter > 1
mock.configuration_fully_fitted.side_effect = Counter()
mock.get_current_iter.side_effect = Counter()
mock.estimator_supports_iterative_fit.return_value = True
mock.fit_transformer.return_value = ("Xt", {})
mock.get_additional_run_info.return_value = {"val": 14678}
mock.get_max_iter.return_value = 512
loss_mock.return_value = {"accuracy": 0.5}
D = get_binary_classification_datamanager()
backend_mock.load_datamanager.return_value = D
backend_mock.temporary_directory = tempfile.gettempdir()
mock.side_effect = lambda **kwargs: mock
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout",
output_y_hat_optimization=False,
metrics=[accuracy],
budget_type="iterations",
budget=50,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1))
evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1))
return_value = evaluator.fit_predict_and_loss(iterative=False)
self.assertIsNone(return_value)
self.assertEqual(finish_up_mock.call_count, 1)
self.assertEqual(
finish_up_mock.call_args[1]["additional_run_info"], {"val": 14678}
)
@unittest.mock.patch("autosklearn.evaluation.train_evaluator.concat_data")
@unittest.mock.patch.object(TrainEvaluator, "_loss")
@unittest.mock.patch.object(TrainEvaluator, "finish_up")
@unittest.mock.patch("autosklearn.automl_common.common.utils.backend.Backend")
@unittest.mock.patch(
"autosklearn.pipeline.classification.SimpleClassificationPipeline"
)
def test_fit_predict_and_loss_budget_2_additional_run_info(
self, mock, backend_mock, finish_up_mock, loss_mock, _
):
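        # budget_type='subsample' with a non-iterative model: the additional
        # run info must still be forwarded to finish_up.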
mock.estimator_supports_iterative_fit.return_value = False
mock.fit_transformer.return_value = ("Xt", {})
mock.get_additional_run_info.return_value = {"val": 14678}
loss_mock.return_value = {"accuracy": 0.5}
D = get_binary_classification_datamanager()
backend_mock.load_datamanager.return_value = D
backend_mock.temporary_directory = tempfile.gettempdir()
mock.side_effect = lambda **kwargs: mock
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(
backend_mock,
queue_,
port=self.port,
configuration=configuration,
resampling_strategy="holdout",
output_y_hat_optimization=False,
metrics=[accuracy],
budget_type="subsample",
budget=50,
additional_components=dict(),
)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, {})
evaluator.Y_targets[0] = np.array([1] * 23).reshape((-1, 1))
evaluator.Y_train_targets = np.array([1] * 69).reshape((-1, 1))
return_value = evaluator.fit_predict_and_loss(iterative=False)
self.assertIsNone(return_value)
self.assertEqual(finish_up_mock.call_count, 1)
self.assertEqual(
finish_up_mock.call_args[1]["additional_run_info"], {"val": 14678}
)
def test_get_results(self):
queue_ = multiprocessing.Queue()
for i in range(5):
queue_.put((i * 1, 1 - (i * 0.2), 0, "", StatusType.SUCCESS))
result = read_queue(queue_)
self.assertEqual(len(result), 5)
self.assertEqual(result[0][0], 0)
self.assertAlmostEqual(result[0][1], 1.0)
def test_datasets(self):
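        # Smoke test over all dataset getters: 2-fold CV must run end-to-end
        # and report a finite loss for every task type.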
for getter in get_dataset_getters():
testname = "%s_%s" % (
os.path.basename(__file__).replace(".pyc", "").replace(".py", ""),
getter.__name__,
)
with self.subTest(testname):
D = getter()
D_ = copy.deepcopy(D)
y = D.data["Y_train"]
if len(y.shape) == 2 and y.shape[1] == 1:
D_.data["Y_train"] = y.flatten()
self.backend_mock.load_datamanager.return_value = D_
queue_ = multiprocessing.Queue()
metric_lookup = {
MULTILABEL_CLASSIFICATION: f1_macro,
BINARY_CLASSIFICATION: accuracy,
MULTICLASS_CLASSIFICATION: accuracy,
REGRESSION: r2,
}
evaluator = TrainEvaluator(
self.backend_mock,
queue_,
port=self.port,
resampling_strategy="cv",
resampling_strategy_args={"folds": 2},
output_y_hat_optimization=False,
metrics=[metric_lookup[D.info["task"]]],
additional_components=dict(),
)
evaluator.fit_predict_and_loss()
return_value = evaluator.queue.get(timeout=1)
self.assertTrue(np.isfinite(return_value["loss"]))
############################################################################
# Test obtaining a splitter object from scikit-learn
@unittest.mock.patch.object(TrainEvaluator, "__init__")
def test_get_splitter(self, te_mock):
te_mock.return_value = None
D = unittest.mock.Mock(spec=AbstractDataManager)
D.data = dict(Y_train=np.array([0, 0, 0, 1, 1, 1]))
D.info = dict(task=BINARY_CLASSIFICATION)
D.feat_type = {}
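        # The splitter returned depends on the task type, the shuffle
        # argument and whether stratification is feasible; degenerate class
        # counts must fall back to the custom splitters.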
# holdout, binary classification
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection.StratifiedShuffleSplit)
# holdout, binary classification, no shuffle
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {"shuffle": False}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection.PredefinedSplit)
# holdout, binary classification, fallback to custom shuffle split
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1, 2])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {}
cv = evaluator.get_splitter(D)
self.assertIsInstance(
cv, autosklearn.evaluation.splitter.CustomStratifiedShuffleSplit
)
# cv, binary classification
D.data["Y_train"] = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.StratifiedKFold)
# cv, binary classification, shuffle is True
D.data["Y_train"] = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.StratifiedKFold)
self.assertTrue(cv.shuffle)
# cv, binary classification, shuffle is False
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5, "shuffle": False}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.KFold)
self.assertFalse(cv.shuffle)
# cv, binary classification, fallback to custom splitter
D.data["Y_train"] = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, autosklearn.evaluation.splitter.CustomStratifiedKFold)
# regression, shuffle split
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.ShuffleSplit)
# regression, no shuffle
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {"shuffle": False}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.PredefinedSplit)
# regression cv, KFold
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.KFold)
self.assertTrue(cv.shuffle)
# regression cv, KFold, no shuffling
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5, "shuffle": False}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.KFold)
self.assertFalse(cv.shuffle)
# multioutput regression, shuffle split
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.ShuffleSplit)
# multioutput regression, no shuffle
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
evaluator.resampling_strategy_args = {"shuffle": False}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.PredefinedSplit)
# multioutput regression cv, KFold
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.KFold)
self.assertTrue(cv.shuffle)
# multioutput regression cv, KFold, no shuffling
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "cv"
evaluator.resampling_strategy_args = {"folds": 5, "shuffle": False}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, sklearn.model_selection._split.KFold)
self.assertFalse(cv.shuffle)
@unittest.mock.patch.object(TrainEvaluator, "__init__")
def test_get_splitter_cv_object(self, te_mock):
te_mock.return_value = None
D = unittest.mock.Mock(spec=AbstractDataManager)
D.data = dict(Y_train=np.array([0, 0, 0, 1, 1, 1]))
D.info = dict(task=BINARY_CLASSIFICATION)
D.feat_type = {}
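        # A scikit-learn splitter instance passed as resampling_strategy must
        # be returned as-is with its parameters preserved; group-based
        # splitters without a 'groups' argument must raise.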
# GroupKFold, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
D.data["X_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupKFold(n_splits=2)
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, GroupKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupKFold, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupKFold(n_splits=2)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# GroupKFold, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupKFold(n_splits=2)
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, GroupKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupKFold, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupKFold(n_splits=2)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# GroupKFold, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupKFold(n_splits=2)
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, GroupKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupKFold, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupKFold(n_splits=2)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# KFold, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = KFold(n_splits=4, shuffle=True, random_state=5)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, KFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4
)
self.assertTrue(cv.shuffle)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# KFold, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = KFold(n_splits=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, KFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertFalse(cv.shuffle)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# KFold, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = KFold(n_splits=4, shuffle=True, random_state=5)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, KFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4
)
self.assertTrue(cv.shuffle)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# KFold, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = KFold(n_splits=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, KFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertFalse(cv.shuffle)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# KFold, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = KFold(n_splits=4, shuffle=True, random_state=5)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, KFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4
)
self.assertTrue(cv.shuffle)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# KFold, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = KFold(n_splits=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, KFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertFalse(cv.shuffle)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeaveOneGroupOut, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeaveOneGroupOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeaveOneGroupOut, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# LeaveOneGroupOut, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeaveOneGroupOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeaveOneGroupOut, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# LeaveOneGroupOut, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeaveOneGroupOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeaveOneGroupOut, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# LeavePGroupsOut, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePGroupsOut(n_groups=1)
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePGroupsOut)
self.assertEqual(cv.n_groups, 1)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePGroupsOut, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneGroupOut()
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# LeavePGroupsOut, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePGroupsOut(n_groups=1)
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePGroupsOut)
self.assertEqual(cv.n_groups, 1)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePGroupsOut, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePGroupsOut(n_groups=1)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# LeavePGroupsOut, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePGroupsOut(n_groups=1)
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePGroupsOut)
self.assertEqual(cv.n_groups, 1)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePGroupsOut, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePGroupsOut(n_groups=1)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# LeaveOneOut, classification
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneOut()
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeaveOneOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeaveOneOut, regression
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneOut()
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeaveOneOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeaveOneOut, multi-output regression
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeaveOneOut()
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeaveOneOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePOut, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = LeavePOut(p=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePOut)
self.assertEqual(cv.p, 3)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePOut, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePOut(p=2)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePOut)
self.assertEqual(cv.p, 2)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePOut, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = LeavePOut(p=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePOut)
self.assertEqual(cv.p, 3)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePOut, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePOut(p=2)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePOut)
self.assertEqual(cv.p, 2)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePOut, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = LeavePOut(p=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePOut)
self.assertEqual(cv.p, 3)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# LeavePOut, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = LeavePOut(p=2)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, LeavePOut)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# PredefinedSplit, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = PredefinedSplit(
test_fold=np.array([0, 1, 0, 1, 0, 1])
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, PredefinedSplit)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# PredefinedSplit, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = PredefinedSplit(
test_fold=np.array([0, 1, 0, 1, 0, 1])
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, PredefinedSplit)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# PredefinedSplit, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = PredefinedSplit(
test_fold=np.array([0, 1, 0, 1, 0, 1])
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, PredefinedSplit)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedKFold, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = RepeatedKFold(
n_splits=4, n_repeats=3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4 * 3
)
self.assertEqual(cv.n_repeats, 3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedKFold, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = RepeatedKFold(n_splits=5, n_repeats=10)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 5 * 10
)
self.assertEqual(cv.n_repeats, 10)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedKFold, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = RepeatedKFold(
n_splits=4, n_repeats=3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4 * 3
)
self.assertEqual(cv.n_repeats, 3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedKFold, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = RepeatedKFold(n_splits=5, n_repeats=10)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 5 * 10
)
self.assertEqual(cv.n_repeats, 10)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedKFold, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = RepeatedKFold(
n_splits=4, n_repeats=3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4 * 3
)
self.assertEqual(cv.n_repeats, 3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedKFold, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = RepeatedKFold(n_splits=5, n_repeats=10)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 5 * 10
)
self.assertEqual(cv.n_repeats, 10)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedStratifiedKFold, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = RepeatedStratifiedKFold(
n_splits=2, n_repeats=3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedStratifiedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2 * 3
)
self.assertEqual(cv.n_repeats, 3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# RepeatedStratifiedKFold, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
D.data["X_train"] = D.data["Y_train"]
evaluator = TrainEvaluator()
evaluator.resampling_strategy = RepeatedStratifiedKFold(
n_splits=5, n_repeats=10
)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, RepeatedStratifiedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 5 * 10
)
self.assertEqual(cv.n_repeats, 10)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# StratifiedKFold, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
D.data["X_train"] = D.data["Y_train"]
evaluator = TrainEvaluator()
        evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = StratifiedKFold(
n_splits=2, shuffle=True, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, StratifiedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertTrue(cv.shuffle)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# StratifiedKFold, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = StratifiedKFold(n_splits=3, shuffle=False)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, StratifiedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertFalse(cv.shuffle)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# TimeSeriesSplit, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = TimeSeriesSplit(n_splits=4, max_train_size=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, TimeSeriesSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4
)
self.assertEqual(cv.max_train_size, 3)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
        # TimeSeriesSplit, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = TimeSeriesSplit(n_splits=3)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, TimeSeriesSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertIsNone(cv.max_train_size)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# TimeSeriesSplit, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = TimeSeriesSplit(n_splits=4, max_train_size=3)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, TimeSeriesSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 4
)
self.assertEqual(cv.max_train_size, 3)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# TimeSeriesSplit, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = TimeSeriesSplit(n_splits=3)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, TimeSeriesSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertIsNone(cv.max_train_size)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# StratifiedKFold, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = StratifiedKFold(n_splits=3)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, StratifiedKFold)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 3
)
self.assertFalse(cv.shuffle)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupShuffleSplit, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
evaluator.resampling_strategy = GroupShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, GroupShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupShuffleSplit, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupShuffleSplit(n_splits=5)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# GroupShuffleSplit, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
evaluator.resampling_strategy = GroupShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, GroupShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupShuffleSplit, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupShuffleSplit(n_splits=5)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# GroupShuffleSplit, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = {"groups": np.array([1, 1, 2, 1, 2, 2])}
evaluator.resampling_strategy = GroupShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, GroupShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# GroupShuffleSplit, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = GroupShuffleSplit(n_splits=5)
evaluator.resampling_strategy_args = None
self.assertRaisesRegex(
ValueError,
"The 'groups' parameter should not be None",
evaluator.get_splitter,
D,
)
# StratifiedShuffleSplit, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = StratifiedShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, StratifiedShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# StratifiedShuffleSplit, classification no args
D.data["Y_train"] = np.array(
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
)
D.data["X_train"] = D.data["Y_train"]
evaluator = TrainEvaluator()
evaluator.resampling_strategy = StratifiedShuffleSplit(n_splits=10)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, StratifiedShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 10
)
self.assertIsNone(cv.test_size)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# ShuffleSplit, classification with args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
D.data["X_train"] = D.data["Y_train"]
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = ShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, ShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# ShuffleSplit, classification no args
D.data["Y_train"] = np.array([0, 0, 0, 1, 1, 1])
evaluator = TrainEvaluator()
evaluator.resampling_strategy = ShuffleSplit(n_splits=10)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, ShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 10
)
self.assertIsNone(cv.test_size)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# ShuffleSplit, regression with args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = ShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, ShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# ShuffleSplit, regression no args
D.data["Y_train"] = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
D.info["task"] = REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = ShuffleSplit(n_splits=10)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, ShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 10
)
self.assertIsNone(cv.test_size)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# ShuffleSplit, multi-output regression with args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy_args = None
evaluator.resampling_strategy = ShuffleSplit(
n_splits=2, test_size=0.3, random_state=5
)
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, ShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 2
)
self.assertEqual(cv.test_size, 0.3)
self.assertEqual(cv.random_state, 5)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
# ShuffleSplit, multi-output regression no args
D.data["Y_train"] = np.array(
[[0.0, 0.1], [0.2, 0.3], [0.4, 0.5], [1.0, 1.1], [1.2, 1.3], [1.4, 1.5]]
)
D.info["task"] = MULTIOUTPUT_REGRESSION
evaluator = TrainEvaluator()
evaluator.resampling_strategy = ShuffleSplit(n_splits=10)
evaluator.resampling_strategy_args = None
cv = evaluator.get_splitter(D)
self.assertIsInstance(cv, ShuffleSplit)
self.assertEqual(
cv.get_n_splits(groups=evaluator.resampling_strategy_args["groups"]), 10
)
self.assertIsNone(cv.test_size)
self.assertIsNone(cv.random_state)
next(
cv.split(
D.data["Y_train"],
D.data["Y_train"],
groups=evaluator.resampling_strategy_args["groups"],
)
)
@unittest.mock.patch.object(TrainEvaluator, "__init__")
def test_holdout_split_size(self, te_mock):
te_mock.return_value = None
D = unittest.mock.Mock(spec=AbstractDataManager)
D.feat_type = {}
evaluator = TrainEvaluator()
evaluator.resampling_strategy = "holdout"
# Exact Ratio
D.data = dict(Y_train=np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]))
D.info = dict(task=BINARY_CLASSIFICATION)
evaluator.resampling_strategy_args = {"shuffle": True, "train_size": 0.7}
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 7)
self.assertEqual(len(test_samples), 3)
# No Shuffle
evaluator.resampling_strategy_args = {"shuffle": False, "train_size": 0.7}
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 7)
self.assertEqual(len(test_samples), 3)
# Rounded Ratio
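# With 9 samples and train_size=0.7, the 6.3-sample train split floors
# to 6 and the remaining 3 samples become the test split.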
D.data = dict(Y_train=np.array([0, 0, 0, 0, 0, 1, 1, 1, 1]))
evaluator.resampling_strategy_args = {"shuffle": True, "train_size": 0.7}
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 6)
self.assertEqual(len(test_samples), 3)
# Rounded Ratio No Shuffle
evaluator.resampling_strategy_args = {"shuffle": False, "train_size": 0.7}
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 6)
self.assertEqual(len(test_samples), 3)
# More data
evaluator.resampling_strategy_args = {"shuffle": True, "train_size": 0.7}
D.data = dict(Y_train=np.zeros((900, 1)))
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 630)
self.assertEqual(len(test_samples), 270)
evaluator.resampling_strategy_args = {"train_size": 0.752}
D.data = dict(Y_train=np.zeros((900, 1)))
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 676)
self.assertEqual(len(test_samples), 224)
# Multilabel Exact Ratio
D.data = dict(
Y_train=np.array(
[
[0, 0],
[0, 1],
[1, 1],
[1, 0],
[1, 1],
[1, 1],
[1, 1],
[1, 0],
[1, 1],
[1, 1],
]
)
)
D.info = dict(task=MULTILABEL_CLASSIFICATION)
evaluator.resampling_strategy_args = {"shuffle": True, "train_size": 0.7}
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 7)
self.assertEqual(len(test_samples), 3)
# Multilabel No Shuffle
D.data = dict(
Y_train=np.array(
[[0, 0], [0, 1], [1, 1], [1, 0], [1, 1], [1, 1], [1, 1], [1, 0], [1, 1]]
)
)
evaluator.resampling_strategy_args = {"shuffle": False, "train_size": 0.7}
cv = evaluator.get_splitter(D)
self.assertEqual(cv.get_n_splits(), 1)
train_samples, test_samples = next(
cv.split(D.data["Y_train"], D.data["Y_train"])
)
self.assertEqual(len(train_samples), 6)
self.assertEqual(len(test_samples), 3)
|
TestTrainEvaluator
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-mentions-per-user.py
|
{
"start": 75,
"end": 947
}
|
class ____(object):
def countMentions(self, numberOfUsers, events):
"""
:type numberOfUsers: int
:type events: List[List[str]]
:rtype: List[int]
"""
result = [0]*numberOfUsers
lookup = [1]*numberOfUsers
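# lookup[i] is the earliest time user i is online again; users start online.
# Sort by timestamp, breaking ties so OFFLINE events are processed before
# MESSAGE events at the same time (False < True in the sort key).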
events.sort(key=lambda x: (int(x[1]), x[0] == "MESSAGE"))
for m, t, s in events:
if m == "OFFLINE":
lookup[int(s)] = int(t)+60
continue
if s == "ALL":
for i in xrange(len(lookup)):
result[i] += 1
elif s == "HERE":
for i in xrange(len(lookup)):
if lookup[i] <= int(t):
result[i] += 1
else:
for idx in s.split():
result[int(idx[2:])] += 1
return result
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/mobilebert/modeling_mobilebert.py
|
{
"start": 7188,
"end": 9312
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
self.value = nn.Linear(
config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_causal = False
def forward(
self,
query_tensor: torch.Tensor,
key_tensor: torch.Tensor,
value_tensor: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
input_shape = query_tensor.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
# get all proj
query_layer = self.query(query_tensor).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(key_tensor).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(value_tensor).view(*hidden_shape).transpose(1, 2)
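# query/key/value now have shape (batch, num_heads, seq_len, head_dim)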
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return attn_output, attn_weights
|
MobileBertSelfAttention
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2_filtering.py
|
{
"start": 1543,
"end": 14269
}
|
class ____:
def test_populate_env_filters_plaintext_api_key_when_secret_configured(
self, cloud_run_worker_v2_job_config
):
# Add plaintext API key to env
cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
secret="prefect-api-key", version="latest"
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext version
assert {"name": "PREFECT_API_KEY", "value": "plaintext-api-key"} not in env_vars
# Should contain secret version
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_filters_plaintext_auth_string_when_secret_configured(
self, cloud_run_worker_v2_job_config
):
# Add plaintext auth string to env
cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
"plaintext-auth-string"
)
cloud_run_worker_v2_job_config.prefect_api_auth_string_secret = (
SecretKeySelector(secret="prefect-auth-string", version="latest")
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext version
assert {
"name": "PREFECT_API_AUTH_STRING",
"value": "plaintext-auth-string",
} not in env_vars
# Should contain secret version
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_filters_both_plaintext_when_secrets_configured(
self, cloud_run_worker_v2_job_config
):
# Add plaintext versions to env
cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
"plaintext-auth-string"
)
cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
secret="prefect-api-key", version="latest"
)
cloud_run_worker_v2_job_config.prefect_api_auth_string_secret = (
SecretKeySelector(secret="prefect-auth-string", version="latest")
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext versions
assert {"name": "PREFECT_API_KEY", "value": "plaintext-api-key"} not in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"value": "plaintext-auth-string",
} not in env_vars
# Should contain secret versions
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_keeps_plaintext_when_no_secrets_configured(
self, cloud_run_worker_v2_job_config
):
# Add plaintext versions to env but don't configure secrets
cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
"plaintext-auth-string"
)
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should contain plaintext versions since no secrets are configured
assert {"name": "PREFECT_API_KEY", "value": "plaintext-api-key"} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"value": "plaintext-auth-string",
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_filters_api_key_when_configured_via_env_from_secrets(
self, cloud_run_worker_v2_job_config
):
# Add plaintext API key to env
cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
# Configure API key via env_from_secrets instead of dedicated field
cloud_run_worker_v2_job_config.env_from_secrets = {
"PREFECT_API_KEY": SecretKeySelector(
secret="prefect-api-key", version="latest"
)
}
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext version
assert {"name": "PREFECT_API_KEY", "value": "plaintext-api-key"} not in env_vars
# Should contain secret version from env_from_secrets
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_filters_auth_string_when_configured_via_env_from_secrets(
self, cloud_run_worker_v2_job_config
):
# Add plaintext auth string to env
cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
"plaintext-auth-string"
)
# Configure auth string via env_from_secrets instead of dedicated field
cloud_run_worker_v2_job_config.env_from_secrets = {
"PREFECT_API_AUTH_STRING": SecretKeySelector(
secret="prefect-auth-string", version="latest"
)
}
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext version
assert {
"name": "PREFECT_API_AUTH_STRING",
"value": "plaintext-auth-string",
} not in env_vars
# Should contain secret version from env_from_secrets
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_filters_both_when_configured_via_env_from_secrets(
self, cloud_run_worker_v2_job_config
):
# Add plaintext versions to env
cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
"plaintext-auth-string"
)
# Configure both via env_from_secrets
cloud_run_worker_v2_job_config.env_from_secrets = {
"PREFECT_API_KEY": SecretKeySelector(
secret="prefect-api-key", version="latest"
),
"PREFECT_API_AUTH_STRING": SecretKeySelector(
secret="prefect-auth-string", version="latest"
),
}
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext versions
assert {"name": "PREFECT_API_KEY", "value": "plaintext-api-key"} not in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"value": "plaintext-auth-string",
} not in env_vars
# Should contain secret versions from env_from_secrets
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "prefect-api-key", "version": "latest"}
},
} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "prefect-auth-string", "version": "latest"}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
def test_populate_env_prioritizes_dedicated_secret_fields_over_env_from_secrets(
self, cloud_run_worker_v2_job_config
):
# Add plaintext versions to env
cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
"plaintext-auth-string"
)
# Configure via both dedicated fields and env_from_secrets
cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
secret="dedicated-api-key", version="latest"
)
cloud_run_worker_v2_job_config.prefect_api_auth_string_secret = (
SecretKeySelector(secret="dedicated-auth-string", version="latest")
)
cloud_run_worker_v2_job_config.env_from_secrets = {
"PREFECT_API_KEY": SecretKeySelector(
secret="env-from-secrets-api-key", version="latest"
),
"PREFECT_API_AUTH_STRING": SecretKeySelector(
secret="env-from-secrets-auth-string", version="latest"
),
}
cloud_run_worker_v2_job_config._populate_env()
env_vars = cloud_run_worker_v2_job_config.job_body["template"]["template"][
"containers"
][0]["env"]
# Should not contain plaintext versions
assert {"name": "PREFECT_API_KEY", "value": "plaintext-api-key"} not in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"value": "plaintext-auth-string",
} not in env_vars
# Should contain dedicated field secrets (should be added after env_from_secrets)
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {"secret": "dedicated-api-key", "version": "latest"}
},
} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {"secret": "dedicated-auth-string", "version": "latest"}
},
} in env_vars
# Should also contain env_from_secrets versions
assert {
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": {
"secret": "env-from-secrets-api-key",
"version": "latest",
}
},
} in env_vars
assert {
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": {
"secret": "env-from-secrets-auth-string",
"version": "latest",
}
},
} in env_vars
# Other env vars should still be present
assert {"name": "ENV1", "value": "VALUE1"} in env_vars
assert {"name": "ENV2", "value": "VALUE2"} in env_vars
|
TestCloudRunWorkerJobV2ConfigurationFiltering
|
python
|
django__django
|
django/core/management/color.py
|
{
"start": 1896,
"end": 3168
}
|
class ____:
pass
def make_style(config_string=""):
"""
Create a Style object from the given config_string.
If config_string is empty, django.utils.termcolors.DEFAULT_PALETTE is used.
"""
style = Style()
color_settings = termcolors.parse_color_setting(config_string)
# The nocolor palette has all available roles.
# Use that palette as the basis for populating
# the palette as defined in the environment.
for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
if color_settings:
format = color_settings.get(role, {})
style_func = termcolors.make_style(**format)
else:
def style_func(x):
return x
setattr(style, role, style_func)
# For backwards compatibility,
# set style for ERROR_OUTPUT == ERROR
style.ERROR_OUTPUT = style.ERROR
return style
@functools.cache
def no_style():
"""
Return a Style object with no color scheme.
"""
return make_style("nocolor")
def color_style(force_color=False):
"""
Return a Style object from the Django color scheme.
"""
if not force_color and not supports_color():
return no_style()
return make_style(os.environ.get("DJANGO_COLORS", ""))
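# Illustrative usage sketch (role names follow the nocolor palette):
# style = color_style()
# print(style.SUCCESS("System check passed"))
# print(style.ERROR("System check failed"))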
|
Style
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/matchClass7.py
|
{
"start": 136,
"end": 311
}
|
class ____:
val: str
def func1(val: DC1):
result = val
match result:
case DC1(result):
reveal_type(result, expected_text="str")
@dataclass
|
DC1
|
python
|
pytorch__pytorch
|
torch/_inductor/config.py
|
{
"start": 88498,
"end": 90035
}
|
class ____:
force_extern_kernel_in_multi_template: bool = False
max_mm_configs: Optional[int] = None
runtime_triton_dtype_assert = False
runtime_triton_shape_assert = False
static_cpp_dtype_assert = False
# regex to control the set of considered autotuning
# choices (aka configs) by name and / or description
autotune_choice_name_regex: Optional[str] = None
autotune_choice_desc_regex: Optional[str] = None
graphsafe_rng_func_ignores_fallback_random = False
track_memory_lifecycle: Optional[Literal["assert", "log"]] = None
# If set to True, AOTI-generated CMakelists.txt will still use libtorch
# for unit testing
use_libtorch = False
# Assume bucketing reduces latency (mostly for testing)
assume_bucketing_reduces_latency: bool = True
# A test config to ease the test for perf of reduction config filtering
force_filter_reduction_configs = (
os.getenv("TORCHINDUCTOR_FORCE_FILTER_REDUCTION_CONFIGS") == "1"
)
# a testing config to distort benchmarking result
# - empty string to disable
# - "inverse" to inverse the numbers
# - "random" return a random value
distort_benchmarking_result = os.getenv(
"TORCHINDUCTOR_DISTORT_BENCHMARKING_RESULT", ""
)
bisect_pre_grad_graph = False
bisect_keep_custom_backend_for_inductor = False
if TYPE_CHECKING:
from torch.utils._config_typing import * # noqa: F401, F403
# adds patch, save_config, etc
install_config_module(sys.modules[__name__])
|
test_configs
|
python
|
ansible__ansible
|
lib/ansible/utils/collection_loader/_collection_finder.py
|
{
"start": 14375,
"end": 18327
}
|
class ____:
def __init__(self, collection_finder, pathctx):
# when called from a path_hook, find_module doesn't usually get the path arg, so this provides our context
self._pathctx = _to_text(pathctx)
self._collection_finder = collection_finder
# cache the native FileFinder (take advantage of its filesystem cache for future find/load requests)
self._file_finder = None
# class init is fun: this method has a self arg that won't get used
def _get_filefinder_path_hook(self=None):
_file_finder_hook = None
# try to find the FileFinder hook to call for fallback path-based imports in Py3
_file_finder_hook = [ph for ph in sys.path_hooks if 'FileFinder' in repr(ph)]
if len(_file_finder_hook) != 1:
raise Exception('need exactly one FileFinder import hook (found {0})'.format(len(_file_finder_hook)))
_file_finder_hook = _file_finder_hook[0]
return _file_finder_hook
_filefinder_path_hook = _get_filefinder_path_hook()
def _get_finder(self, fullname):
split_name = fullname.split('.')
toplevel_pkg = split_name[0]
if toplevel_pkg == 'ansible_collections':
# collections content? delegate to the collection finder
return self._collection_finder
else:
# Something else; we'd normally restrict this to `ansible` descendent modules so that any weird loader
# behavior that arbitrary Python modules have can be serviced by those loaders. In some dev/test
# scenarios (eg a venv under a collection) our path_hook signs us up to load non-Ansible things, and
# it's too late by the time we've reached this point, but also too expensive for the path_hook to figure
# out what we *shouldn't* be loading with the limited info it has. So we'll just delegate to the
# normal path-based loader as best we can to service it. This also allows us to take advantage of Python's
# built-in FS caching and byte-compilation for most things.
# create or consult our cached file finder for this path
if not self._file_finder:
try:
self._file_finder = _AnsiblePathHookFinder._filefinder_path_hook(self._pathctx)
except ImportError:
# FUTURE: log at a high logging level? This is normal for things like python36.zip on the path, but
# might not be in some other situation...
return None
return self._file_finder
def find_module(self, fullname, path=None):
# we ignore the passed in path here- use what we got from the path hook init
finder = self._get_finder(fullname)
if finder is None:
return None
elif isinstance(finder, FileFinder):
# this codepath is erroneously used under some cases in py3,
# and the find_module method on FileFinder does not accept the path arg
# see https://github.com/pypa/setuptools/pull/2918
return finder.find_module(fullname)
else:
return finder.find_module(fullname, path=[self._pathctx])
def find_spec(self, fullname, target=None):
split_name = fullname.split('.')
toplevel_pkg = split_name[0]
finder = self._get_finder(fullname)
if finder is None:
return None
elif toplevel_pkg == 'ansible_collections':
return finder.find_spec(fullname, path=[self._pathctx])
else:
return finder.find_spec(fullname)
def iter_modules(self, prefix):
# NB: this currently represents only what's on disk, and does not handle package redirection
return _iter_modules_impl([self._pathctx], prefix)
def __repr__(self):
return "{0}(path='{1}')".format(self.__class__.__name__, self._pathctx)
|
_AnsiblePathHookFinder
|
python
|
pytorch__pytorch
|
torch/_inductor/tiling_utils.py
|
{
"start": 21600,
"end": 21836
}
|
class ____:
"""
Tiling of a var by `tiling_factor` that yields additional coalesced mem accesses by `benefit_score`
"""
var: sympy.Symbol
tiling_factor: int
score: int
@dataclasses.dataclass(frozen=True)
|
VarTiling
|
python
|
fastai__fastai
|
fastai/callback/rnn.py
|
{
"start": 396,
"end": 884
}
|
class ____(Callback):
"`Callback` that resets the model at each validation/training step"
def before_train(self): self.model.reset()
def before_validate(self): self.model.reset()
def after_fit(self): self.model.reset()
_docs = dict(before_train="Reset the model before training",
before_validate="Reset the model before validation",
after_fit="Reset the model after fitting")
# %% ../../nbs/34_callback.rnn.ipynb 6
|
ModelResetter
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py
|
{
"start": 962,
"end": 6266
}
|
class ____:
"""Finder to locate distributions.
The main purpose of this class is to memoize found distributions' names, so
only one distribution is returned for each package name. At lot of pip code
assumes this (because it is setuptools's behavior), and not doing the same
can potentially cause a distribution in lower precedence path to override a
higher precedence one if the caller is not careful.
Eventually we probably want to make it possible to see lower precedence
installations as well. It's a useful feature, after all.
"""
FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
def __init__(self) -> None:
self._found_names: Set[NormalizedName] = set()
def _find_impl(self, location: str) -> Iterator[FoundResult]:
"""Find distributions in a location."""
# Skip looking inside a wheel. Since a package inside a wheel is not
# always valid (due to .data directories etc.), its .dist-info entry
# should not be considered an installed distribution.
if _looks_like_wheel(location):
return
# To know exactly where we find a distribution, we have to feed in the
# paths one by one, instead of dumping the list to importlib.metadata.
for dist in importlib.metadata.distributions(path=[location]):
info_location = get_info_location(dist)
try:
name = get_dist_canonical_name(dist)
except BadMetadata as e:
logger.warning("Skipping %s due to %s", info_location, e.reason)
continue
if name in self._found_names:
continue
self._found_names.add(name)
yield dist, info_location
def find(self, location: str) -> Iterator[BaseDistribution]:
"""Find distributions in a location.
The path can be either a directory, or a ZIP archive.
"""
for dist, info_location in self._find_impl(location):
if info_location is None:
installed_location: Optional[BasePath] = None
else:
installed_location = info_location.parent
yield Distribution(dist, info_location, installed_location)
def find_linked(self, location: str) -> Iterator[BaseDistribution]:
"""Read location in egg-link files and return distributions in there.
The path should be a directory; otherwise this returns nothing. This
follows how setuptools does this for compatibility. The first non-empty
line in the egg-link is read as a path (resolved against the egg-link's
containing directory if relative). Distributions found at that linked
location are returned.
"""
path = pathlib.Path(location)
if not path.is_dir():
return
for child in path.iterdir():
if child.suffix != ".egg-link":
continue
with child.open() as f:
lines = (line.strip() for line in f)
target_rel = next((line for line in lines if line), "")
if not target_rel:
continue
target_location = str(path.joinpath(target_rel))
for dist, info_location in self._find_impl(target_location):
yield Distribution(dist, info_location, path)
def _find_eggs_in_dir(self, location: str) -> Iterator[BaseDistribution]:
from pip._vendor.pkg_resources import find_distributions
from pip._internal.metadata import pkg_resources as legacy
with os.scandir(location) as it:
for entry in it:
if not entry.name.endswith(".egg"):
continue
for dist in find_distributions(entry.path):
yield legacy.Distribution(dist)
def _find_eggs_in_zip(self, location: str) -> Iterator[BaseDistribution]:
from pip._vendor.pkg_resources import find_eggs_in_zip
from pip._internal.metadata import pkg_resources as legacy
try:
importer = zipimport.zipimporter(location)
except zipimport.ZipImportError:
return
for dist in find_eggs_in_zip(importer, location):
yield legacy.Distribution(dist)
def find_eggs(self, location: str) -> Iterator[BaseDistribution]:
"""Find eggs in a location.
This actually uses the old *pkg_resources* backend. We likely want to
deprecate this so we can eventually remove the *pkg_resources*
dependency entirely. Before that, this should first emit a deprecation
warning for some versions when using the fallback since importing
*pkg_resources* is slow for those who don't need it.
"""
if os.path.isdir(location):
yield from self._find_eggs_in_dir(location)
if zipfile.is_zipfile(location):
yield from self._find_eggs_in_zip(location)
@functools.lru_cache(maxsize=None) # Warn a distribution exactly once.
def _emit_egg_deprecation(location: Optional[str]) -> None:
deprecated(
reason=f"Loading egg at {location} is deprecated.",
replacement="to use pip for package installation",
gone_in="24.3",
issue=12330,
)
|
_DistributionFinder
|
python
|
getsentry__sentry
|
tests/sentry/replays/endpoints/test_project_replay_clicks_index.py
|
{
"start": 271,
"end": 11067
}
|
class ____(APITestCase, ReplaysSnubaTestCase):
endpoint = "sentry-api-0-project-replay-clicks-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.replay_id = uuid4().hex
self.url = reverse(
self.endpoint, args=(self.organization.slug, self.project.slug, self.replay_id)
)
def test_feature_flag_disabled(self) -> None:
response = self.client.get(self.url)
assert response.status_code == 404
def test_invalid_uuid_404s(self) -> None:
with self.feature(REPLAYS_FEATURES):
url = reverse(self.endpoint, args=(self.organization.slug, self.project.slug, "abc"))
response = self.client.get(url)
assert response.status_code == 404
def test_get_replay_multiple_selectors(self) -> None:
"""Test only one replay returned."""
replay1_id = self.replay_id
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, replay1_id))
self.store_replays(
mock_replay_click(
seq1_timestamp,
self.project.id,
replay1_id,
node_id=1,
tag="div",
id="myid",
class_=["class1", "class2"],
role="button",
testid="1",
alt="Alt",
aria_label="AriaLabel",
title="MyTitle",
text="Hello",
)
)
self.store_replays(
mock_replay_click(
seq2_timestamp,
self.project.id,
replay1_id,
node_id=2,
tag="button",
id="myid",
alt="NotAlt",
class_=["class1", "class3"],
)
)
with self.feature(REPLAYS_FEATURES):
# Assert a node was returned.
response = self.client.get(self.url + "?query=click.tag:div click.tag:button")
assert response.status_code == 200
response_data = response.json()["data"]
assert len(response_data) == 2
assert response_data[0]["node_id"] == 1
assert response_data[1]["node_id"] == 2
def test_get_replays_filter_clicks(self) -> None:
"""Test replays conform to the interchange format."""
replay1_id = self.replay_id
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, replay1_id))
self.store_replays(
mock_replay_click(
seq2_timestamp,
self.project.id,
replay1_id,
node_id=1,
tag="div",
id="id1",
class_=["class1", "class2"],
component_name="SignUpForm",
role="button",
testid="1",
alt="Alt",
aria_label="AriaLabel",
title="MyTitle",
text="Hello",
)
)
self.store_replays(
mock_replay_click(
seq2_timestamp,
self.project.id,
replay1_id,
node_id=2,
tag="button",
id="id2",
class_=["class1", "class3"],
)
)
with self.feature(REPLAYS_FEATURES):
queries = [
"click.alt:Alt",
"click.class:class2",
"click.class:class3",
"click.id:id1",
"click.label:AriaLabel",
"click.role:button",
"click.tag:div",
"click.tag:button",
"click.testid:1",
"click.textContent:Hello",
"click.title:MyTitle",
"click.selector:div",
"click.selector:div#id1",
"click.selector:div[alt=Alt]",
"click.selector:div[data-sentry-component=SignUpForm]",
"click.selector:div[title=MyTitle]",
"click.selector:div[data-testid='1']",
"click.selector:div[role=button]",
"click.selector:div#id1.class1.class2",
# Single quotes around attribute value.
"click.selector:div[role='button']",
"click.selector:div#id1.class1.class2[role=button][aria-label='AriaLabel'][data-sentry-component=SignUpForm]",
]
for query in queries:
response = self.client.get(self.url + f"?query={query}")
assert response.status_code == 200, query
response_data = response.json()
assert len(response_data["data"]) == 1, query
queries = [
"click.alt:NotAlt",
"click.class:class4",
"click.id:other",
"click.label:NotAriaLabel",
"click.role:form",
"click.tag:header",
"click.testid:2",
"click.textContent:World",
"click.title:NotMyTitle",
# "!click.selector:div#myid",
"click.selector:div#notmyid",
# Assert all classes must match.
"click.selector:div#myid.class1.class2.class3",
# Invalid selectors return no rows.
"click.selector:$#%^#%",
# Integer type role values are not allowed and must be wrapped in single quotes.
"click.selector:div[title=1]",
]
for query in queries:
response = self.client.get(self.url + f"?query={query}")
assert response.status_code == 200, query
response_data = response.json()
assert len(response_data["data"]) == 0, query
def test_get_replays_filter_clicks_not_selector(self) -> None:
replay1_id = self.replay_id
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=22)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, replay1_id))
self.store_replays(
mock_replay_click(
seq2_timestamp,
self.project.id,
replay1_id,
node_id=1,
tag="div",
id="id1",
class_=["class1", "class2"],
role="button",
testid="1",
alt="Alt",
aria_label="AriaLabel",
title="MyTitle",
text="Hello",
)
)
self.store_replays(
mock_replay_click(
seq2_timestamp,
self.project.id,
replay1_id,
node_id=2,
tag="button",
id="id2",
class_=["class1", "class3"],
)
)
with self.feature(REPLAYS_FEATURES):
# Assert `NOT` selectors match every click.
response = self.client.get(self.url + "?query=!click.selector:div#myid")
assert response.status_code == 200
response_data = response.json()
assert len(response_data["data"]) == 2
def test_get_replay_explicit_and_to_implicit_or(self) -> None:
"""Test explicit AND operation are implicitly converted to OR operations."""
replay1_id = self.replay_id
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, replay1_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, replay1_id))
self.store_replays(
mock_replay_click(
seq1_timestamp,
self.project.id,
replay1_id,
node_id=1,
tag="div",
id="myid",
class_=["class1", "class2"],
role="button",
testid="1",
alt="Alt",
aria_label="AriaLabel",
title="MyTitle",
text="Hello",
)
)
self.store_replays(
mock_replay_click(
seq2_timestamp,
self.project.id,
replay1_id,
node_id=2,
tag="button",
id="myid",
alt="NotAlt",
class_=["class1", "class3"],
)
)
with self.feature(REPLAYS_FEATURES):
# Explicit AND becomes logical OR
response = self.client.get(self.url + "?query=click.tag:div AND click.tag:button")
assert response.status_code == 200
response_data = response.json()["data"]
assert len(response_data) == 2
assert response_data[0]["node_id"] == 1
assert response_data[1]["node_id"] == 2
# ParenExpression implicit AND becomes logical OR
response = self.client.get(self.url + "?query=(click.tag:div click.tag:button)")
assert response.status_code == 200
response_data = response.json()["data"]
assert len(response_data) == 2
assert response_data[0]["node_id"] == 1
assert response_data[1]["node_id"] == 2
# ParenExpression explicit AND becomes logical OR
response = self.client.get(self.url + "?query=(click.tag:div AND click.tag:button)")
assert response.status_code == 200
response_data = response.json()["data"]
assert len(response_data) == 2
assert response_data[0]["node_id"] == 1
assert response_data[1]["node_id"] == 2
def test_get_replays_invalid_filter_field(self) -> None:
"""Test invalid filter fields error."""
with self.feature(REPLAYS_FEATURES):
response = self.client.get(self.url + "?query=abc:123")
assert response.status_code == 400
|
OrganizationReplayDetailsTest
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/test_themes.py
|
{
"start": 1555,
"end": 1752
}
|
class ____(ThemedModel):
another_string = String("world")
FILE_CONTENTS = b"""
attrs:
ThemedModel:
number: 57
SubOfThemedModel:
another_string: "boo"
"""
|
SubOfThemedModel
|
python
|
walkccc__LeetCode
|
solutions/3533. Concatenated Divisibility/3533.py
|
{
"start": 0,
"end": 1217
}
|
class ____:
def concatenatedDivisibility(self, nums: list[int], k: int) -> list[int]:
n = len(nums)
nums.sort()
lengths = [len(str(num)) for num in nums]
pows = [pow(10, length, k) for length in lengths]
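# pows[i] == 10**len(str(nums[i])) % k: appending nums[i] to a prefix with
# remainder `mod` yields remainder (mod * pows[i] + nums[i]) % k.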
@functools.lru_cache(None)
def dp(mask: int, mod: int) -> bool:
"""
Returns True if there is a way to form a number divisible by `k` using the
numbers in `nums`, where nums[i] is used iff `mask & (1 << i)`.
"""
if mask == (1 << n) - 1:
return mod == 0
for i in range(n):
if (mask >> i & 1) == 0:
newMod = (mod * pows[i] + nums[i]) % k
if dp(mask | 1 << i, newMod):
return True
return False
def reconstruct(mask: int, mod: int) -> list[int]:
"""
Reconstructs the numbers that form a number divisible by `k` using the
numbers in `nums`, where nums[i] is used iff `mask & (1 << i)`.
"""
for i in range(n):
if (mask >> i & 1) == 0:
newMod = (mod * pows[i] + nums[i]) % k
if dp(mask | 1 << i, newMod):
return [nums[i]] + reconstruct(mask | 1 << i, newMod)
return []
return reconstruct(0, 0) if dp(0, 0) else []
|
Solution
|
python
|
django__django
|
tests/force_insert_update/models.py
|
{
"start": 468,
"end": 587
}
|
class ____(models.Model):
name = models.IntegerField(primary_key=True)
value = models.IntegerField()
|
WithCustomPK
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/schema.py
|
{
"start": 2460,
"end": 3495
}
|
class ____:
"""Helper to compare types inside of datastructures based on affinity.
E.g.::
eq_(
inspect(connection).get_columns("foo"),
[
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.INTEGER),
"nullable": False,
"default": None,
"autoincrement": False,
},
{
"name": "data",
"type": testing.eq_type_affinity(sqltypes.NullType),
"nullable": True,
"default": None,
"autoincrement": False,
},
],
)
"""
def __init__(self, target):
self.target = sqltypes.to_instance(target)
def __eq__(self, other):
return self.target._type_affinity is other._type_affinity
def __ne__(self, other):
return self.target._type_affinity is not other._type_affinity
|
eq_type_affinity
|
python
|
ray-project__ray
|
python/ray/air/tests/test_integration_comet.py
|
{
"start": 10673,
"end": 12262
}
|
class ____(unittest.TestCase):
def setUp(self):
self.logger = CometLoggerCallback()
self.trials = [
MockTrial({"p1": 1}, "trial_1", 1, "artifact"),
MockTrial({"p1": 2}, "trial_2", 2, "artifact"),
MockTrial({"p1": 2}, "trial_3", 3, "artifact"),
]
def test_not_started_exception(self, experiment):
logger = self.logger
with self.assertRaises(KeyError):
logger.log_trial_end(self.trials[0])
def test_repeat_throws_error(self, experiment):
logger = self.logger
trial = self.trials[0]
logger.log_trial_start(trial)
logger.log_trial_end(trial)
with self.assertRaises(KeyError):
logger.log_trial_end(trial)
def test_log_trial_end(self, experiment):
logger = self.logger
trials = self.trials
method = experiment.return_value.end
# Should not have ended yet
method.assert_not_called()
for trial in trials:
logger.log_trial_start(trial)
logger.log_trial_end(trial)
self.assertEqual(len(method.call_args_list), len(trials))
def test_del(self, experiment):
logger = self.logger
for trial in self.trials:
logger.log_trial_start(trial)
end = experiment.return_value.end
end.assert_not_called()
logger.__del__()
self.assertEqual(len(end.call_args_list), len(self.trials))
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
LogTrialEndTests
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_payment_methods.py
|
{
"start": 3598,
"end": 9898
}
|
class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_limit(100).build(),
_payment_methods_response().with_record(_a_payment_method()).with_record(_a_payment_method()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_two_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_limit(100).build(),
_payment_methods_response()
.with_pagination()
.with_record(_a_payment_method().with_id("last_record_id_from_first_page"))
.build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_starting_after("last_record_id_from_first_page").with_limit(100).build(),
_payment_methods_response().with_record(_a_payment_method()).with_record(_a_payment_method()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_when_read_then_add_cursor_field(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_limit(100).build(),
_payment_methods_response().with_record(_a_payment_method()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
assert output.records[0].record.data["updated"] == output.records[0].record.data["created"]
@HttpMocker()
def test_given_http_status_400_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_any_query_params().build(),
a_response_with_status(400),
)
output = self._read(_config())
assert_stream_did_not_run(output, _STREAM_NAME, "Your account is not set up to use Issuing")
@HttpMocker()
def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_any_query_params().build(),
[
a_response_with_status(429),
_payment_methods_response().with_record(_a_payment_method()).build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_any_query_params().build(),
[a_response_with_status(500), _payment_methods_response().with_record(_a_payment_method()).build()],
)
output = self._read(_config())
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().with_record(_a_customer().with_id("parent_id")).build(),
)
http_mocker.get(
_payment_methods_request("parent_id").with_any_query_params().build(),
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=0):
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
|
FullRefreshTest
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 90990,
"end": 91066
}
|
class ____(BinOpFrame):
operation = M.le
_operator_repr = "<="
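# M.le dispatches to the frame's elementwise .le method via dask's M
# (methodcaller) helper, so `a <= b` is evaluated columnwise.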
|
LEFrame
|
python
|
palantir__python-language-server
|
versioneer.py
|
{
"start": 15934,
"end": 18956
}
|
class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
|
NotThisMethod
|
python
|
getsentry__sentry
|
src/sentry/utils/sdk_crashes/path_replacer.py
|
{
"start": 48,
"end": 273
}
|
class ____(ABC):
"""
Replaces SDK frame paths with a new path. Runs only for SDK frames.
"""
@abstractmethod
def replace_path(self, path_field: str, path_value: str) -> str | None:
pass
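# Illustrative subclass sketch (class name and replacement value are hypothetical):
# class FixedPathReplacer(PathReplacer):
#     def replace_path(self, path_field: str, path_value: str) -> str | None:
#         return "sdk/redacted"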
|
PathReplacer
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_basic.py
|
{
"start": 3626,
"end": 3754
}
|
class ____(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex128
self.rdt = np.float64
|
TestDoubleFFT
|
python
|
prabhupant__python-ds
|
data_structures/binary_trees/sum_of_all_nodes.py
|
{
"start": 46,
"end": 291
}
|
class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def sum_nodes(root):
if root is None:
return 0
return root.val + sum_nodes(root.left) + sum_nodes(root.right)
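# Minimal usage sketch (values are illustrative):
# root = Node(1)
# root.left, root.right = Node(2), Node(3)
# sum_nodes(root)  # -> 6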
|
Node
|
python
|
optuna__optuna
|
optuna/cli.py
|
{
"start": 14468,
"end": 16257
}
|
class ____(_BaseCommand):
"""Show a list of studies."""
_study_list_header = [
("name", ""),
("direction", ""),
("n_trials", ""),
("datetime_start", ""),
]
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"-f",
"--format",
type=str,
choices=("value", "json", "table", "yaml"),
default="table",
help="Output format.",
)
parser.add_argument(
"--flatten",
default=False,
action="store_true",
help="Flatten nested columns such as directions.",
)
def take_action(self, parsed_args: Namespace) -> int:
storage = _get_storage(parsed_args.storage, parsed_args.storage_class)
summaries = optuna.get_all_study_summaries(storage, include_best_trial=False)
records = []
for s in summaries:
start = (
s.datetime_start.strftime(_DATETIME_FORMAT)
if s.datetime_start is not None
else None
)
record: dict[tuple[str, str], Any] = {}
record[("name", "")] = s.study_name
record[("direction", "")] = tuple(d.name for d in s.directions)
record[("n_trials", "")] = s.n_trials
record[("datetime_start", "")] = start
record[("user_attrs", "")] = s.user_attrs
records.append(record)
if any(r[("user_attrs", "")] != {} for r in records):
self._study_list_header.append(("user_attrs", ""))
print(
_format_output(
records, self._study_list_header, parsed_args.format, parsed_args.flatten
)
)
return 0
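# --- Editor's note (assumption): this command is registered as `optuna
# studies` in the CLI, so a typical invocation looks like:
#
#   $ optuna studies --storage sqlite:///example.db --format table --flatten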
|
_Studies
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/functions.py
|
{
"start": 66208,
"end": 75370
}
|
class ____(VariableTracker):
_nonvar_fields = {
"value",
"reason",
*VariableTracker._nonvar_fields,
}
def __init__(self, value: Any, reason: Optional[str] = None, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.value = value
self.reason = reason
def as_python_constant(self) -> Any:
return self.value
@classmethod
def create_with_source(cls, value: Any, source: Source) -> "SkipFunctionVariable":
# Use closure match guard (i.e. guard on __code__ object instead of
# function id) to avoid guarding on nested functions.
if inspect.getattr_static(value, "_torchdynamo_disable", False):
# For torch._dynamo.disable function, ensure that the original
# function is guarded. Otherwise, the else branch will guard on the
# _dynamo.disable.__code__
guard_on_source = source
guard_on_value = value
while getattr(guard_on_value, "_torchdynamo_orig_callable", False):
guard_on_value = guard_on_value._torchdynamo_orig_callable
guard_on_source = AttrSource(
guard_on_source, "_torchdynamo_orig_callable"
)
guard_on_source.make_guard(GuardBuilder.CLOSURE_MATCH)
elif inspect.isbuiltin(value):
install_guard(source.make_guard(GuardBuilder.BUILTIN_MATCH))
elif not is_wrapper_or_member_descriptor(value):
# These descriptors are not guaranteed to return the same object on
# attribute lookup. They are unlikely to be changed, so we can skip
# guarding them.
install_guard(source.make_guard(GuardBuilder.CLOSURE_MATCH))
return cls(value, source=source)
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if inspect.getattr_static(self.value, "_torchdynamo_disable", False):
msg = inspect.getattr_static(self.value, "_torchdynamo_disable_msg", None)
unimplemented(
gb_type="Skip calling `torch.compiler.disable()`d function",
context=str(self.value),
explanation=f"Skip calling function `{self.value}` since it was wrapped "
f"with `torch.compiler.disable` (reason: {msg})",
hints=[
"Remove the `torch.compiler.disable` call",
],
)
elif self.value is torch._dynamo.graph_break:
graph_break_msg = kwargs.get("msg")
if graph_break_msg:
graph_break_msg = graph_break_msg.as_python_constant()
unimplemented(
gb_type="Call to `torch._dynamo.graph_break()`",
context=f"Called `torch._dynamo.graph_break()` with args `{args}`, kwargs `{kwargs}`",
explanation=f"User-inserted graph break. Message: {graph_break_msg}",
hints=[
"Remove the `torch._dynamo.graph_break()` call.",
],
)
elif self.value is torch._dynamo.skip_frame:
skip_frame_msg = kwargs.get("msg")
if skip_frame_msg:
skip_frame_msg = skip_frame_msg.as_python_constant()
else:
skip_frame_msg = ""
raise SkipFrame(
format_skip_frame_message(
tx.f_code,
f"Skip frame due to `torch._dynamo.skip_frame()`. Message: {skip_frame_msg}",
)
)
elif self.value is torch._dynamo.step_unsupported:
raise StepUnsupported
else:
if config.dont_skip_tracing:
from .builder import SourcelessBuilder
# re-build the function, attempting to not skip
rebuilt_fn = SourcelessBuilder.create(tx, self.value)
# if we still get SkipFunctionVariable, then we *really* should skip this function
if not isinstance(rebuilt_fn, SkipFunctionVariable):
return rebuilt_fn.call_function(tx, args, kwargs)
qualname = getattr(self.value, "__qualname__", "<unknown qualname>")
module_or = getattr(self.value, "__module__", None)
module_name = "<unknown module>" if module_or is None else str(module_or)
try:
path = inspect.getfile(self.value)
explanation = (
f"Dynamo developers have intentionally marked that the function `{qualname}` "
f"in file `{path}` should not be traced."
)
hints = [
f"Avoid calling the function `{qualname}`.",
]
# TODO improve trace_rules reasoning to provide better hints.
# How do we tell that a function/file should NOT be removed from skip files?
# Do a very basic check for now.
if "_dynamo" not in path:
hints += [
f"Apply `@torch._dynamo.dont_skip_tracing` to the function `{qualname}` "
"to force tracing into the function. "
"More graph breaks may occur as a result of attempting to trace into the function.",
"Please file an issue to PyTorch.",
]
except TypeError:
known_python_builtin_modules = {"_abc", "_warnings"}
if module_or in known_python_builtin_modules:
explanation = (
f"Dynamo does not know how to trace the Python builtin "
f"`{module_name}.{qualname}`."
)
hints = [
"If you are attempting to call a logging function (e.g. `_warnings.warn`), "
"you can try adding it to `torch._dynamo.config.reorderable_logging_functions`.",
"Please file an issue on GitHub "
"so the PyTorch team can add support for it. ",
]
elif module_or is not None and module_or.startswith("optree"):
explanation = f"Dynamo cannot trace optree C/C++ function {module_name}.{qualname}."
hints = [
" Consider using torch.utils._pytree - "
"https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py"
]
# also warn on it because most users won't see the graph break message
torch._dynamo.utils.warn_once(explanation + "\n" + "\n".join(hints))
else:
explanation = (
f"Dynamo does not know how to trace the builtin `{module_name}.{qualname}.` "
f"This function is either a Python builtin (e.g. _warnings.warn) "
f"or a third-party C/C++ Python extension (perhaps created with pybind)."
)
hints = [
"If it is a Python builtin, please file an issue on GitHub "
"so the PyTorch team can add support for it and see the next case for a workaround.",
"If it is a third-party C/C++ Python extension, please "
"either wrap it into a PyTorch-understood custom operator "
"(see https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html "
"for more details) or, if it is traceable, use "
"`torch.compiler.allow_in_graph`.",
]
# also warn on it because most users won't see the graph break message
torch._dynamo.utils.warn_once(explanation + "\n" + "\n".join(hints))
if qualname == "allow_in_graph":
explanation = (
"Found an allow_in_graph decorator to a function which "
"is created inside the parent function that is getting "
"compiled. This is not supported for now."
)
hints = []
reason = self.reason if self.reason else "<missing reason>"
unimplemented(
gb_type="Attempted to call function marked as skipped",
context=f"module: {module_name}, qualname: {qualname}, skip reason: {reason}",
explanation=explanation,
hints=hints,
)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
return variables.ConstantVariable.create(hasattr(self.value, name))
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if name in cmp_name_to_op_mapping:
return variables.GetAttrVariable(self, name)
return fn_var_getattr(tx, self.value, self.source, name)
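# --- Editor's illustrative sketch (not part of the torch._dynamo source):
# how the first branch of call_function above surfaces to users.
import torch

@torch.compiler.disable
def helper(x):
    return x + 1

@torch.compile(fullgraph=True)
def f(x):
    # With fullgraph=True the skipped call becomes a hard error instead of a
    # silent graph break reported through SkipFunctionVariable.
    return helper(x)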
|
SkipFunctionVariable
|
python
|
kamyu104__LeetCode-Solutions
|
Python/moving-stones-until-consecutive.py
|
{
"start": 29,
"end": 421
}
|
class ____(object):
def numMovesStones(self, a, b, c):
"""
:type a: int
:type b: int
:type c: int
:rtype: List[int]
"""
s = [a, b, c]
s.sort()
if s[0]+1 == s[1] and s[1]+1 == s[2]:
return [0, 0]
return [1 if s[0]+2 >= s[1] or s[1]+2 >= s[2] else 2, s[2]-s[0]-2]
# Time: O(1)
# Space: O(1)
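# --- Editor's sketch: sanity checks, using the class name `Solution` from
# this row's target field in place of the masked `____`.
s = Solution()
assert s.numMovesStones(1, 2, 5) == [1, 2]  # move 5 next to the pair
assert s.numMovesStones(4, 3, 2) == [0, 0]  # already consecutive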
|
Solution
|
python
|
python-markdown__markdown
|
tests/test_apis.py
|
{
"start": 2593,
"end": 3897
}
|
class ____(unittest.TestCase):
""" Tests of ConvertFile. """
def setUp(self):
self.saved = sys.stdin, sys.stdout
sys.stdin = StringIO('foo')
sys.stdout = TextIOWrapper(BytesIO())
def tearDown(self):
sys.stdin, sys.stdout = self.saved
def getTempFiles(self, src):
""" Return the file names for two temp files. """
infd, infile = tempfile.mkstemp(suffix='.txt')
with os.fdopen(infd, 'w') as fp:
fp.write(src)
outfd, outfile = tempfile.mkstemp(suffix='.html')
return infile, outfile, outfd
def testFileNames(self):
infile, outfile, outfd = self.getTempFiles('foo')
markdown.markdownFromFile(input=infile, output=outfile)
with os.fdopen(outfd, 'r') as fp:
output = fp.read()
self.assertEqual(output, '<p>foo</p>')
def testFileObjects(self):
infile = BytesIO(bytes('foo', encoding='utf-8'))
outfile = BytesIO()
markdown.markdownFromFile(input=infile, output=outfile)
outfile.seek(0)
self.assertEqual(outfile.read().decode('utf-8'), '<p>foo</p>')
def testStdinStdout(self):
markdown.markdownFromFile()
sys.stdout.seek(0)
self.assertEqual(sys.stdout.read(), '<p>foo</p>')
|
TestConvertFile
|
python
|
ray-project__ray
|
python/ray/_common/tests/test_filters.py
|
{
"start": 127,
"end": 4186
}
|
class ____:
def test_driver_process(self, shutdown_only):
log_context = ["job_id", "worker_id", "node_id"]
filter = CoreContextFilter()
record = logging.makeLogRecord({})
assert filter.filter(record)
# Ray is not initialized so no context except PID which should be available
for attr in log_context:
assert not hasattr(record, attr)
# PID should be available even when Ray is not initialized
assert hasattr(record, "process")
assert hasattr(record, "_ray_timestamp_ns")
ray.init()
record = logging.makeLogRecord({})
assert filter.filter(record)
runtime_context = ray.get_runtime_context()
expected_values = {
"job_id": runtime_context.get_job_id(),
"worker_id": runtime_context.get_worker_id(),
"node_id": runtime_context.get_node_id(),
"process": record.process,
}
for attr in log_context:
assert hasattr(record, attr)
assert getattr(record, attr) == expected_values[attr]
# This is not a worker process, so actor_id and task_id should not exist.
for attr in ["actor_id", "task_id"]:
assert not hasattr(record, attr)
assert hasattr(record, "_ray_timestamp_ns")
def test_task_process(self, shutdown_only):
@ray.remote
def f():
filter = CoreContextFilter()
record = logging.makeLogRecord({})
assert filter.filter(record)
should_exist = ["job_id", "worker_id", "node_id", "task_id", "process"]
runtime_context = ray.get_runtime_context()
expected_values = {
"job_id": runtime_context.get_job_id(),
"worker_id": runtime_context.get_worker_id(),
"node_id": runtime_context.get_node_id(),
"task_id": runtime_context.get_task_id(),
"task_name": runtime_context.get_task_name(),
"task_func_name": runtime_context.get_task_function_name(),
"process": record.process,
}
for attr in should_exist:
assert hasattr(record, attr)
assert getattr(record, attr) == expected_values[attr]
assert not hasattr(record, "actor_id")
assert not hasattr(record, "actor_name")
assert hasattr(record, "_ray_timestamp_ns")
obj_ref = f.remote()
ray.get(obj_ref)
def test_actor_process(self, shutdown_only):
@ray.remote
class A:
def f(self):
filter = CoreContextFilter()
record = logging.makeLogRecord({})
assert filter.filter(record)
should_exist = [
"job_id",
"worker_id",
"node_id",
"actor_id",
"task_id",
"process",
]
runtime_context = ray.get_runtime_context()
expected_values = {
"job_id": runtime_context.get_job_id(),
"worker_id": runtime_context.get_worker_id(),
"node_id": runtime_context.get_node_id(),
"actor_id": runtime_context.get_actor_id(),
"actor_name": runtime_context.get_actor_name(),
"task_id": runtime_context.get_task_id(),
"task_name": runtime_context.get_task_name(),
"task_func_name": runtime_context.get_task_function_name(),
"process": record.process,
}
for attr in should_exist:
assert hasattr(record, attr)
assert getattr(record, attr) == expected_values[attr]
assert hasattr(record, "_ray_timestamp_ns")
actor = A.remote()
ray.get(actor.f.remote())
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
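# --- Editor's note (assumption): in application code the filter under test is
# attached to a logging handler so Ray context fields ride on every record:
#
#   handler = logging.StreamHandler()
#   handler.addFilter(CoreContextFilter())
#   logging.getLogger("my_app").addHandler(handler)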
|
TestCoreContextFilter
|
python
|
pytorch__pytorch
|
torch/fx/experimental/symbolic_shapes.py
|
{
"start": 143017,
"end": 328557
}
|
class ____:
# This is a wrapper over the actual __init__ function.
#
# Where to add a new constructor parameter to ShapeEnv?
# =====================================================
# This __init__ function should be used only for parameters related to event recording.
# These are parameters that we don't wish to pass down the road to new ShapeEnv instances
# created from replaying events.
#
# If you wish to add a parameter to the constructor of ShapeEnv, unrelated to event
# recording, do so in the _init function.
def __init__(
self,
*,
should_record_events: Optional[bool] = None,
tracked_fakes: Optional[list[Any]] = None,
**kwargs: Any,
) -> None:
self._init(**kwargs)
# Disable event recording when replaying.
kwargs["should_record_events"] = False
from torch.fx.experimental.validator import translation_validation_enabled
self._translation_validation_enabled = translation_validation_enabled()
# If not specified, enable event recording if both:
# - Translation validation is on
# - Translation validation bisection is not disabled
self.should_record_events = (
should_record_events
if should_record_events is not None
else (
self._translation_validation_enabled
and not config.translation_validation_no_bisect
)
)
# Enable event recording check if both:
# - It should record events
# - The recording check is enabled
self.check_recorded_events = (
self.should_record_events and config.check_shape_env_recorded_events
)
# This will make sure we only record the top-level function call.
self.is_recording = False
# Keep track of the list of tracked fakes.
self.tracked_fakes = tracked_fakes
# List of events for reconstructing ShapeEnv at arbitrary points in time.
self.events: list[ShapeEnvEvent] = (
[ShapeEnvEvent(ShapeEnv, kwargs=kwargs)]
if self.should_record_events
else []
)
# FakeTensor per-ShapeEnv operation cache. This is used for caching
# operations that contain symbolic shapes which have guards on the
# ShapeEnv (so are ShapeEnv-dependent).
#
# NOTE: It's important that SymNodes in this cache have their ShapeEnv
# stripped otherwise you end up with cycles which can only be cleaned
# with the GC.
self.fake_tensor_cache: dict[
torch._subclasses.fake_tensor._DispatchCacheKey,
torch._subclasses.fake_tensor._DispatchCacheEntry,
] = {}
# Pro-tip: if you add new field to ShapeEnv, this affects some accept
# tests. Accept their output with:
#
# EXPECTTEST_ACCEPT=1 python test/dynamo/test_dynamic_shapes.py -k test_shape_env_equal
#
def _init(
self,
*,
allow_scalar_outputs: bool = True,
allow_dynamic_output_shape_ops: bool = True,
# NB: These are legacy configuration that help us make good choices
# when the constraint/dynamic dims are not explicitly passed to us.
# Ideally we will fix all call sites to be explicit and not have
# implicit choices, but this apparently was pretty involved.
assume_static_by_default: bool = False,
# Note - On 0/1 specialization
#
# The following options affect decisions we make about eager
# specialization. Disabling them will increase trace time (as we do
# more symbolic reasoning) and can also harm the quality of generated
# code (because inductor may not be able to specialize for bounds
# being equal--although if we later respecialize because of a guard,
# your code may be just as good as it was before.)
#
# When True, eagerly specialize input sizes which have 0/1.
specialize_zero_one: bool = True,
# When True, assume input sizes which have the same size are
# symbolically equal.
duck_shape: Optional[bool] = None,
# For debugging
co_fields: Optional[dict[str, str]] = None,
# When True, whenever safe, we will generate a deferred runtime assert
# instead of a guard whenever we know that an expression must be True,
# otherwise it would be an error, even for backed SymInts (where we
# could ostensibly unconditionally generate guards). This is useful
# for export, where preventing "error checking" sizes from showing up
# in guards is helpful, since these guards in some sense are overly
# pedantic. See also https://github.com/pytorch/pytorch/issues/121749
prefer_deferred_runtime_asserts_over_guards: bool = False,
# XXX Add any new settings that could affect FakeTensor evaluation
# to: torch._subclasses.fake_tensor._ShapeEnvSettings
trace_asserts: bool = False,
) -> None:
if duck_shape is None:
duck_shape = config.use_duck_shape
self.settings = ShapeEnvSettings(
# Not directly used by ShapeEnv; indirectly used by FakeTensor
allow_scalar_outputs=allow_scalar_outputs,
allow_dynamic_output_shape_ops=allow_dynamic_output_shape_ops,
# End
assume_static_by_default=assume_static_by_default,
specialize_zero_one=specialize_zero_one,
duck_shape=duck_shape,
prefer_deferred_runtime_asserts_over_guards=prefer_deferred_runtime_asserts_over_guards,
trace_asserts=trace_asserts,
)
self.guards: list[ShapeGuard] = []
self.axioms: dict[sympy.Expr, sympy.Expr] = {}
# A set of ids that have already been allocated. This is used
# for when we allocate symbol ids using the hash of the source
# names to ensure we don't have collisions via linear probing
self.unique_ids: set[int] = set()
# Maps symbolic ints to their original concrete values
# Currently populated from tensors
self.var_to_val: dict[sympy.Symbol, sympy.Integer] = {}
# Like var_to_val, but only set when propagate_real_tensors is on.
# Used as last resort to avoid GuardOnDataDependent error
self.unbacked_var_to_val: dict[sympy.Symbol, sympy.Integer] = {}
# Like above, but used exclusively for OBLIVIOUS_SIZE. These
# potentially could be put together but I am not sure, writing out
# the logic individually before abstracting.
self.oblivious_var_to_val: dict[sympy.Symbol, sympy.Integer] = {}
# Maps symbolic ints to their min/max range. These ranges
# are conservative: the int MUST fall in the range, but the
# range may contain ints which may not actually appear in
# practice
self.var_to_range: dict[sympy.Symbol, ValueRanges] = {}
self.var_to_range_sloc: dict[sympy.Symbol, ValueRangesSLoc] = {}
self.source_name_to_debug_name: dict[str, str] = {}
self.var_to_sources: dict[sympy.Symbol, list[Source]] = {}
# A set of unbacked symbols that are inputs (i.e: not data dependent).
self.unbacked_inputs: OrderedSet[sympy.Symbol] = OrderedSet()
self.var_to_stack: dict[sympy.Symbol, CapturedTraceback] = {}
self.var_to_hint_override: dict[sympy.Symbol, int] = {}
# Maps a source to the *original* symbol that was assigned to it
self.source_to_var: dict[str, sympy.Symbol] = {}
# Maps from sympy ints to expressions representing them
# Populated from equality guards (i.e. a.shape[0] == b.shape[0])
self.replacements: dict[sympy.Symbol, sympy.Expr] = {}
# The sloc of the guard that triggered this replacement to be added
self.replacements_slocs: dict[sympy.Symbol, SLoc] = {}
self.unbacked_renamings: dict[sympy.Symbol, sympy.Symbol] = {}
# Set holds a % b expressions that evaluate to 0.
self.divisible: set[sympy.Expr] = set()
# Set that holds "size-like" symbols. When we perform
# "size-oblivious" tests, these can be assumed to be >= 2.
self.size_like: set[sympy.Symbol] = set()
# Duck-shaping says that if two input tensors have the same size,
# they get assigned the same symbolic variable
self.val_to_var: dict[int, sympy.Symbol] = {}
self.unbacked_symfloat_counter = 0
self.unbacked_symint_counter = 0
# Similar to guards, but these MUST evaluate to true and can
# only be evaluated at runtime midway through (i.e., they always
# involve unbacked symints)
#
# For efficiency reasons, we index in the following way. Suppose you have
# a runtime assert i0 + i1 <= s1. We pick the most recently allocated
# symbol in the source expression and add the assert to the list for
# that symbol e.g., {i1: [i0 + i1 <= s1]}.
#
# We access the runtime asserts in two situations:
#
# - When we are guarding on an expression, we will attempt to
# statically evaluate it, in case the unbacked SymInts can
# simplify away. If we have a runtime assert, we may be able
# to discharge the guard entirely. We only need to attempt
# runtime asserts that mention freevars of the expression in
# question.
#
# - When we are performing codegen (in Inductor for eager, or
# when finalizing the export FX graph), we need to know what
# extra runtime asserts to insert. Whenever an unbacked
# SymInt comes into scope, all runtime asserts involving it
# become eligible for insertion (so long as all of their other
# free unbacked symbols are also in scope). We technically
# can handle any choice of key by kicking inexpressible asserts
# to the next unbacked symbol to wait on, but if we choose the
# latest key, an assert will only show up at the moment when
# we can actually codegen it.
self.deferred_runtime_asserts: dict[
Optional[sympy.Symbol], list[RuntimeAssert]
] = {}
# This exists so we can efficiently invalidate the cache (it's used as
# part of the cache key); otherwise we'd have to iterate through
# deferred_runtime_asserts to compute its length
self.num_deferred_runtime_asserts = 0
self.log = log
self.log.info("create_env")
self.frozen = False
self.runtime_asserts_frozen = False
self.dim_constraints: Optional[DimConstraints] = None
self.counter: Counter[str] = collections.Counter()
# Mapping from sympy.Symbol to the number of guards which mention this
# symbol
self.symbol_guard_counter: Counter[sympy.Symbol] = collections.Counter()
# A selection of important fields on co_field; solely used for
# signpost_event
self.co_fields = co_fields if co_fields else {}
# Whenever we allocate a fresh unbacked Symbol, we add it to this
# pending list. Unbacked symbol allocation can occur at unpredictable
# points during meta tensor propagation, but at some point, we
# have to know what the binding site for an unbacked symbol is, and
# this is computed when we actually place the node in the graph. The
# important thing is that we always actually handle every unaccounted
# for unbacked symbol, so this list helps us keep track of them and
# then make sure they are all accounted for.
#
# We could potentially give rise to errors earlier by lexically
# scoping when we do propagation, and only allowing unbacked symbols
# to be allocated at this point in time. However this is inconvenient
# to do in Dynamo, because fake tensor propagation is far from when we
# analyze binding sites (set_example_value), so we do it in a more
# mutatey way.
#
# NB: fresh unbacked symbols NEVER get substitutions applied to them,
# they are binding sites!
self.pending_fresh_unbacked_symbols: list[sympy.Symbol] = []
# Version counter used to invalidate cached values
self._prev_cache_key = self._get_key()
self._version_counter = 0
# Each time divisible is changed this should be set to True, this is set in _update_version_counter.
self._resimplify_floor_div_axioms = True
# Cache for FX nodes.
# Maps an already built node a tuple of:
# 1. node's target
# 2. list of arguments
# This drastically reduces the size of the FX graph, avoiding
# duplicated nodes.
self.fx_node_cache: dict[tuple[Callable, tuple[Any, ...]], torch.fx.Node] = {}
self.source_to_symbol: dict[str, sympy.Symbol] = {}
# Suppose you want to replace an unbacked symbol with another
# unbacked symbol. This is error prone because you can cause
# references to unbacked symbols to time travel backwards. E.g.,
#
# u1 = x.item()
# ... use of u1 ...
# u2 = y.item()
# u3 = z.item()
# torch._check(u1 == u2 + u3)
#
# If you replace u1 with u2 + u3, then the use of u1 now
# references u2 and u3 prior to them actually being bound at
# runtime.
#
# To control for this, we track the order unbacked symbols
# were allocated, and only allow substitutions if they respect
# the dependency from this order; an unbacked symbol can only
# be substituted with unbacked symbols that come before it in the
# order.
#
# This also imposes an ordering on the unbacked symbol binding
# sites themselves: you are not allowed to reorder unbacked symbol
# bindings. At the moment, this is not tracked, but we potentially
# could track this at the IR level using a higher order operator
# with something like effect token tracking.
self.unbacked_alloc_order: dict[sympy.Symbol, int] = {}
self.specialization_stacks: dict[Source, traceback.StackSummary] = {}
self.trace_asserts = trace_asserts
self.specializations: OrderedSet[Specialization] = OrderedSet()
from torch.fx.experimental.validator import translation_validation_enabled
self._translation_validation_enabled = translation_validation_enabled()
if self._translation_validation_enabled:
from torch.fx.experimental.validator import TranslationValidator
self.validator = TranslationValidator()
self.graph = torch.fx.Graph()
# Create an output graph and start inserting before that.
# This is needed when 'deepcopy'-ing this object.
self.graph.inserting_before(self.graph.output(None))
# Mapping of each node name to the node itself.
#
# This is useful for matching an FX node from a recorded ShapeEnv.graph
# to the FX node of the ShapeEnv we are running the event on.
#
# Whenever you add a node to self.graph, you must add a mapping to this
# variable. Otherwise, the built FX graph on the replayed ShapeEnv will
# not be valid.
self.name_to_node: dict[str, torch.fx.Node] = {}
@property
def allow_scalar_outputs(self) -> bool:
return self.settings.allow_scalar_outputs
@property
def allow_dynamic_output_shape_ops(self) -> bool:
return self.settings.allow_dynamic_output_shape_ops
@property
def assume_static_by_default(self) -> bool:
return self.settings.assume_static_by_default
@property
def specialize_zero_one(self) -> bool:
return self.settings.specialize_zero_one
@property
def duck_shape(self) -> bool:
return self.settings.duck_shape
@property
def prefer_deferred_runtime_asserts_over_guards(self) -> bool:
return self.settings.prefer_deferred_runtime_asserts_over_guards
@contextmanager
def patch_source_specialization(
self, source: Source, check_fn: Callable[[sympy.Symbol], sympy.Expr]
) -> Iterator[None]:
"""
Temporarily add symbol-level axioms to the ShapeEnv. This is useful when you want to "fork"
and have parallel universes of ShapeEnvs. For example, we use this when doing multi-graph
compile so we can support various graphs with varying levels of specializations.
This context manager allows for temporarily adding constraints to the shape environment
based on a specialization function applied to a symbol associated with a source.
Args:
source: The source of the symbol to specialize
check_fn: A function that takes a sympy Symbol and returns a sympy expression
representing a constraint/specialization to be applied
"""
name = source.name()
sym = self.source_to_var[name]
expr = check_fn(SymInt(SymNode(sym, self, int, None))).node._expr
new_axioms = dict(self.get_implications(self.simplify(expr)))
added_replacements = {}
for axiom in new_axioms:
if (
isinstance(axiom, sympy.Eq)
and isinstance(axiom.lhs, sympy.Symbol)
and isinstance(axiom.rhs, sympy.Integer)
and axiom.lhs not in self.replacements
):
self.replacements[axiom.lhs] = axiom.rhs
added_replacements[axiom.lhs] = axiom.rhs
self.axioms.update(new_axioms)
# We need to freeze the ShapeEnv because any additional modification of
# the ShapeEnv will cause unsoundness for subsequent specialization calls.
self.frozen = True
try:
yield
finally:
for k in new_axioms:
self.axioms.pop(k, None)
for k in added_replacements:
self.replacements.pop(k, None)
self.frozen = False
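# --- Editor's sketch (names `shape_env` and `src` assumed): forking analysis
# under a temporary specialization of one symbol:
#
#   with shape_env.patch_source_specialization(src, lambda s: s == 4):
#       ...  # reason about the graph as if the dimension were exactly 4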
def check_equal(self, other: ShapeEnv) -> None:
"""Compare another ShapeEnv for equivalence"""
# ShapeEnv fields that are not relevant for the outcome of
# ShapeEnv.produce_guards call:
# - Debugging variables
# - Translation validation related variables
# - Events recording related variables
non_state_variable_names = (
"counter",
"log",
"var_to_stack",
"fx_node_cache",
"graph",
"validator",
"check_recorded_events",
"should_record_events",
"is_recording",
"tracked_fakes",
"events",
"source_name_to_debug_name",
"_prev_cache_key",
"_version_counter",
"dim_constraints",
# source locations are OK to diverge
"var_to_range_sloc",
"replacements_slocs",
"_resimplify_floor_div_axioms",
"_expr_sym_node_id",
"specialization_stacks",
)
# Mapping of the value of each to-be-compared field into the values that
# should actually be compared.
#
# You should modify this if, for example, the field that holds state and
# debugging information. e.g. ShapeGuard holds the actual guard (sympy.Expr)
# and the stack when it was added to the set of guards. In order to compare
# it, we throw away the stack information.
def map_value(key: str, value: Any) -> Any:
if key == "guards":
# Transform the list of ShapeGuard into a list of expressions.
return [g.expr for g in value]
elif key == "deferred_runtime_asserts":
# Transform the list of RuntimeAsserts into a list of expressions.
return {s: [ra.expr for ra in ras] for s, ras in value.items()}
elif key == "name_to_node":
# Just compare that the sets of keys are the same.
return set(value.keys())
elif key in (
"symbol_guard_counter",
"pending_fresh_unbacked_symbols",
"fake_tensor_cache",
):
# Skip this for comparisons
return None
return value
shape_env_check_state_equal(self, other, non_state_variable_names, map_value)
def _snapshot_tracked_fakes(self) -> Optional[list[Any]]:
if self.tracked_fakes is None:
return None
from torch._dynamo.variables.builder import TrackedFake
def maybe_transform_fake(fake: TrackedFake) -> TrackedFake:
inner_fake = (
fake.fake
if isinstance(fake.fake, (torch.SymInt, torch.SymFloat))
else FakeTensorMeta.from_fake(fake.fake)
)
# Even though TrackedFake accepts a Union[SymInt, FakeTensor], here we give it a
# FakeTensorMeta for two reasons:
# 1. this is all the information we need when recording ShapeEnvEvents.
# 2. it works even if each TrackedFake changes its metadata.
return TrackedFake(inner_fake, fake.source, fake.symbolic_context) # type: ignore[arg-type]
return [maybe_transform_fake(fake) for fake in self.tracked_fakes]
def _last_event_index(self) -> int:
return len(self.events) - 1
@contextmanager
def _recording(self) -> Iterator[None]:
self.is_recording = True
try:
yield
finally:
self.is_recording = False
@record_shapeenv_event()
def _eliminate_unbacked(self, orig_s: sympy.Symbol, new_s: sympy.Expr) -> None:
self._set_replacement(orig_s, new_s, "eliminate_unbacked")
@record_shapeenv_event()
def set_unbacked_var_to_val(self, k: sympy.Symbol, v: int) -> None:
"""Used only when propagate_real_tensors; registers a value for an
unbacked symbol, which can be used last resort to resolve hints."""
log.info("set_unbacked_var_to_val %s = %s", k, v)
self.unbacked_var_to_val[k] = sympy.sympify(v)
# Unlike set_replacement, this records a shapeenv event
@record_shapeenv_event()
def _rename_unbacked_to(self, orig_s: sympy.Symbol, new_s: sympy.Symbol) -> None:
assert isinstance(orig_s, sympy.Symbol), orig_s
assert isinstance(new_s, sympy.Symbol), new_s
assert free_unbacked_symbols(new_s), new_s
assert free_unbacked_symbols(orig_s), orig_s
dest = self.replacements.get(orig_s)
if dest is not None:
assert not free_unbacked_symbols(dest), f"{orig_s} -> {dest}"
self._set_replacement(orig_s, new_s, "rename_unbacked_to")
self.unbacked_renamings[orig_s] = new_s
if dest is not None:
self._set_replacement(new_s, dest, "rename_unbacked_to_dest")
@record_shapeenv_event()
def _constrain_is_bounded(self, a: sympy.Symbol, upper_bound: int) -> None:
# TODO: Do something nontrivial when upper_bound is expression
pass
@record_shapeenv_event()
def _constrain_range_for_size(
self, a: sympy.Symbol, min: Optional[int] = None, max: Optional[int] = None
) -> None:
if min is None:
min = 0
if max is None:
max = int_oo
if max < min:
raise ValueError(
"Maximum value to constrain_as_size can't be less than the specified min value, "
f"received min={min} and max={max}"
)
self.constrain_symbol_range(
a,
compiler_min=min,
compiler_max=max,
)
self.size_like.add(a)
@record_shapeenv_event()
def _constrain_range(self, a: sympy.Expr, min: int, max: int) -> None:
if isinstance(a, sympy.Integer):
if not (min <= int(a) <= max):
raise ValueRangeError(f"Invalid value {int(a)} for range [{min}:{max}]")
return
# TODO: Shouldn't we install a guard if the symbol is backed? Or is the
# semantics that this is an "unchecked" assert (but is this actually
# something useful? Might be better to restrict only for unbacked
# SymInt).
if isinstance(a, sympy.Symbol):
self.constrain_symbol_range(
a,
compiler_min=min,
compiler_max=max,
)
@record_shapeenv_event()
def _constrain_unify(self, a: SymInt, b: SymInt) -> None:
"""
Given two SymInts, constrain them so that they must be equal. NB:
this will not work with SymInts that represent nontrivial expressions
(yet!)
"""
# TODO: this does not install a deferred runtime assert yet
# TODO: Maybe dedupe this with _maybe_guard_rel?
# Update Feb 2024: this is extra important to do, this doesn't handle
# unbacked replacements properly nor does it generate deferred runtime
# asserts
if not isinstance(a, SymInt):
if not isinstance(b, SymInt):
assert a == b
else:
assert isinstance(b.node.expr, sympy.Symbol), (
"constraining non-Symbols NYI"
)
assert b.node.shape_env is self
self.replacements[b.node.expr] = sympy.Integer(a)
else:
# TODO: Actually, we can support this as long as one of them is a symbol.
# NB: We can't actually do "unification" as our operators are not
# injective
assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
assert a.node.shape_env is self
if not isinstance(b, SymInt):
self.replacements[a.node.expr] = sympy.Integer(b)
else:
assert a.node.shape_env is b.node.shape_env
assert isinstance(b.node.expr, sympy.Symbol), (
"constraining non-Symbols NYI"
)
new_var = self._find(a.node.expr)
self.replacements[b.node.expr] = new_var
def _ignore_fresh_unbacked_symbols_tls(self) -> bool:
return getattr(TLS, "ignore_fresh_unbacked_symbols", False)
@record_shapeenv_event()
def _ignore_fresh_unbacked_symbols_set(self, b: bool) -> bool:
prev = self._ignore_fresh_unbacked_symbols_tls()
TLS.ignore_fresh_unbacked_symbols = b
return prev
@contextmanager
def ignore_fresh_unbacked_symbols(self) -> Iterator[None]:
"""
Indicates that the newly allocated unbacked SymInts are being
discarded
"""
prev = self._ignore_fresh_unbacked_symbols_set(True)
try:
yield
finally:
self._ignore_fresh_unbacked_symbols_set(prev)
@record_shapeenv_event()
def freeze(self) -> None:
"""Freeze this ShapeEnv to stop accumulating guards
A frozen ShapeEnv will ignore any further guards generated on it and
only emit a warning which may lead to accuracy problems.
"""
self.frozen = True
@record_shapeenv_event()
def freeze_runtime_asserts(self) -> None:
"""Freeze this ShapeEnv to stop adding deferred runtime asserts.
We will error if you try to install a new runtime assert when it is
frozen. This would indicate a lowering violation, or perhaps something
we know statically is already True but we are checking it again in a way
that is not clearly dischargeable.
"""
# self.prefer_deferred_runtime_asserts_over_guards = False
self.runtime_asserts_frozen = True
def _create_symbol_for_source(self, source: Source) -> Optional[sympy.Symbol]:
if not self._translation_validation_enabled:
return None
srcname = source.name()
if source not in self.source_to_symbol:
self.source_to_symbol[srcname] = sympy.Symbol(srcname, integer=True)
return self.source_to_symbol[srcname]
def _add_z3var(self, symbol: sympy.Symbol, type: type) -> None:
if self._translation_validation_enabled:
self.validator.add_var(symbol, type)
def _add_target_expr(self, expr: SympyBoolean) -> None:
if self._translation_validation_enabled:
self.validator.add_target_expr(expr)
def _add_assertion(self, expr: SympyBoolean) -> None:
if self._translation_validation_enabled:
self.validator.add_assertion(expr)
def _check_translation_validate(self) -> None:
if self._translation_validation_enabled:
self.validator.validate()
@record_shapeenv_event()
def _create_fx_call_function(
self,
op: Callable,
args: tuple,
) -> tuple[Optional[torch.fx.Node], bool]:
# Cache this tuple in order to avoid duplicated nodes.
node_key = (op, args)
# Flags whether the returned node was cached or not.
fresh = False
if self._translation_validation_enabled and node_key not in self.fx_node_cache:
# Presence of None in the arguments implies that we should ignore this operation.
if any(a is None for a in args):
# We check if we are not mixing SymNode that should not be ignored
# (fx_node is not None) with those that should (fx_node is None).
assert all(not isinstance(a, torch.fx.Node) for a in args)
return None, fresh
fresh = True
# If translation validation is enabled, all arguments must have its
# own FX node.
assert all(a is not None for a in args), (
f"missing arg in FX graph ({op.__name__}): {args}"
)
node = self.fx_node_cache[node_key] = self.graph.call_function(op, args)
self.name_to_node[node.name] = node
return self.fx_node_cache.get(node_key, None), fresh
def _create_fx_placeholder_and_z3var(
self,
symbol: sympy.Symbol,
type: type,
) -> Optional[torch.fx.Node]:
if not self._translation_validation_enabled:
return None
node_key = (self.graph.placeholder, (symbol,))
# Check if we haven't added this symbol already.
# If so, skip the placeholder creation, as it
# generates invalid Python code.
if node_key not in self.fx_node_cache:
# Add a Z3 variable according to 'type'.
self._add_z3var(symbol, type)
# Create the FX placeholder out of a mangled name.
mangled_name = re.sub(
r"[^a-zA-Z0-9]", "_", re.sub(r"[()]", "", symbol.name)
)
node = self.fx_node_cache[node_key] = self.graph.placeholder(mangled_name)
self.name_to_node[node.name] = node
# Attach the 'symbol' to the placeholder so that we can retrieve
# the Z3 variable later.
node.meta["symbol"] = symbol
return self.fx_node_cache[node_key]
def _remove_fx_node(self, node: Optional[torch.fx.Node]) -> None:
if self._translation_validation_enabled and node is not None:
self.name_to_node.pop(node.name)
self.graph.erase_node(node)
def _add_fx_node_metadata(self, node: torch.fx.Node) -> None:
from torch._dynamo.utils import get_current_node
if self.should_record_events:
node.meta[SHAPEENV_EVENT_KEY] = self._last_event_index()
node.meta[CURRENT_NODE_KEY] = get_current_node()
@staticmethod
def _suppress_guards_tls() -> bool:
return getattr(TLS, "suppress_guards", False)
@record_shapeenv_event()
def _suppress_guards_enter(self) -> None:
if not hasattr(TLS, "suppress_guards_stack"):
TLS.suppress_guards_stack = []
old = self._suppress_guards_tls()
TLS.suppress_guards_stack.append(old)
TLS.suppress_guards = True
@record_shapeenv_event()
def _suppress_guards_exit(self) -> None:
old = (
TLS.suppress_guards_stack.pop()
if len(TLS.suppress_guards_stack) > 0
else False
)
TLS.suppress_guards = old
def suppress_guards(self) -> _GeneratorContextManager[None]:
"""Context manager to ignore all guards generated inside"""
return _suppress_guards(self)
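# --- Editor's sketch (usage assumption): evaluate a symbolic expression
# without installing guards on this ShapeEnv:
#
#   with shape_env.suppress_guards():
#       branch_taken = bool(sym_expr)  # uses hints, records no guard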
def _get_key(self) -> tuple[int, int, int, int]:
"""
Defines the current "state" of the guards we've accumulated in this ShapeEnv.
Determines when we need to invalidate our cache
"""
return (
len(self.replacements),
len(self.divisible),
self.num_deferred_runtime_asserts,
len(self.unbacked_var_to_val),
)
def _update_version_counter(self) -> None:
# If the change to the shape env affects self.divisible, set
# _resimplify_floor_div_axioms.
# This is used to trigger a resimplification of FloorDiv to CleanDiv
# in implications inside the function resimplify_floor_div.
if len(self.divisible) != self._prev_cache_key[1]:
self._resimplify_floor_div_axioms = True
# The shape environment is queried orders of magnitude more often than
# it is changed, so we summarise the cache key into a linearly
# increasing version counter which is cheaper to check in _lru_cache
# Only update version counter if the state actually changed
cur_key = self._get_key()
if self._prev_cache_key != cur_key:
self._prev_cache_key = cur_key
self._version_counter += 1
def _produce_dyn_sizes(
self,
ex_size: Sequence[IntLikeType],
source: Source,
symbolic_context: SymbolicContext,
) -> list[sympy.Expr]:
return self._produce_dyn_sizes_from_int_tuple(
tuple(ex_size), source, symbolic_context
)
def _produce_dyn_sizes_from_int_tuple(
self,
tensor_size: Sequence[IntLikeType],
source: Source,
symbolic_context: SymbolicContext,
hint_overrides: Optional[dict[int, int]] = None,
) -> list[sympy.Expr]:
assert all(not is_symbolic(val) for val in tensor_size), (
f"Expect size to be a plain tuple of ints but got {tensor_size}"
)
from torch._dynamo.source import TensorProperty, TensorPropertySource
if not hint_overrides:
hint_overrides = {}
_assert_symbol_context(symbolic_context)
dynamic_dims = symbolic_context.dynamic_sizes # type: ignore[attr-defined]
constraint_dims = symbolic_context.constraint_sizes # type: ignore[attr-defined]
size = []
for i, val in enumerate(tensor_size):
sym = self.create_symbol(
hint_overrides.get(i, val),
TensorPropertySource(source, TensorProperty.SIZE, i),
dynamic_dims[i],
constraint_dims[i],
do_not_specialize_zero_one=config.backed_size_oblivious,
symbolic_context=symbolic_context,
)
if (
isinstance(symbolic_context, StatelessSymbolicContext)
and symbolic_context.specialize_on
):
for specialization in symbolic_context.specialize_on[i]:
self.specializations.add(
Specialization(
TensorPropertySource(source, TensorProperty.SIZE, i),
specialization,
)
)
if (
config.backed_size_oblivious
and isinstance(sym, sympy.Symbol) # could be static
and symbol_is_type(sym, SymT.SIZE)
):
self.size_like.add(sym)
size.append(sym)
return size
def create_symbolic_sizes_strides_storage_offset(
self,
ex: torch.Tensor,
source: Source,
*,
symbolic_context: Optional[SymbolicContext] = None,
) -> tuple[
tuple[IntLikeType, ...],
tuple[IntLikeType, ...],
IntLikeType,
]:
"""
Returns a list of symbolic sizes and strides for the given tensor.
We try our best to express stride in terms of the sizes, so as to not
introduce new symbolic variables.
"""
ex_size = tuple(
self._maybe_specialize_sym_int_with_hint(sz) for sz in ex.size()
)
ex_stride = tuple(
self._maybe_specialize_sym_int_with_hint(sd) for sd in ex.stride()
)
ex_storage_offset = self._maybe_specialize_sym_int_with_hint(
ex.storage_offset()
)
return self._create_symbolic_sizes_strides_storage_offset(
ex_size,
ex_stride,
ex_storage_offset,
[_is_dim_dynamic(ex, i) for i in range(ex.dim())],
source,
symbolic_context=symbolic_context,
)
# Dynamo may want to wrap up FakeTensors with SymInt sizes, e.g. make_fx(opt_f(), tracing_mode="symbolic").
# We create symbols in shape_env using the backed hints behind SymInt.
# Case 1: when SymInt is backed, dynamo can proceed with FakeTensors that have concrete shape.
# produce_guards will trigger specializations on the outer stuff
# Case 2: when the SymInt is unbacked, we will throw a data dependent error in require_hint().
#
# It's probably good for now but it's important to note that this approach has implications for
# the original shape_env when checking guards in different order.
# Example:
# ---------
# Consider a function "opt_f" as shown below:
# @torch.compile()
# def opt_f(x: bool, y: Tensor):
# if x == True:
# return y + torch.randn([4])
# else:
# return y
# Depending on the sequence of calls, we might install two different sets of guards:
# 1. opt_f(False, y):
# - "x == False" (always works for any size y)
# 2. opt_f(True, y):
# - Triggers recompilation and results in guards like:
# - "x == True and y.size(0) == 4"
# - (or "y.size(0) == 4 and x == True")
# The order of checking the guards matters. In this specific example:
# If True branch guard check precedes False branch and for True branch, y.size(0) check precedes x == True,
# we may have an unnecessary shape specialization for y.
def _maybe_specialize_sym_int_with_hint(
self, maybe_sym: IntLikeType
) -> IntLikeType:
assert isinstance(maybe_sym, (int, torch.SymInt))
if is_symbolic(maybe_sym):
assert maybe_sym.node.shape_env is not self, (
"expect the symbol is created from an shape env other than current one."
)
return maybe_sym.node.require_hint()
return maybe_sym
@record_shapeenv_event()
def _create_symbolic_sizes_strides_storage_offset(
self,
# NB: SymInt is allowed here due to nested int, normally you don't
# actually pass true symbolic sizes to this function
ex_size: Sequence[IntLikeType],
ex_stride: Sequence[IntLikeType],
ex_storage_offset: IntLikeType,
is_dim_dynamic: Sequence[bool],
source: Source,
*,
symbolic_context: Optional[SymbolicContext] = None,
hint_overrides: Optional[dict[int, int]] = None,
) -> tuple[
tuple[IntLikeType, ...],
tuple[IntLikeType, ...],
IntLikeType,
]:
dim = len(ex_size)
if not hint_overrides:
hint_overrides = {}
# Reimplement the legacy behavior
if symbolic_context is None:
constraint_sizes: list[DimConstraint] = [None] * dim
constraint_strides: list[DimConstraint] = [None] * dim
dynamic_dims = []
dynamic_strides = []
for i in range(dim):
# NB: This is encapsulation breaking! Legacy behavior was
# bad.
if is_dim_dynamic[i]:
r = DimDynamic.DYNAMIC
elif self.assume_static_by_default:
r = DimDynamic.STATIC
else:
r = DimDynamic.DUCK
dynamic_dims.append(r)
dynamic_strides.append(r)
dynamic_dims = [DimDynamic.DUCK] * dim
dynamic_strides = [DimDynamic.INFER_STRIDE] * dim
# symbolic_context is None - set one
symbolic_context = StatelessSymbolicContext(
dynamic_sizes=dynamic_dims,
dynamic_strides=dynamic_strides,
constraint_sizes=constraint_sizes,
constraint_strides=constraint_strides,
)
# We got a StatelessSymbolicContext
_assert_symbol_context(symbolic_context)
constraint_sizes = symbolic_context.constraint_sizes # type: ignore[attr-defined]
constraint_strides = symbolic_context.constraint_strides # type: ignore[attr-defined]
dynamic_sizes = symbolic_context.dynamic_sizes # type: ignore[attr-defined]
dynamic_strides = symbolic_context.dynamic_strides # type: ignore[attr-defined]
# TODO: make this configurable from outside symbolic_context; we made a symbolic_context
# decision here where if all sizes are static, we are going to
# specialize all of the inner strides/offset too. We don't have to
# do this, and arguably we should ALWAYS allow for dynamic offset,
# this is cheap.
# TODO: This should be DYNAMIC, using DUCK for BC
dynamic_offset = (
DimDynamic.STATIC
if all(r == DimDynamic.STATIC for r in dynamic_sizes)
else DimDynamic.DUCK
)
are_sizes_static = all(r == DimDynamic.STATIC for r in dynamic_sizes)
assert len(dynamic_sizes) == dim, f"{len(dynamic_sizes)} != {dim}"
assert len(dynamic_strides) == dim, f"{len(dynamic_sizes)} != {dim}"
assert len(constraint_sizes) == dim
assert len(constraint_strides) == dim
from torch._dynamo.source import TensorProperty, TensorPropertySource
size: list[sympy.Expr] = self._produce_dyn_sizes_from_int_tuple(
ex_size, source, symbolic_context, hint_overrides=hint_overrides
)
stride = self._compute_symbolic_stride(
source,
size,
ex_size,
ex_stride,
dynamic_strides,
constraint_strides,
are_sizes_static,
symbolic_context,
)
sym_sizes = [
self.create_symintnode(
sym,
hint=hint_overrides.get(i, hint),
source=TensorPropertySource(source, TensorProperty.SIZE, i),
)
for i, (sym, hint) in enumerate(zip(size, ex_size))
]
for i, sym in enumerate(sym_sizes):
if isinstance(sym, torch.SymInt) and i in hint_overrides:
self.var_to_hint_override[sym.node.expr] = hint_overrides[i]
sym_stride = []
for i, stride_expr in enumerate(stride):
# NB: Don't duck size the stride; instead use the expression
# we computed
assert stride_expr is not None
sym_stride.append(
self.create_symintnode(
stride_expr,
hint=ex_stride[i],
source=TensorPropertySource(source, TensorProperty.STRIDE, i),
)
)
sym_storage_offset = self.create_symintnode(
self.create_symbol(
ex_storage_offset,
TensorPropertySource(source, TensorProperty.STORAGE_OFFSET),
dynamic_dim=dynamic_offset,
constraint_dim=None,
symbolic_context=symbolic_context,
),
hint=ex_storage_offset,
source=TensorPropertySource(source, TensorProperty.STORAGE_OFFSET),
)
return tuple(sym_sizes), tuple(sym_stride), sym_storage_offset
def _compute_symbolic_stride(
self,
source: Source,
size: Sequence[sympy.Expr],
ex_size: Sequence[IntLikeType],
ex_stride: Sequence[IntLikeType],
dynamic_strides: Sequence[DimDynamic],
constraint_strides: Sequence[
Optional[Union[StrictMinMaxConstraint, RelaxedUnspecConstraint]]
],
are_sizes_static: bool,
symbolic_context: SymbolicContext,
) -> list[sympy.Expr]:
from torch._dynamo.source import TensorProperty, TensorPropertySource
stride: list[Optional[sympy.Expr]] = [None] * len(size)
candidates: dict[IntLikeType, sympy.Expr] = {}
# iterate over unbound strides in val ascending order with
# index descending as a tie breaker since for cases like
# [(1, 1), (1, 0)], we want to fill in the right most
# stride first.
val_list = [(val, -i) for i, val in enumerate(ex_stride)]
val_list.sort(key=_nested_int_aware_sort)
for val, neg_i in val_list:
i = -neg_i
contiguous_stride = (
i != len(ex_stride) - 1
and ex_stride[i] == ex_size[i + 1] * ex_stride[i + 1]
)
if val in (0, 1) and not contiguous_stride:
out_stride = sympy.Integer(val)
else:
dynamic_stride = dynamic_strides[i]
if dynamic_stride == DimDynamic.INFER_STRIDE and val in candidates:
# Set stride to a candidate only for DimDynamic.INFER_STRIDE
out_stride = candidates[val]
else:
# Set INFER_STRIDE to STATIC or DUCK depending on sizes
dyn_stride = dynamic_stride
if dynamic_stride == DimDynamic.INFER_STRIDE:
dyn_stride = (
DimDynamic.STATIC if are_sizes_static else DimDynamic.DUCK
)
out_stride = self.create_symbol(
val,
TensorPropertySource(source, TensorProperty.STRIDE, i),
dynamic_dim=dyn_stride,
constraint_dim=constraint_strides[i],
symbolic_context=symbolic_context,
)
stride[i] = out_stride
candidates[ex_size[i] * val] = size[i] * out_stride
assert all(x is not None for x in stride)
return stride
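# --- Editor's worked example for the inference above (assuming
# DimDynamic.INFER_STRIDE strides): for ex_size = (2, 3) with symbolic sizes
# (s0, s1) and ex_stride = (3, 1), the val-ascending pass first sets
# stride[1] = 1 and records candidates[3 * 1] = s1; the next entry (val = 3)
# hits that candidate, so stride[0] = s1. The result (s1, 1) expresses
# strides in terms of sizes without allocating a new symbol.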
@record_shapeenv_event()
def create_symintnode(
self,
sym: sympy.Expr,
*,
hint: Optional[int],
source: Optional[Source] = None,
) -> IntLikeType:
"""Create a SymInt value from a symbolic expression
If you know what the current hint value of the SymInt to be created
is, pass it into hint. Otherwise, pass None and we will make our best
guess
"""
if self._translation_validation_enabled and source is not None:
# Create a new symbol for this source.
symbol = self._create_symbol_for_source(source)
assert symbol is not None
# Create a new FX placeholder and Z3 variable for 'symbol'.
fx_node = self._create_fx_placeholder_and_z3var(symbol, int)
# Add an equality assertion for the newly created symbol and 'sym'.
self._add_assertion(sympy.Eq(symbol, sym))
else:
fx_node = None
out: IntLikeType
if isinstance(sym, sympy.Integer):
if hint is not None:
assert int(sym) == hint
out = int(sym)
else:
# How can this occur? When we mark_unbacked, we end up with a real
# tensor that has hints for all sizes, but we MUST NOT create a
# SymNode with a hint, because we're hiding the hint from our eyes
# with the unbacked Symbol. And in fact, the hint compute may be
# inconsistent with size oblivious tests.
if free_unbacked_symbols(sym):
hint = None
out = SymInt(SymNode(sym, self, int, hint, fx_node=fx_node))
return out
@record_shapeenv_event()
def create_symfloatnode(
self,
sym: sympy.Expr,
*,
hint: Optional[int | float | bool],
source: Optional[Source] = None,
) -> FloatLikeType:
"""Create a SymFloat value from a symbolic expression"""
if self._translation_validation_enabled and source is not None:
# Create a new symbol for this source.
symbol = self._create_symbol_for_source(source)
assert symbol is not None
# Create a new FX placeholder and Z3 variable for 'symbol'.
fx_node = self._create_fx_placeholder_and_z3var(symbol, float)
# Add an equality assertion for the newly created symbol and 'sym'.
self._add_assertion(sympy.Eq(symbol, sym))
else:
fx_node = None
out: FloatLikeType
if isinstance(sym, sympy.Float):
if hint is not None:
assert float(sym) == hint
out = float(sym)
else:
# You could give this the same treatment as SymInt above if
# you supported mark_unbacked on a float, but it's a kind of
# strange thing to do though because floats don't get 0/1
# specialization anyway
if free_unbacked_symbols(sym):
assert hint is None, sym
out = SymFloat(SymNode(sym, self, float, hint, fx_node=fx_node))
return out
@record_shapeenv_event()
def create_unspecified_symint_and_symbol(
self, value: int, source: Source, dynamic_dim: DimDynamic
) -> IntLikeType:
"""Create a SymInt wrapping a new unspecified symbol"""
return self.create_symintnode(
self.create_unspecified_symbol(
value,
source=source,
dynamic_dim=dynamic_dim,
),
hint=value,
source=source,
)
def create_symboolnode(self, sym: sympy.Expr) -> SymBool:
"""Create a SymBool object from a sympy boolean expression"""
# This function is only being used in serialization, so we do not track it
# for validation.
return SymBool(SymNode(sym, self, bool, None))
def _log_create_unbacked_symbol(
self,
prefix: str,
symbol: sympy.Symbol,
vr: ValueRanges,
source: Optional[Source] = None,
sym_node: Optional[SymNode] = None,
) -> None:
is_debug = config.extended_debug_create_symbol is not None and str(
symbol
) in config.extended_debug_create_symbol.split(",")
sloc: Union[str, SLoc]
if source is None:
sloc, maybe_extra_debug = self._get_stack_summary(is_debug)
else:
sloc, maybe_extra_debug = source.name(), ""
log.info(
"%s %s [%s, %s] %s%s",
prefix,
symbol,
vr.lower,
vr.upper,
sloc,
maybe_extra_debug,
stack_info=is_debug,
)
trace_structured(
"create_unbacked_symbol",
metadata_fn=lambda: {
"symbol": str(symbol),
"node_id": id(sym_node),
"vr": f"[{vr.lower}, {vr.upper}]",
"user_stack": structured.get_user_stack(3),
"stack": structured.get_framework_stack(),
},
)
@record_shapeenv_event()
def create_unbacked_symfloat(self) -> SymFloat:
"""Create a symbolic float without a hint value"""
symbol: sympy.Symbol = make_symbol(
SymT.UNBACKED_FLOAT, self.unbacked_symfloat_counter
)
self.unbacked_symfloat_counter += 1
self.counter["create_unbacked_symbol"] += 1
if not self._ignore_fresh_unbacked_symbols_tls():
self.pending_fresh_unbacked_symbols.append(symbol)
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = ValueRanges.unknown()
assert vr.is_float
sloc = self._get_sloc()
self.var_to_range_sloc[symbol] = ValueRangesSLoc(sloc, sloc)
# Create a new FX placeholder and Z3 variable for 'symbol'.
fx_node = self._create_fx_placeholder_and_z3var(symbol, float)
sym_node = SymNode(symbol, self, float, None, fx_node=fx_node)
self._log_create_unbacked_symbol(
"create_unbacked_symfloat", symbol, vr, sym_node=sym_node
)
return SymFloat(sym_node)
@record_shapeenv_event()
def create_unbacked_symint(self, source: Optional[Source] = None) -> SymInt:
"""Create a symbolic integer without a hint value"""
symbol: sympy.Symbol = make_symbol(
SymT.UNBACKED_INT, self.unbacked_symint_counter, integer=True
)
self.unbacked_symint_counter += 1
if not self._ignore_fresh_unbacked_symbols_tls():
self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = self._default_unspecified_value_range()
assert vr.is_int
sloc = self._get_sloc()
self.var_to_range_sloc[symbol] = ValueRangesSLoc(sloc, sloc)
# Create a new FX placeholder and Z3 variable for 'symbol'.
fx_node = self._create_fx_placeholder_and_z3var(symbol, int)
sym_node = SymNode(symbol, self, int, None, fx_node=fx_node)
self._log_create_unbacked_symbol(
"create_unbacked_symint", symbol, vr, source, sym_node=sym_node
)
return SymInt(sym_node)
def is_unbacked_symint(self, symbol: sympy.Symbol) -> bool:
"""Check if a sympy symbol matches the naming convention for unbacked symbols"""
return symbol_is_type(symbol, SymT.UNBACKED_INT)
@record_shapeenv_event()
def create_unbacked_symbool(self) -> SymBool:
"""Create a symbolic boolean without a hint value"""
symbol: sympy.Symbol = make_symbol(
SymT.UNBACKED_INT, self.unbacked_symint_counter, integer=True
)
self.unbacked_symint_counter += 1
if not self._ignore_fresh_unbacked_symbols_tls():
self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = ValueRanges(0, 1)
assert vr.is_int
sloc = self._get_sloc("default value range for unbacked SymBool")
self.var_to_range_sloc[symbol] = ValueRangesSLoc(sloc, sloc)
# Create a new FX placeholder and Z3 variable for 'symbol'.
fx_node = self._create_fx_placeholder_and_z3var(symbol, bool)
sym_node = SymNode(sympy.Eq(symbol, 1), self, bool, None, fx_node=fx_node)
self._log_create_unbacked_symbol(
"create_unbacked_symbool", symbol, vr, sym_node=sym_node
)
return SymBool(sym_node)
@record_shapeenv_event()
def create_unspecified_symbol(
self,
val: Union[int, SymInt, float, SymFloat],
source: Source,
dynamic_dim: DimDynamic = DimDynamic.DUCK,
constraint_dim: DimConstraint = None, # NB: includes None
symbolic_context: Optional[StatelessSymbolicContext] = None,
) -> sympy.Expr:
"""
Create a symbol with an unspecified value
Compared to standard symbols we do not assume the value is positive,
nor do we specialize on zero or one values.
"""
# 'positive' is None for unspecified symbols, since we can't
# assume that it is either positive or negative.
# We don't want to specialize 0/1 values for an unspecified symbol,
# so that we always get a new symbol regardless of val.
return self.create_symbol(
val,
source,
dynamic_dim,
constraint_dim,
positive=None,
do_not_specialize_zero_one=True,
symbolic_context=symbolic_context,
)
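# Illustrative sketch (hypothetical `env` and `source`): because
# do_not_specialize_zero_one=True is passed through, even a value of 1
# yields a fresh symbol rather than the constant sympy.S.One:
#
#     s = env.create_unspecified_symbol(1, source)  # a sympy.Symbol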
@record_shapeenv_event()
def create_symbol(
self,
val: int,
source: Source,
dynamic_dim: DimDynamic = DimDynamic.DUCK,
constraint_dim: DimConstraint = None, # NB: includes None
positive: Optional[bool] = True,
do_not_specialize_zero_one: bool = False,
symbolic_context: Optional[StatelessSymbolicContext] = None,
) -> sympy.Expr:
"""Create a new symbol which is tracked by this ShapeEnv"""
# check if constraint_dim is actually a static integer
if (
isinstance(constraint_dim, StrictMinMaxConstraint)
and constraint_dim.vr.lower == constraint_dim.vr.upper
):
dynamic_dim = DimDynamic.STATIC
if constraint_dim.vr.lower != val:
raise ConstraintViolationError(
f"Static shape constraint of {constraint_dim.vr.lower} does not match input size of {val}, "
f"for {source.name()}"
)
if symbolic_context:
from torch._dynamo.source import TensorPropertySource
assert isinstance(source, TensorPropertySource)
# TODO: storage_offset handling?
assert source.idx is not None
symbolic_context.dynamic_sizes[source.idx] = dynamic_dim
symbolic_context.constraint_sizes[source.idx] = None
constraint_dim = None
# see note [Tensor Fakification and Symbol Caching]
source_name = source.name()
if (
isinstance(symbolic_context, StatefulSymbolicContext)
and id(self) not in symbolic_context.shape_env_to_source_to_symbol_cache
):
symbolic_context.shape_env_to_source_to_symbol_cache[id(self)] = {}
if (
isinstance(symbolic_context, StatefulSymbolicContext)
and source_name
and (
source_name
in symbolic_context.shape_env_to_source_to_symbol_cache[id(self)]
)
):
return symbolic_context.shape_env_to_source_to_symbol_cache[id(self)][
source_name
]
if dynamic_dim in (DimDynamic.SIZE_LIKE_UNBACKED, DimDynamic.OBLIVIOUS_SIZE):
out = self.create_unbacked_symint(source).node.expr
self._constrain_range_for_size(out)
self.unbacked_inputs.add(out)
if isinstance(symbolic_context, StatefulSymbolicContext) and source_name:
symbolic_context.shape_env_to_source_to_symbol_cache[id(self)][
source_name
] = out
if dynamic_dim is DimDynamic.OBLIVIOUS_SIZE:
self.oblivious_var_to_val[out] = val
return out
if do_not_specialize_zero_one:
specialize_zero_one = False
else:
specialize_zero_one = self.specialize_zero_one
assert isinstance(source, Source), f"{type(source)} {source}"
assert not (positive and val < 0), f"positive set for negative value: {val}"
# It's always sound to allocate a symbol as DYNAMIC. If the user
# constrained the symbol, force the symbolic_context to DYNAMIC, because our
# constraint code will do weird stuff if, e.g., it's duck shaped
if constraint_dim is not None:
dynamic_dim = DimDynamic.DYNAMIC
if dynamic_dim is DimDynamic.STATIC:
out = sympy.Integer(val)
if isinstance(symbolic_context, StatefulSymbolicContext) and source_name:
symbolic_context.shape_env_to_source_to_symbol_cache[id(self)][
source_name
] = out
return out
elif dynamic_dim is DimDynamic.DUCK:
# duck_shape can be used to globally turn off duck shaping, even
# if it was requested
duck = self.duck_shape
elif dynamic_dim is DimDynamic.DYNAMIC:
duck = False
else:
raise AssertionError(f"unhandled dynamic_dim {dynamic_dim}")
sloc = self._get_sloc()
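# 0/1 specialization: sizes that are exactly 0 or 1 are baked in as
# constants, since downstream reasoning (e.g. contiguity and
# broadcasting checks) depends heavily on whether a dim is 0 or 1.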
if val in (0, 1) and specialize_zero_one:
if val == 0:
return sympy.S.Zero
else:
return sympy.S.One
elif not duck or val not in self.val_to_var:
# If we're not duck shaping, we always create a new symbol
# Even if we're duck shaping, if we haven't seen this particular
# value before, we also create a new symbol
symbol_id = self._generate_unique_id(source.name())
if type(val) is int or is_nested_int(val):
sympy_expr = make_symbol(
SymT.SIZE, symbol_id, positive=positive, integer=True
)
else:
sympy_expr = make_symbol(
SymT.FLOAT, symbol_id, positive=positive, real=True
)
self.source_to_var[source_name] = sympy_expr
# We always associate vars to vals
if isinstance(val, int):
self.var_to_val[sympy_expr] = sympy.Integer(val)
elif isinstance(val, float):
self.var_to_val[sympy_expr] = sympy.Float(val)
else:
# Only used for jagged layout nested tensors
self.var_to_val[sympy_expr] = SingletonInt(
val.node.nested_int(), coeff=val.node.nested_int_coeff()
)
# Do the appending later, because we always want to populate this
self.var_to_sources[sympy_expr] = []
# Create a Z3 variable for the new symbol.
self._add_z3var(sympy_expr, int)
if duck:
# Make sure to reuse this symbol for subsequent duck shaping
# pyrefly: ignore [unsupported-operation]
self.val_to_var[val] = sympy_expr
if isinstance(val, int):
if positive:
# Add assertions for the newly created symbols
self._add_assertion(sympy_expr > 1)
# Apply default range, which assumes not zero-one
self.var_to_range[sympy_expr] = self._default_value_range(
do_not_specialize_zero_one
)
self.var_to_range_sloc[sympy_expr] = ValueRangesSLoc(
self._get_sloc(
"user code shown is first use of this value--the guard itself is not "
"due user code but due to 0/1 specialization in the framework; to "
"avoid specialization try torch._dynamo.decorators.mark_unbacked(tensor, dim)"
if self.specialize_zero_one
else None
),
sloc,
)
else:
self.var_to_range[sympy_expr] = (
self._default_unspecified_value_range()
)
self.var_to_range_sloc[sympy_expr] = ValueRangesSLoc(sloc, sloc)
# Small performance optimization: if we have a min-max constraint,
# we can proactively narrow to that range
if isinstance(constraint_dim, StrictMinMaxConstraint):
assert not duck
self._update_var_to_range(
sympy_expr, constraint_dim.vr, is_constraint=True
)
vr = self.var_to_range[sympy_expr]
assert vr.is_int
if val not in vr:
raise ConstraintViolationError(
f"{val} not in range [{vr.lower}, {vr.upper}]"
)
range_str = f"[{vr.lower}, {vr.upper}]"
elif isinstance(val, float):
self.var_to_range[sympy_expr] = vr = ValueRanges(-sympy.oo, sympy.oo)
self.var_to_range_sloc[sympy_expr] = ValueRangesSLoc(sloc, sloc)
range_str = f"[{vr.lower}, {vr.upper}]"
assert vr.is_float
else:
# Skip var_range logic for SingletonInt
# Only used for jagged layout nested tensors
range_str = ""
r = sympy_expr
is_debug = config.extended_debug_create_symbol is not None and str(
sympy_expr
) in config.extended_debug_create_symbol.split(",")
maybe_more_info = ""
if not is_debug and os.getenv("TORCHDYNAMO_EXTENDED_ADVICE", "1") not in (
"0",
"",
):
maybe_more_info = (
", for more info run with "
f'TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="{sympy_expr}" '
"or to suppress this message run with "
'TORCHDYNAMO_EXTENDED_ADVICE="0"'
)
sloc, maybe_extra_debug = self._get_stack_summary(is_debug)
self.log.info(
"create_symbol %s = %s for %s %s %s%s%s",
sympy_expr,
val,
source.name(),
range_str,
sloc,
maybe_more_info,
maybe_extra_debug,
stack_info=is_debug,
)
trace_structured(
"create_symbol",
metadata_fn=lambda: {
"symbol": str(sympy_expr),
"val": repr(val),
"vr": range_str,
"source": source.name(),
"user_stack": structured.from_traceback(
TracingContext.extract_stack()
),
"stack": structured.from_traceback(
CapturedTraceback.extract(skip=1).summary()
),
},
)
self.counter["create_symbol"] += 1
else:
# This implements duck-shaping: input sizes that match are assigned
# the same symint
r = self.val_to_var[val]
self.source_to_var[source_name] = r
self.log.debug("create_symbol %s duck sized %s", r, source.name())
if isinstance(r, sympy.Symbol):
r_sources = self.var_to_sources[r]
r_sources.append(source)
if not source.is_ephemeral() and r_sources[0].is_ephemeral():
# prefer non-ephemeral source first since it may be guarded on later
r_sources[0], r_sources[-1] = r_sources[-1], r_sources[0]
# This ensures we get zeros in symbol_guard_counts, which makes
# some queries simpler (since we will accumulate mass on 0 this
# way)
self.symbol_guard_counter[r] = 0
if isinstance(symbolic_context, StatefulSymbolicContext) and source_name:
symbolic_context.shape_env_to_source_to_symbol_cache[id(self)][
source_name
] = r
return r
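# Illustrative duck-shaping sketch (hypothetical `env`, `source_a`,
# `source_b`): with duck_shape enabled, two inputs that happen to have the
# same size value share one symbol, so their equality is assumed for free:
#
#     s_a = env.create_symbol(4, source_a)  # allocates s0; val_to_var[4] = s0
#     s_b = env.create_symbol(4, source_b)  # duck sized: returns s0 again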
def add_var_to_val(self, expr: sympy.Symbol, val: int) -> None:
"""Adds a new symbol to the symbolic environment."""
log.debug("add_var_to_val %s %s", expr, val, stack_info=True)
assert expr not in self.var_to_val, f"{expr} already exists"
self.var_to_val[expr] = sympy.Integer(val)
def _debug_name(self, source: Source) -> str:
src_name = source.name()
return self.source_name_to_debug_name.get(src_name, src_name)
def _render_range_for_constraint_violation(
self, source: Source, c: Union[StrictMinMaxConstraint, RelaxedUnspecConstraint]
) -> str:
if isinstance(c, StrictMinMaxConstraint):
lower, upper = c.vr.lower, c.vr.upper
default = self._default_value_range()
if lower <= default.lower:
lower = None
if upper >= default.upper:
upper = None
c_render = (
f"{self._debug_name(source)} = {source.name()} in the specified range"
)
if lower is not None and upper is not None:
c_render += f" {lower} <= {self._debug_name(source)} <= {upper}"
elif lower is None and upper is not None:
c_render += f" {self._debug_name(source)} <= {upper}"
elif lower is not None and upper is None:
c_render += f" {lower} <= {self._debug_name(source)}"
return c_render
return c.render(source)
def produce_guards(self, *args: Any, **kwargs: Any) -> list[str]:
"""
Like produce_guards_verbose, but only returns the non-verbose python
guard expressions (no verbose guards are produced).
"""
return self.produce_guards_verbose(*args, **kwargs, langs=("python",))[0].exprs
def produce_guards_verbose(
self,
placeholders: Sequence[FakeTensor],
sources: Sequence[Source],
source_ref: Callable[[Source], str] = lambda n: n.name(),
*,
guards: Optional[list[ShapeGuard]] = None,
input_contexts: Optional[DimList[SymbolicContext]] = None,
# Encodes user-specified input shape equations of the form s = s' and s = fn(s').
# (See docs on EqualityConstraint for details of the encoding.)
equalities_inputs: Optional[EqualityConstraint] = None,
_simplified: bool = False,
# Indicates if we should produce guards for known static values.
ignore_static: bool = True,
langs: tuple[str, ...] = ("python", "verbose_python"),
) -> list[_ShapeGuardsHelper]:
"""
Generates a list of guard strings which, when evaluated in a context that
defines tensors for all the sources, return True or False depending
on whether the guards in the list evaluated to True or not. Primarily used by Dynamo,
but this is also helpful for manual testing of guards (see
evaluate_guards_for_args)
For convenience in testing, a source is allowed to be a str,
in which case we will assume it is a LocalSource
_simplified lets you omit duck sizing, equality and 0/1 guards.
This is useful for testing when you don't care about the boilerplate
guards, and it may be helpful for user output too (be careful though;
some equality guards are nontrivial! It would be nice to get simplified
output to print them too). It's private because it's not
intended for normal use.
By default, returns guards in python and in python with verbose
comments (verbose_python).
"""
self.log.info("produce_guards")
# Check if we get to the same ShapeEnv state by replaying the recorded events.
# This will create a new ShapeEnv instance, and call all recorded function
# calls on this new instance. Finally, it will check whether this new instance
# has equal state.
#
# It's important that we do it in the beginning of this function, since it modifies
# self.dim_constraints through its execution. Changes that happen in this method
# aren't interesting, since this is the function call we wish to reproduce at the
# end. If we wish to simply reproduce ShapeEnv instances even after this call,
# this method should also be recorded.
if self.check_recorded_events:
shape_env = replay_shape_env_events(self.events)
self.check_equal(shape_env)
assert len(placeholders) == len(sources), (
    f"len(placeholders)={len(placeholders)} != len(sources)={len(sources)}"
)
Tensorlike = (torch.Tensor, FakeTensorMeta)
def _create_no_constraints_context(t: Tensor) -> StatelessSymbolicContext:
return StatelessSymbolicContext(
# Ignored; only the constraints part is relevant below.
dynamic_sizes=[DimDynamic.DYNAMIC] * t.dim(),
dynamic_strides=[DimDynamic.INFER_STRIDE] * t.dim(),
constraint_sizes=[None] * t.dim(),
constraint_strides=[None] * t.dim(),
)
# Expand optional inputs, or verify invariants are upheld
if input_contexts is None:
# pyrefly: ignore [bad-assignment]
input_contexts = [
# pyrefly: ignore [bad-argument-type]
_create_no_constraints_context(t) if isinstance(t, Tensorlike) else None
for t in placeholders
]
else:
assert len(input_contexts) == len(placeholders)
for i, (t, context) in enumerate(zip(placeholders, input_contexts)):
if isinstance(t, Tensorlike):
if context is None:
# pyrefly: ignore [bad-argument-type]
input_contexts[i] = _create_no_constraints_context(t)
else:
assert isinstance(t, (SymInt, int, SymFloat, float))
assert not isinstance(context, list)
# It took a lot of sweat to figure out the algorithm here. Let's
# explain how it works.
#
# The ShapeEnv lifecycle looks something like this:
#
# - For each input, you either generate a fresh Sympy symbol (s0) to
# represent its value (a binding site), or you reuse some
# preexisting symbol or expression, skipping the symbol allocation
# (e.g., duck sizing to a preexisting symbol, or expressing a
# stride as a multiplication of a separate stride and size.)
# Naively, you might expect to bind a fresh Sympy symbol for
# every input, but this is fairly wasteful as most of these
# symbols immediately simplify away, and if you don't eagerly
# specialize, e.g., 0/1 symbols, you end up with very complicated
# expressions that are not optimizable in practice.
#
# - You perform some compute on these symbols, occasionally
# introducing guards on boolean expressions on these symbols.
# In particular, whenever we guard on equality (_maybe_guard_rel),
# we can simplify shapes; e.g., when s0 == s1 * 2, we can now
# replace all occurrences of s0 with s1 * 2. Sometimes, a
# boolean expression evaluation doesn't introduce a guard, as
# the guard is already entailed by the simplifications we have
# applied.
#
# - In the end, you have a bunch of replacements (saying how to
# simplify shapes) and a bunch of guards (all the equality guards
# are trivial, because they're covered by the replacements).
#
# From the ShapeEnv, we must generate a Python expression that, when
# evaluated on a set of inputs, tells us whether or not these boolean
# expressions would have evaluated in the same way. However,
# we cannot easily compute this, as we elide recording boolean
# expressions when we think they are vacuously true. Thus, we seek
# an approximation: we must generate an expression that, if true, would have
# produced an "equivalent" ShapeEnv, which would answer guard
# expressions in the same way.
#
# Our notion of equivalence is a bit subtle. For example, consider
# the ShapeEnv created from an input of size (5, 4) versus (4, 4)
# (no other guards.) Duck sizing would generate (s0, s1) in the first
# case but (s0, s0) in the second. We do NOT assume that size
# variables are disjoint; so in fact a graph that assumes the input
# could be (s0, s1) subsumes (s0, s0) (setting s0 == s1), but not
# vice versa. However, consider an analogous case (1,) versus (2,).
# Duck sizing generates (1,) and (s0,); the (s0,) graph does NOT
# subsume the (1,) graph because we assume that any size variable
# is NOT 0/1 (and make simplifications according to this; e.g., if
# we queried s0 == 0, we would immediately return False without
# returning a guard.)
#
# So, it is perhaps easier to flip things on their head: the guard
# expressions we generate here say what simplifications are valid,
# and what are not. Below, we explain each of the guard expressions
# we generate
# TODO: Make this more efficient by binding all the size/stride/offsets
# to locals before performing tests on them.
from torch._dynamo.source import TensorProperty, TensorPropertySource
# Actual codegen must be delayed as we don't necessarily know what
# the symbol mapping is
input_guards = []
symbol_to_source: dict[sympy.Symbol, list[Source]] = collections.defaultdict(
list
)
symbol_to_constraints: defaultdict[sympy.Symbol, set[Constraint]] = (
collections.defaultdict(set)
)
constraint_violations: list[tuple[bool, str, Callable[[], str]]] = []
printers: list[_ShapeGuardPrinter] = []
py_printer = ShapeGuardPythonPrinter(
symbol_to_source, source_ref, self.var_to_sources
)
for lang in langs:
if lang in ["python", "verbose_python"]:
printers.append(py_printer)
elif lang == "cpp":
printers.append(
_ShapeGuardCppPrinter(
symbol_to_source, source_ref, self.var_to_sources
)
)
else:
raise NotImplementedError(f"Unknown lang: {lang}")
def record_constraint_violation(
warn_only: bool,
debug_name: str,
msg: str,
hint: Optional[Callable[[], str]] = None,
) -> None:
constraint_violations.append(
(warn_only, debug_name, lambda: f"{msg}{hint()}" if hint else msg)
)
def is_dim(src: object) -> TypeGuard[TensorPropertySource]:
return (
isinstance(src, TensorPropertySource)
and src.prop is TensorProperty.SIZE
)
if equalities_inputs:
source_index = {}
for i, src in enumerate(sources):
source_index[src.name()] = i
def get_expression(tensor_dim_src: Source) -> sympy.Expr:
fake = placeholders[source_index[tensor_dim_src.base.name()]] # type: ignore[attr-defined]
assert tensor_dim_src.idx is not None # type: ignore[attr-defined]
symint = fake.shape[tensor_dim_src.idx] # type: ignore[attr-defined]
if isinstance(symint, torch.SymInt):
return symint.node.expr
else:
assert type(symint) is int, f"Expected int, got {type(symint)}"
return sympy.Integer(symint)
for src1, src2 in equalities_inputs.source_pairs:
expr1, expr2 = get_expression(src1), get_expression(src2)  # type: ignore[arg-type]
# Check whether given input shape values satisfy a specified equation s = s'.
# - Raise when the equation was violated by the given input shape values.
# - Otherwise issue a guard to constrain them.
concrete_val = self.evaluate_expr(sympy.Eq(expr1, expr2))
if not concrete_val:
raise ConstraintViolationError(
f"{src1.name()} = {expr1 if isinstance(expr1, int) else expr1.xreplace(self.var_to_val)}"
" is not equal to "
f"{src2.name()} = {expr2 if isinstance(expr2, int) else expr2.xreplace(self.var_to_val)}"
)
for srcEq, root, fn in equalities_inputs.derived_equalities:
expr1 = get_expression(srcEq)
# recall that root is either a phantom symbol or an input source
if isinstance(root, sympy.Symbol):
expr2, debug_name = root, self.var_to_sources[root][0].name()
elif isinstance(root, sympy.Integer):
expr2, debug_name = root, str(root)
else:
expr2, debug_name = get_expression(root), self._debug_name(root)
expr2_ = fn(expr2)
# Check whether given input shape values satisfy a specified equation s = fn(s').
# - Raise when the equation was violated by the given input shape values.
# - Otherwise issue a guard to constrain them.
concrete_val = self.evaluate_expr(sympy.Eq(expr1, expr2_))
if not concrete_val:
raise ConstraintViolationError(
f"Expected input {srcEq.name()} to be equal to "
f"{fn(sympy.Symbol(debug_name))}, "
f"where {debug_name} = {expr2.xreplace(self.var_to_val)}, "
f"but got {expr1.xreplace(self.var_to_val)}"
)
for phantom_symbol in equalities_inputs.phantom_symbols:
if isinstance(phantom_symbol, sympy.Symbol):
# we created additional phantom symbols that are not input shape dimensions
symbol_to_source[phantom_symbol].extend(
self.var_to_sources[phantom_symbol]
)
# How do we know what the value of s0 is? Fresh variables can only be
# bound by inputs, so there MUST be some other input which binds the
# variable. If there is no such input, this is an error in our
# system. We record where all symbols come from, to help you diagnose
# why those symbols didn't occur.
#
# In fact, generally speaking it is only possible for the "outermost"
# user of a ShapeEnv to evaluate the guards, because some inputs may
# not be available to inner levels. For example, Dynamo can guard on
# tensors that never actually become graph arguments (they are
# pruned). In this case, only Dynamo knows about these arguments.
def track_symint(
source: Source, val: IntLikeType, constraint: DimConstraint = None
) -> None:
log.debug("track_symint %s %s %s", LazyString(source.name), val, constraint)
assert not isinstance(val, SymInt) or is_symbolic(val)
if isinstance(val, SymInt) and val.node.maybe_as_int() is not None:
val = val.node.maybe_as_int()
if isinstance(val, SymInt):
s = val.node.expr
if isinstance(s, sympy.Symbol):
symbol_to_source[s].append(source)
if constraint is not None and not isinstance(
constraint, RelaxedUnspecConstraint
):
symbol_to_constraints[s].add(constraint)
else:
constraint_violated = False
if isinstance(constraint, StrictMinMaxConstraint):
# try inferring the ranges of the expr s
sym_vrs = {
x: self.var_to_range.get(x, None) for x in s.free_symbols
}
if any(vr is None for vr in sym_vrs.values()):
# some of the free symbols in s don't have ranges
constraint_violated = True
elif isinstance(constraint, RelaxedUnspecConstraint):
if s.is_number:
i = int(s)
# Don't complain about 0/1 specialization, we
# expect to have to compile in this case anyway
if i not in (0, 1):
constraint_violated = True
if constraint_violated:
assert constraint is not None
def hint(s: sympy.Expr) -> str:
sexpr = py_printer.doprint(s)
return f"{sexpr}."
var_with_range = self._render_range_for_constraint_violation(
source, constraint
)
msg = (
f"Not all values of {var_with_range} are valid because "
f"{self._debug_name(source)} was inferred to be equal to "
)
record_constraint_violation(
constraint.warn_only,
self._debug_name(source),
msg,
hint=functools.partial(hint, s),
)
input_guards.append((source, s))
else:
s = sympy.Integer(val)
input_guards.append((source, s))
constraint_violated = False
if isinstance(constraint, StrictMinMaxConstraint):
if not (
s == constraint.vr.lower == constraint.vr.upper
): # allow static constraints
constraint_violated = True
elif isinstance(constraint, RelaxedUnspecConstraint):
# Don't complain about 0/1 specialization, we
# expect to have to compile in this case anyway
if val not in (0, 1):
constraint_violated = True
if constraint_violated:
assert constraint is not None
var_with_range = self._render_range_for_constraint_violation(
source, constraint
)
user_stack = self.specialization_stacks.get(source, None)
msg = (
f"You marked {self._debug_name(source)} as dynamic but your code "
f"specialized it to be a constant ({val}). If you're using mark_dynamic, "
f"either remove it or use maybe_mark_dynamic. If you're using Dim.DYNAMIC, "
f"replace it with either Dim.STATIC or Dim.AUTO."
+ (
"\n\nUser stack:\n" + "".join(user_stack.format())
if user_stack
else ""
)
)
record_constraint_violation(
constraint.warn_only, self._debug_name(source), msg
)
def track_symfloat(source: Source, val: FloatLikeType) -> None:
log.debug("track_symfloat %s %s", LazyString(source.name), val)
assert not isinstance(val, SymFloat) or is_symbolic(val)
if isinstance(val, SymFloat) and val.node.maybe_as_float() is not None:
val = val.node.maybe_as_float()
if isinstance(val, SymFloat):
s = val.node.expr
if isinstance(s, sympy.Symbol):
symbol_to_source[s].append(source)
input_guards.append((source, s))
else:
s = sympy.Float(val)
input_guards.append((source, s))
# pyrefly: ignore [no-matching-overload]
for t, source, context in zip(placeholders, sources, input_contexts):
if isinstance(source, str):
from torch._dynamo.source import LocalSource
source = LocalSource(source)
assert isinstance(source, Source)
if t is None:
continue
if isinstance(t, (SymInt, int)):
constraint = (
None if context is None else getattr(context, "constraint", None)
)
track_symint(source, t, constraint)
continue
elif isinstance(t, (SymFloat, float)):
track_symfloat(source, t)
continue
assert isinstance(t, Tensorlike)
if is_traceable_wrapper_subclass(t):
from torch._dynamo.source import AttrSource
assert isinstance(context, SubclassSymbolicContext)
# For subclasses, we need to track symints on BOTH the outer
# and inner tensors.
# TODO: type this better
sources_tensors_constraints: list[tuple[Source, Any, Any, Any]] = [
(source, t, context.constraint_sizes, context.constraint_strides)
]
attrs, _ = t.__tensor_flatten__()
for attr in attrs:
inner_t = getattr(t, attr)
inner_context = context.inner_contexts[attr]
sources_tensors_constraints.append(
(
AttrSource(source, attr),
inner_t,
inner_context.constraint_sizes, # type: ignore[attr-defined]
inner_context.constraint_strides, # type: ignore[attr-defined]
)
)
else:
sources_tensors_constraints = [
(source, t, context.constraint_sizes, context.constraint_strides) # type: ignore[attr-defined]
]
for (
src,
curr_t,
constraint_size,
constraint_stride,
) in sources_tensors_constraints:
if is_sparse_any(curr_t):
for i, ss in enumerate(curr_t.size()):
property_source = TensorPropertySource(
src, TensorProperty.SIZE, i
)
track_symint(property_source, ss, constraint_size[i])
else:
for i, ss in enumerate(curr_t.size()):
property_source = TensorPropertySource(
src, TensorProperty.SIZE, i
)
track_symint(property_source, ss, constraint_size[i])
for i, ss in enumerate(curr_t.stride()):
property_source = TensorPropertySource(
src, TensorProperty.STRIDE, i
)
track_symint(property_source, ss, constraint_stride[i])
track_symint(
TensorPropertySource(src, TensorProperty.STORAGE_OFFSET),
curr_t.storage_offset(),
)
# 1. Every input must equal the final simplified symbolic expression
# stored on the placeholder. Given a placeholder (s0*2, s1),
# if we have an input (2, 3), we must show s0*2 == 2 and s1 == 3.
# This does a lot of work: it covers duck sizing and equality guards.
all_exprs: list[list[str]] = [[] for _ in langs]
self.dim_constraints = DimConstraints(
symbol_to_source,
self.var_to_val,
set(symbol_to_constraints.keys()),
self.source_name_to_debug_name,
)
if not _simplified:
for source, expr in input_guards:
srcname = source.name()
if self._translation_validation_enabled:
# Ignore sources that were not turned into SymInts.
if srcname in self.source_to_symbol:
self._add_target_expr(
sympy.Eq(self.source_to_symbol[srcname], expr)
)
# Small optimization
if (
isinstance(expr, sympy.Symbol)
and symbol_to_source.get(expr)
and source == symbol_to_source[expr][0]
):
continue
# This logic excludes static values found on tensors from guarding, because
# dynamo's check_tensor_fn does that (see guards.cpp).
# However, for non-tensor sources, we still need to guard here.
if ignore_static and isinstance(source, TensorPropertySource):
if expr.is_number:
self.log.debug(
"Skipping guard %s", f"{source_ref(source)} == {expr}"
)
continue
if is_dim(source):
self.dim_constraints.add_equality(source, expr)
for exprs, printer, lang in zip(all_exprs, printers, langs):
res = f"{printer.print_source(source)} == {printer.doprint(expr)}"
if lang == "verbose_python":
if (s0 := self.source_to_var.get(srcname)) is not None:
if source != self.var_to_sources[s0][0]:
res = (
f"{res} # duck sizing added this equality because these "
f"variables had the same size {self.var_to_val[s0]} "
"(to avoid this specialization, set torch.fx.experimental._config.use_duck_shape = False)"
)
elif (sloc := self.replacements_slocs.get(s0)) is not None:
res = f"{res} # {sloc}"
else:
res = f"{res} # (unknown var {s0}, please file a bug)"
else:
res = f"{res} # (unknown source {srcname}, please file a bug)"
exprs.append(res)
if (
isinstance(source, TensorPropertySource)
and source.prop is TensorProperty.SIZE
and equalities_inputs
and len(expr.free_symbols) == 1
):
symbol = next(iter(expr.free_symbols))
if (
isinstance(expr, sympy.Symbol)
and expr in symbol_to_constraints
and not equalities_inputs.is_equal(
source, symbol_to_source[expr][0]
)
):
msg = (
f"The values of {self._debug_name(source)} = {source.name()} and "
f"{self._debug_name(symbol_to_source[expr][0])} = {symbol_to_source[expr][0].name()} "
"must always be equal."
)
record_constraint_violation(
equalities_inputs.warn_only, self._debug_name(source), msg
)
if (
not isinstance(expr, sympy.Symbol)
and symbol in symbol_to_constraints
and not equalities_inputs.is_derived(
source,
symbol_to_source[symbol][0],
lambda x: expr.xreplace({symbol: x}),
)
):
src = symbol_to_source[symbol][0]
msg = (
f"The values of {self._debug_name(source)} = {source.name()} must always be related to "
f"the values of {self._debug_name(src)} = {src.name()} by "
f"{self._debug_name(source)} = {expr.xreplace({symbol: sympy.sympify(self._debug_name(src))})}."
)
record_constraint_violation(
equalities_inputs.warn_only, self._debug_name(source), msg
)
# NB: Not necessary to report constraint violations here:
# constraints are guaranteed to be on symbols (we've already
# caught constants and non-atomic expressions), so we only
# have relational constraints, but we don't support those
# at the moment
# 2. Every guard must evaluate to True (but remember many guards
# like s0 == s1*2 become trivial due to simplification)
issued = set()
def issue_guard(guard: ShapeGuard) -> None:
expr = self.simplify(guard.expr)
# Avoid re-issuing the same guard.
if expr in issued:
return
issued.add(expr)
try:
is_trivial = False
if any(
is_dim(source)
for s in expr.free_symbols
for source in symbol_to_source[s]
):
assert self.dim_constraints is not None
is_trivial = self.dim_constraints.add(expr)
for exprs, printer, lang in zip(all_exprs, printers, langs):
guard_expr = printer.doprint(expr)
if lang == "verbose_python":
guard_expr = f"{guard_expr} # {guard.sloc}"
exprs.append(guard_expr)
self._add_target_expr(expr)
# A non-relational constraint on a single sizevar can violate
# a constraint
if not is_trivial and len(expr.free_symbols) == 1:
symbol = next(iter(expr.free_symbols))
source = symbol_to_source[symbol][0]
constraints = symbol_to_constraints[symbol]
for c in constraints:
if isinstance(c, StrictMinMaxConstraint):
var_with_range = (
self._render_range_for_constraint_violation(source, c)
)
msg = (
f"Not all values of {var_with_range} "
f"satisfy the generated guard {py_printer.doprint(expr)}."
)
record_constraint_violation(
c.warn_only, self._debug_name(source), msg
)
elif isinstance(c, RelaxedUnspecConstraint):
# This is fine, we allow guards here as long as it
# didn't constrain it to one value (we don't
# actually know this; this depends on our
# ValueRanges reasoning capability)
pass
else:
raise AssertionError(f"unrecognized constraint {c}")
except Exception:
self.log.warning("Failing guard allocated at %s", guard.sloc)
raise
# First, issue all guards.
# This removes all the checks that follow from bounds
# We could simply emit those and also the bounds 2 <= size when necessary
for guard in guards if guards is not None else self.guards:
if (
self._maybe_evaluate_static(
guard.expr, axioms=(), size_oblivious=guard.size_oblivious
)
is not None
):
continue
issue_guard(guard)
# Because there are guards that export's constraint solver can suggest good fixes for, that we may have
# deferred as runtime asserts, and that produce_guards() alone won't do anything with (e.g. divisibility guards),
# we want to send runtime asserts to export's constraint solver too. These will still stay in the graph as asserts,
# but export's constraint solver can decide whether to do anything with them (i.e. raise an error and provide
# suggested fixes, or decide it's out of scope and leave as a runtime assert in the graph).
for ra in self.deferred_runtime_asserts.get(None, []):
if self._maybe_evaluate_static(ra.expr, axioms=()) is not None:
continue
expr = self.simplify(ra.expr)
self.dim_constraints.add(expr)
# 3. Every symbol must be within its value range (this handles 0/1
# specialization too).
for symbol, sources in symbol_to_source.items():
r = self.var_to_range.get(symbol)
if r is None:
continue
vr_sloc = self.var_to_range_sloc[symbol]
assert sources
bounds = []
rf = source_ref(sources[0])
verbose_expr = ""
if r.lower not in (-sympy.oo, -int_oo):
if any(is_dim(source) for source in sources):
self.dim_constraints.add(sympy.Ge(symbol, r.lower))
# Only print lower bound in simplified mode if it is not the
# default
if not _simplified or r.lower != self._default_value_range().lower:
bounds.append(sympy.Le(r.lower, symbol, evaluate=False))
verbose_expr = f"{r.lower} <= {rf} # {vr_sloc.lower}"
if r.upper not in (sympy.oo, int_oo):
if any(is_dim(source) for source in sources):
self.dim_constraints.add(sympy.Le(symbol, r.upper))
# nontrivial upper bound is always interesting
bounds.append(sympy.Le(symbol, r.upper, evaluate=False))
if verbose_expr:
verbose_expr = f"{r.lower} <= {rf} <= {r.upper} # {vr_sloc.lower} and {vr_sloc.upper}"
else:
verbose_expr = f"{rf} <= {r.upper} # {vr_sloc.upper}"
if bounds:
bound = sympy.And(*bounds, evaluate=False)
for exprs, printer, lang in zip(all_exprs, printers, langs):
if lang == "verbose_python":
exprs.append(verbose_expr)
else:
exprs.append(printer.doprint(bound))
# NB: verbose_exprs are done above
# Check constraints
constraints = symbol_to_constraints[symbol]
for c in constraints:
if isinstance(c, StrictMinMaxConstraint):
# TODO: With int_oo, I think this condition is a noop
# now
if not (c.vr & self._default_value_range()).issubset(r):
source = sources[0]
expr = sympy.And(
sympy.Le(r.lower, symbol), sympy.Le(symbol, r.upper)
)
guard_expr = py_printer.doprint(expr)
var_with_range = (
self._render_range_for_constraint_violation(source, c)
)
msg = f"Not all values of {var_with_range} satisfy the generated guard {guard_expr}"
record_constraint_violation(
c.warn_only,
self._debug_name(source),
msg,
)
# We NaN specialize, which means similar to 0/1 specialization we
# should assume that the float is NOT nan. This is load-bearing:
# if you have something like an equality guard, nan will play
# merry hell with the reasoning.
if symbol_is_type(symbol, SymT.FLOAT):
res = f"not math.isnan({py_printer.print_source(sources[0])})"
for exprs, printer, lang in zip(all_exprs, printers, langs):
if lang == "verbose_python":
exprs.append(
f"{res} # implicit guard for float input due to NaN specialization in the framework"
)
elif lang == "python":
exprs.append(res)
elif lang == "cpp":
exprs.append(f"~std::isnan({printer.print_source(sources[0])})")
else:
raise NotImplementedError(f"Unimplemented for lang: {lang}")
if constraint_violations:
warn_msgs: list[str] = []
error_msgs: list[str] = []
debug_names = set()
for warn_only, debug_name, msg_cb in constraint_violations:
if warn_only:
str_msg = f" {len(warn_msgs) + 1}. {msg_cb()}"
warn_msgs.append(str_msg)
else:
str_msg = f" - {msg_cb()}"
error_msgs.append(str_msg)
# pyrefly: ignore [bad-argument-type]
debug_names.add(debug_name)
if len(error_msgs) > 0:
debug_names_str = ", ".join(sorted(debug_names))
err = "\n".join(error_msgs)
raise ConstraintViolationError(
f"Constraints violated ({debug_names_str})! "
'For more information, run with TORCH_LOGS="+dynamic".\n'
f"{err}"
)
elif len(warn_msgs) > 0:
log.debug("%s Warning only constraints violated", len(warn_msgs))
signpost_event(
"dynamic",
"produce_guards",
{
**self.co_fields,
**self.counter,
"num_guards": len(all_exprs[0]),
"free_symbols": sum(1 for v in symbol_to_source.values() if v),
# The keys are meaningless from an aggregate perspective, so
# don't include them. Biggest first.
"symbol_guard_counts": sorted(
self.symbol_guard_counter.values(), reverse=True
),
},
)
if self._translation_validation_enabled:
from torch.fx.experimental.validator import PopulateValidator
# Add all deferred runtime assertions; these are not technically
# handled by produce_guards but we need to put them in the target
# set
for ras in self.deferred_runtime_asserts.values():
for ra in ras:
self._add_target_expr(ra.expr)
# Add value range bound guards for all symbols with no trivial bounds.
# Reason: '_maybe_evaluate_static' may eliminate guards based on the
# refined value ranges.
for sym, vr in self.var_to_range.items():
if vr.lower not in (-sympy.oo, -int_oo):
self._add_target_expr(sympy.Le(vr.lower, sym))
if vr.upper not in (sympy.oo, int_oo):
self._add_target_expr(sympy.Le(sym, vr.upper))
# Before validating, populate the input of the validator with the
# built FX graph.
with fx_traceback.preserve_node_meta():
PopulateValidator(self.graph, self.validator).run()
# Only run translation validation when we are not passing custom guards
if guards is None:
self._check_translation_validate()
helpers: list[_ShapeGuardsHelper] = []
for exprs, printer, lang in zip(all_exprs, printers, langs):
if lang == "cpp":
assert isinstance(printer, _ShapeGuardCppPrinter)
helpers.append(_CppShapeGuardsHelper(exprs, printer.source_to_symbol))
else:
helpers.append(_ShapeGuardsHelper(exprs))
return helpers
def produce_guards_expression(
self,
placeholders: Sequence[Union[SymInt, FakeTensor]],
*,
guards: Optional[list[ShapeGuard]] = None,
ignore_static: bool = True,
) -> Optional[str]:
"""
Expected to be used with evaluate_guards_expression(). Produces the guards
for the given placeholders and returns a string expression to be evaluated
by evaluate_guards_expression given concrete values for the placeholders.
"""
from torch._dynamo.source import LocalSource
arg_names = [f"t{i}" for i in range(len(placeholders))]
produced_guards = self.produce_guards(
placeholders,
[LocalSource(a) for a in arg_names],
guards=guards,
ignore_static=ignore_static,
)
if produced_guards:
return " and ".join(produced_guards)
return None
def evaluate_symexpr(self, code: str) -> Union[int, float, bool]:
"""
To be used by compile_fx to evaluate symexprs
"""
args = {str(e): val for e, val in self.var_to_val.items()}
return eval(code, SYMPY_INTERP, args)
def deserialize_symexpr(self, code: str) -> Union[SymInt, SymFloat, SymBool]:
"""
To be used by compile_fx to deserialize symexprs
"""
args = {
str(e): SymInt(SymNode(e, self, int, int(val), fx_node=None))
for e, val in self.var_to_val.items()
}
return eval(code, SYMPY_INTERP, args)
def evaluate_guards_expression(self, code: str, args: Sequence[object]) -> bool:
"""
Expected to be used with produce_guards_expression(). Evaluates an expression
generated by produce_guards_expression for the given concrete args.
"""
arg_names = [f"t{i}" for i in range(len(args))]
return eval(code, SYMPY_INTERP, {"L": dict(zip(arg_names, args))})
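# Illustrative pairing sketch (hypothetical `env`, fake tensors `fx`, `fy`,
# and concrete tensors `x`, `y`):
#
#     code = env.produce_guards_expression([fx, fy])
#     ok = env.evaluate_guards_expression(code, [x, y]) if code else True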
def evaluate_guards_for_args(
self,
placeholders: Sequence[FakeTensor],
args: Sequence[Tensor],
*,
ignore_static: bool = True,
) -> bool:
"""Generate guards for a graph's placeholder values and evaluate the guards with args"""
code = self.produce_guards_expression(placeholders, ignore_static=ignore_static)
if code:
return self.evaluate_guards_expression(code, args)
return True
def get_pruned_guards(self, symints: Sequence[torch.SymInt]) -> list[ShapeGuard]:
"""
Get a list of guards, but pruned so it only provides guards that
reference symints from the passed in input
"""
# pyrefly: ignore [bad-assignment]
symints = {
s.node.expr for s in symints if isinstance(s.node.expr, sympy.Symbol)
}
guards = [
g for g in self.guards if all(s in symints for s in g.expr.free_symbols)
]
return guards
def bind_symbols(
self, placeholders: Sequence[FakeTensor], args: Sequence[Tensor]
) -> dict[sympy.Symbol, int]:
"""
Given a paired list of placeholders (fake tensors with
symbolic sizes) and concrete arguments (regular tensors
with real sizes), returns a dictionary mapping each
symbol to its real value. So for example, if you
have a placeholder with size (s0, s1), binding
(2, 4) to it will give you {s0: 2, s1: 4}. This is
not guaranteed to bind ALL symbols in the ShapeEnv;
we can't bind a symbol if it doesn't occur in any placeholder,
and symbols that already have replacements won't get bindings.
This is a little duplicative with evaluate_guards but
it's different enough that it seemed cleanest to make
another copy. This assumes the guards are already checked,
though if it's cheap we'll check for shenanigans.
"""
bindings: dict[sympy.Symbol, int] = {}
def bind_symint(arg: object, val: object) -> None:
if isinstance(val, SymInt):
assert isinstance(arg, int)
s = val.node.expr
if isinstance(s, sympy.Symbol):
if s in bindings:
assert bindings[s] == arg, f"{bindings[s]} != {arg}"
else:
bindings[s] = arg
elif isinstance(-s, sympy.Symbol):
if -s in bindings:
assert bindings[-s] == -arg, f"{bindings[-s]} != {-arg}"
else:
bindings[-s] = -arg
for t, arg in zip(placeholders, args):
if t is None:
continue
if isinstance(t, SymInt):
bind_symint(arg, t)
continue
assert isinstance(t, torch.Tensor)
for i, s in enumerate(t.size()):
bind_symint(arg.size(i), s)
for i, s in enumerate(t.stride()):
bind_symint(arg.stride(i), s)
bind_symint(arg.storage_offset(), t.storage_offset())
return bindings
def get_nontrivial_guards(self) -> list[SympyBoolean]:
"""Returns a list of guard expressions that aren't statically known (i.e. not trivial)"""
return [
self.simplify(guard.expr)
for guard in self.guards
if self._maybe_evaluate_static(
guard.expr, axioms=(), size_oblivious=guard.size_oblivious
)
is None
]
def format_guards(self, verbose: bool = False) -> str:
"""Format this shape env's guard expressions with optional traceback info if verbose"""
return "\n".join(
f" - {guard.expr}{' ' + str(guard.sloc) if verbose else ''}"
for guard in self.guards
)
def bound_sympy(
self, expr: sympy.Expr, size_oblivious: bool = False
) -> ValueRanges:
"""Given a sympy expression, computes a ValueRanges bound for what values it can be"""
# TODO: maybe it's guaranteed that x is in var_to_range?
var_to_range = {x: self.var_to_range.get(x, None) for x in expr.free_symbols}
if size_oblivious:
# Clamp values of size-like variables
# NB: discarding the old upper bound is intentional, per
# https://github.com/pytorch/pytorch/pull/123675
for x in self.size_like & var_to_range.keys():
if var_to_range[x] is not None:
# NB: do NOT set upper to 2 ** 48, we're using this solely
# to determine if we can do size-like replacement, the
# upper bound is irrelevant here
var_to_range[x] = ValueRanges(2, int_oo)
return bound_sympy(expr, var_to_range) # type: ignore[arg-type]
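# Example (illustrative): if var_to_range records s0 in [2, 10], then
# bound_sympy(s0 + 1) yields ValueRanges(3, 11); with size_oblivious=True,
# size-like symbols are first clamped to [2, int_oo] before bounding.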
@_lru_cache
def get_axioms(
self,
symbols: Optional[tuple[sympy.Symbol]] = None,
compute_hint: bool = False,
) -> tuple[SympyBoolean, ...]:
"""
Given the symbols in an expression, returns all the runtime asserts
that mention those symbols, concatenated with all the guards.
If symbols is None, it returns all the runtime asserts (and all the guards).
"""
if symbols is None:
runtime_asserts = (
r.expr for rs in self.deferred_runtime_asserts.values() for r in rs
)
else:
runtime_asserts = (
r.expr
for s in symbols
if s not in self.var_to_val
for r in self.deferred_runtime_asserts.get(s, ())
)
guards: Iterator[SympyBoolean] = (g.expr for g in self.guards)
axioms: Iterator[SympyBoolean] = itertools.chain(guards, runtime_asserts)
if compute_hint:
axioms = (
canonicalize_bool_expr(a.xreplace(self.var_to_val)) for a in axioms
)
return tuple(dict.fromkeys(axioms).keys())
@lru_cache(None)
def get_implications(
self, e: SympyBoolean
) -> tuple[tuple[SympyBoolean, sympy.logic.boolalg.BooleanAtom], ...]:
"""Given a expression, it returns a list of predicates that follow from it"""
equiv: dict[SympyBoolean, sympy.logic.boolalg.BooleanAtom] = {}
def add_expr(expr: SympyBoolean) -> None:
expr = canonicalize_bool_expr(expr)
if isinstance(expr, (sympy.Eq, sympy.Ne)):
# No need to canonicalize
# TODO We could further canonicalize Eq ordering the lhs and rhs somehow
# With this, we could remove the need for the commutativity part
opposite = sympy.Eq if isinstance(expr, sympy.Ne) else sympy.Ne
# Commutativity of == and !=
equiv[type(expr)(expr.lhs, expr.rhs, evaluate=False)] = sympy.true
equiv[type(expr)(expr.rhs, expr.lhs, evaluate=False)] = sympy.true
equiv[opposite(expr.lhs, expr.rhs, evaluate=False)] = sympy.false
equiv[opposite(expr.rhs, expr.lhs, evaluate=False)] = sympy.false
else:
# Expr and negation
equiv[expr] = sympy.true
# we do not pass evaluate=False like others on purpose here!
# we want not(a<b) to be a>=b and not ~(a<b).
equiv[canonicalize_bool_expr(sympy.Not(expr))] = sympy.false
add_expr(e)
# Other relational expressions this expression implies
if isinstance(e, sympy.Eq):
add_expr(sympy.Le(e.lhs, e.rhs, evaluate=False))
add_expr(sympy.Ge(e.lhs, e.rhs, evaluate=False))
elif isinstance(e, sympy.Lt):
add_expr(sympy.Le(e.lhs, e.rhs, evaluate=False))
add_expr(sympy.Ne(e.lhs, e.rhs, evaluate=False))
if e.lhs.is_integer and e.rhs.is_integer: # type: ignore[attr-defined]
add_expr(sympy.Le(e.lhs, e.rhs - 1, evaluate=False))
elif isinstance(e, sympy.Le):
add_expr(sympy.Lt(e.lhs, e.rhs + 1, evaluate=False))
return tuple(equiv.items())
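# Example (illustrative): for an integer comparison e = (s0 < s1), the
# implications recorded as true include s0 <= s1, s0 != s1 and
# s0 <= s1 - 1, while the canonicalized negation s0 >= s1 maps to false.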
@_lru_cache
def _maybe_evaluate_static(
self,
expr: sympy.Basic,
*,
unbacked_only: bool = False,
compute_hint: bool = False,
size_oblivious: bool = False,
axioms: Optional[tuple[SympyBoolean]] = None,
var_to_range: Optional[tuple[tuple[sympy.Symbol, ValueRanges]]] = None,
) -> Optional[sympy.Basic]:
"""
Tries to evaluate expr without introducing guards
If unbacked_only == True, then we only do substitutions on
unbacked SymInts (leaving regular hinted integers alone). This could
result in an expression that still contains backed SymInts, which you
could then potentially guard on.
Use compute_hint == True if you are trying to compute a non-binding
hint for the particular hint values of backed and unbacked SymInts,
e.g., if s0 happens to be 3 this run, compute_hint will substitute s0 with 3.
"""
# axioms together with compute_hint are not yet implemented (NYI)
assert not compute_hint or not axioms
expr = self.simplify(expr, size_oblivious)
if compute_hint:
expr = expr.xreplace(self.var_to_val).xreplace(self.unbacked_var_to_val)
expr = canonicalize_bool_expr(expr)
def resimplify_floor_div(axioms: dict[sympy.Expr, sympy.Expr]) -> None:
if not self._resimplify_floor_div_axioms:
return
self._resimplify_floor_div_axioms = False
new_items = {}
for k, v in list(axioms.items()):
# A FloorDiv in the implications could have become CleanDiv at this point,
# due to new facts added to the ShapeEnv. This handles that case, but it's
# not ideal: this is the only expression simplification that depends on
# the global state of the shape env.
# TODO: try to get rid of CleanDiv, since it breaks the invariant that
# simplifications of sympy expressions only depend on the expression itself.
if k.has(FloorDiv):
new_items.update({self.simplify(k): v})
axioms.update(new_items)
# Pattern matching
if axioms is None:
resimplify_floor_div(self.axioms)
subst = self.axioms
else:
subst = {}
for e in axioms:
if e.free_symbols.issubset(expr.free_symbols):
subst.update(dict(self.get_implications(self.simplify(e))))
resimplify_floor_div(subst)
expr = expr.xreplace(subst)
# TODO: compute hint might have gotten broken here
fs = expr.free_symbols
if not fs and (expr.is_number or expr.is_Boolean):
return expr
if var_to_range is None:
var_ranges = self.var_to_range
else:
var_ranges = dict(var_to_range)
symbol_info = tuple(
_SymbolInfo(
s,
var_ranges.get(s),
self.var_to_val.get(s),
s in self.size_like,
)
for s in sorted(fs, key=str) # TODO: speed up sort?
)
r = _maybe_evaluate_static_worker(
expr, symbol_info, unbacked_only, size_oblivious
)
return r
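# Example (illustrative): given an axiom Eq(Mod(s0, 4), 0) among the
# guards, _maybe_evaluate_static(Eq(Mod(s0, 4), 0)) can return sympy.true
# via the pattern-matching substitution above, without installing a guard.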
@_lru_cache
def replace(self, expr: _SympyT) -> _SympyT:
"""
Apply symbol replacements to any symbols in the given expression.
"""
replacements = {}
# pyrefly: ignore [missing-attribute]
for s in expr.free_symbols:
r = self._find(s)
# Micro-optimization: only do replacements if r and s are different
# Otherwise, xreplace is not a no-op and will trigger expensive
# assumption queries if expr has a relational node.
if not r.is_Symbol or r != s:
replacements[s] = r
if replacements:
# pyrefly: ignore [missing-attribute]
return safe_expand(expr.xreplace(replacements))
else:
return expr
@_lru_cache
def _update_divisible(self) -> None:
new_divisible = set()
for k in self.divisible:
res = self.replace(k)
if not res.is_number:
new_divisible.add(k)
self.divisible = new_divisible
self._update_version_counter()
@_lru_cache
def simplify(self, expr: _SympyT, size_oblivious: bool = False) -> _SympyT:
"""Use known constraints and replacements to simplify the given expr"""
expr = safe_expand(expr)
expr = self.replace(expr)
# Simplify max(0/1, x) to x when x >= 0/1. max(1, x) is a commonly introduced
# expression when creating contiguous strides.
if not size_oblivious:
min_max_replacements = {}
for atom in expr.atoms(Max): # type: ignore[has-type]
if len(atom.args) > 2:
continue
a, b = atom.args
if b == 1 or b == 0:
a, b = b, a
if a == 1 and self._maybe_evaluate_static(sympy.Ge(b, 1)):
min_max_replacements[atom] = b
if a == 0 and self._maybe_evaluate_static(sympy.Ge(b, 0)):
min_max_replacements[atom] = b
if min_max_replacements:
expr = expr.xreplace(min_max_replacements)
if expr.has(TruncToInt):
trunc_replacements = {}
for atom in expr.atoms(TruncToInt):
if isinstance(atom.args[0], IntTrueDiv):
base, divisor = atom.args[0].args
if base % divisor == 0:
trunc_replacements[atom] = CleanDiv(base, divisor)
else:
# TruncToInt(IntTrueDiv(a,b)) == FloorDiv(a, b)
trunc_replacements[atom] = FloorDiv(base, divisor)
if trunc_replacements:
expr = expr.xreplace(trunc_replacements)
# TODO it would seem that this pass is not necessary given the
# below replacement of // with /, but for nested FloorDivs
# the non-recursive replacement doesn't work, and
# recursive makes it hard to look up divisibility,
# because existing divisibility info has FloorDiv in it, not /.
# For now, just do a separate pass to catch the common nested case.
if expr.has(FloorDiv):
self._update_divisible()
div_replacements = {}
for atom in expr.atoms(FloorDiv):
base, divisor = atom.args
if isinstance(divisor, FloorDiv):
base1, divisor1 = divisor.args
if (
self.replace(Mod(base, divisor)) in self.divisible
and base == base1
and self.replace(Mod(base1, divisor1)) in self.divisible
):
div_replacements[atom] = divisor1
if div_replacements:
expr = expr.xreplace(div_replacements)
expr = safe_expand(expr)
if expr.has(FloorDiv):
div_replacements = {}
pows = expr.atoms(sympy.Pow)
rationals = expr.atoms(sympy.Rational).difference(expr.atoms(sympy.Integer))
for fd in expr.atoms(FloorDiv):
base, divisor = fd.args
if self.replace(Mod(base, divisor)) in self.divisible:
div_replacements[fd] = CleanDiv(base, divisor)
if div_replacements:
new_expr = expr.xreplace(div_replacements)
new_expr = safe_expand(new_expr)
new_pows = new_expr.atoms(sympy.Pow)
new_rationals = new_expr.atoms(sympy.Rational).difference(
new_expr.atoms(sympy.Integer)
)
# divisions simplified away
if new_pows.issubset(pows) and new_rationals.issubset(rationals):
expr = new_expr
return expr
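# Example (illustrative): if the known range of s0 proves s0 >= 1,
# simplify() rewrites Max(1, s0) to s0; likewise FloorDiv(a, b) can be
# rewritten to CleanDiv(a, b) once Mod(a, b) is known to be divisible.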
# TODO: overload for allow_none literal
@lru_cache(256)
def size_hint(
self, expr: sympy.Basic, *, allow_none: bool = False
) -> Optional[sympy.Basic]:
"""
Gets a size hint for a given expression from the underlying shapes we had.
Does not introduce a guard, so only use this when you can guarantee that
your code is still valid for arbitrary shapes (e.g., for optimization decisions).
"""
result_expr = safe_expand(expr).xreplace(self.var_to_val)
if not result_expr.is_number:
from torch.utils._sympy.singleton_int import SingletonInt
if isinstance(result_expr, SingletonInt):
return None
r = self._maybe_evaluate_static(result_expr, compute_hint=True)
if r is not None:
return r
if allow_none:
return None
if self.oblivious_var_to_val:
# See https://github.com/pytorch/pytorch/issues/137100#issuecomment-2495778113
correct_hint = result_expr.xreplace(self.oblivious_var_to_val)
counterfactual_hint = result_expr.xreplace(
{k: max(v, 2) for k, v in self.oblivious_var_to_val.items()}
)
if (
not correct_hint.free_symbols
and not counterfactual_hint.free_symbols
):
if correct_hint == counterfactual_hint:
log.info("oblivious_size hit %s -> %s", expr, correct_hint)
return correct_hint
else:
log.info(
"oblivious_size counterfactual failed %s -> %s != %s",
expr,
correct_hint,
counterfactual_hint,
)
else:
log.info(
"oblivious_size miss %s -> %s (counterfactual: %s)",
expr,
correct_hint,
counterfactual_hint,
)
if self.unbacked_var_to_val:
unsound_expr = result_expr.xreplace(self.unbacked_var_to_val)
if not unsound_expr.free_symbols:
log.warning(
"propagate_real_tensors size_hint(%s) -> %s", expr, unsound_expr
)
trace_structured(
"propagate_real_tensors",
metadata_fn=lambda: {
"expr": repr(expr),
"result": repr(unsound_expr),
"stack": structured.from_traceback(
CapturedTraceback.extract(skip=1).summary()
),
},
)
self.guard_or_defer_runtime_assert(
sympy.Eq(result_expr, unsound_expr),
f"propagate_real_tensors: {result_expr} == {unsound_expr}",
)
return unsound_expr
raise self._make_data_dependent_error(result_expr, expr)
return result_expr
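# Example (illustrative): with var_to_val = {s0: 3}, size_hint(s0 * 2)
# substitutes the hint and returns 6; purely unbacked expressions fall
# back to the oblivious/real-tensor paths above or raise a
# data-dependent error.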
# NB: keep in sync with size_hint
@lru_cache(256)
def has_hint(self, expr: sympy.Expr) -> bool:
result_expr = safe_expand(expr).xreplace(self.var_to_val)
return (
result_expr.is_number
or self._maybe_evaluate_static(result_expr) is not None
)
def _make_data_dependent_error(
self,
expr: sympy.Basic,
unhinted_expr: sympy.Basic,
*,
expr_sym_node_id: Optional[int] = None,
) -> GuardOnDataDependentSymNode:
# TODO: in a Dynamo context, having user code, and having the
# name of the local, will be much better
size_like_symbols = []
for s in expr.free_symbols:
stacktrace = "".join(self.var_to_stack[s].format())
self.log.debug(
"Data dependent variable '%s' allocated at:\n%s", s, stacktrace
)
if s in self.size_like:
size_like_symbols.append(s)
size_oblivious_result_msg = ""
sloc, maybe_extra_debug = self._get_stack_summary(True)
if expr.is_integer: # type: ignore[attr-defined]
desc = (
"Could not extract specialized integer from data-dependent expression"
)
else:
desc = "Could not guard on data-dependent expression"
size_oblivious_result_msg = (
"consider using data-dependent friendly APIs such as "
"guard_or_false, guard_or_true and statically_known_true."
)
msg = (
f"{desc} {expr} (unhinted: {unhinted_expr}). "
f"(Size-like symbols: {', '.join(map(str, size_like_symbols)) or 'none'})\n\n"
f"{size_oblivious_result_msg}\n"
f"Caused by: {sloc}\n"
'For more information, run with TORCH_LOGS="dynamic"\n'
"For extended logs when we create symbols, also add "
f'TORCHDYNAMO_EXTENDED_DEBUG_CREATE_SYMBOL="{",".join(map(str, expr.free_symbols))}"\n'
"If you suspect the guard was triggered from C++, add TORCHDYNAMO_EXTENDED_DEBUG_CPP=1\n"
"For more debugging help, see "
"https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit?usp=sharing\n"
+ maybe_extra_debug
# TODO: Help text about how to use our runtime tests to fix this
# problem
)
dtrace_structured(
"guard_on_data_dependent_error",
metadata_fn=lambda: {
"expr": repr(expr),
"unhinted_expr": repr(unhinted_expr),
"expr_id": self._expr_sym_node_id,
"stack": structured.from_traceback(
CapturedTraceback.extract(skip=1).summary()
),
},
)
return GuardOnDataDependentSymNode(expr, msg)
def _update_var_to_range(
self,
symbol: sympy.Symbol,
vr: ValueRanges,
vr_sloc: Optional[ValueRangesSLoc] = None,
*,
is_constraint: bool = False,
) -> None:
lower, upper = vr.lower, vr.upper
# If we have a size-like unbacked SymInt, refuse to refine the range to be
# less than two. This is because when we intersect this range
# with [2, inf] for size oblivious tests, the range would be
# unsatisfiable. In other words, once you have a size-like
# unbacked SymInt, we can never learn that it is exactly zero or one,
# because we would now give inconsistent results for all size
# oblivious tests!
if upper < 2 and symbol in self.size_like:
vr = ValueRanges(lower, 2)
# Updates the range and the guards corresponding to each bound of the symbol.
if symbol not in self.var_to_range:
self.log.debug("_update_var_to_range %s = %s (new)", symbol, vr)
self.var_to_range[symbol] = vr
if vr_sloc is None:
sloc = self._get_sloc()
vr_sloc = ValueRangesSLoc(sloc, sloc)
self.var_to_range_sloc[symbol] = vr_sloc
else:
old = self.var_to_range[symbol]
new = old & vr
if new != old:
if vr_sloc is None:
sloc = self._get_sloc()
vr_sloc = ValueRangesSLoc(sloc, sloc)
if new.lower != old.lower:
self.var_to_range_sloc[symbol].lower = vr_sloc.lower
if new.upper != old.upper:
self.var_to_range_sloc[symbol].upper = vr_sloc.upper
self.var_to_range[symbol] = new
self.log.debug("_update_var_to_range %s = %s (update)", symbol, new)
if (v := self.var_to_val.get(symbol)) is not None:
r = self.var_to_range[symbol]
if v not in r:
# For constraint failure, delay this for later
# TODO: Rework all of this, the constraint logic is very
# duplicative with regular reasoning
if not is_constraint:
assert v in r, f"{v} not in {r}"
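# Example (illustrative): refining s0 from [0, int_oo] with
# ValueRanges(2, 16) intersects to [2, 16]; for a size-like unbacked
# symbol, a proposed upper bound below 2 is widened back to 2 so that
# size-oblivious tests stay satisfiable.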
def _set_replacement(self, a: sympy.Symbol, tgt: sympy.Expr, msg: str) -> None:
"""
Adds or updates a replacement for a symbol.
Use this instead of `self.replacements[a] = tgt`.
"""
if tgt == self.replacements.get(a, None):
return
if a in tgt.free_symbols:
return
# Precondition: a == tgt
assert isinstance(a, sympy.Symbol)
if (
self.prefer_deferred_runtime_asserts_over_guards
and not _is_supported_equivalence(tgt)
):
return # continuing leads to placeholder shapes having complex expressions that we can't resolve
# Handles nested tensor symbolic variables which don't have
# var_to_range bounds
tgt_bound = None
if a in self.var_to_range:
src_bound = self.var_to_range[a]
# First, refine the value range of a based on the computed value range
# of tgt. This is always OK to do, even if we decide not to do the
# substitution in the end. This might be a no-op, if a already has
# a tighter bound
tgt_bound = self.bound_sympy(tgt)
self._update_var_to_range(a, tgt_bound)
# Next, check if we can update the range of free symbols in tgt
# based on the range in a. But only do it if:
# - the source bound non-trivially improves over what we get out of
# the existing bounds.
# - the replacement is univariate and we can invert the tgt expression
if not tgt_bound.issubset(src_bound) and len(tgt.free_symbols) == 1:
b = next(iter(tgt.free_symbols))
# Try to invert the equality
r = try_solve(sympy.Eq(a, tgt), b, floordiv_inequality=False)
if r is not None:
self.log.debug(
"set_replacement: solve for %s in %s == %s gives %s",
b,
a,
tgt,
r,
)
# The solution here can be non-integral, for example, if
# we have s0 = 2*s1, then s1 = s0/2. What we would like
# to do is calculated the bounds in arbitrary precision,
# and then requantize the bound to integers when we are
# done.
rat_b_bound = self.bound_sympy(r[1])
b_bound = ValueRanges(
CeilToInt(rat_b_bound.lower), FloorToInt(rat_b_bound.upper)
)
self._update_var_to_range(b, b_bound, self.var_to_range_sloc[a])
tgt_bound = self.bound_sympy(tgt)
assert tgt_bound.issubset(src_bound), (
f"{tgt_bound=} not a subset of {src_bound=}"
)
# TODO: Should we propagate size-like-ness?
#
# Pros: if u0 is size-like, intuitively u0 == u1 should cause u1
# to become size-like.
#
# Cons: if u0 is size-like, what about u0 - 1 == u1? You CAN'T
# propagate in this case, because what if u0 == 0, then u1 is negative
# and clearly isn't a size. So, at minimum, any f(x) whose value
# range isn't [0, inf] given x in [0, inf] cannot propagate
# size-like-ness. But there are many situations where you could
# imagine u1 is going to be size-like and actually you just didn't
# have a refined enough value range on u0. Since even innocuous
# looking arithmetic operations can destroy size-like-ness, it's
# best to not propagate it at all and force the user to annotate it
# as necessary.
#
# Compromise: we preserve size-like-ness only for exact equality
# and nothing else.
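# Illustrative sketch (hypothetical unbacked symbols): learning u0 == u1
# below marks u1 size-like when u0 is; learning u0 == u1 - 1 propagates
# nothing, since u1 - 1 being a size does not make u1 itself one.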
if a in self.size_like and isinstance(tgt, sympy.Symbol):
self.size_like.add(tgt)
elif isinstance(tgt, sympy.Symbol) and tgt in self.size_like:
self.size_like.add(a)
# Now, decide if we will do the substitution.
#
# - If the source has a non-trivial range, only substitute if
# we preserve this range. Note that we may have propagated
# the src_range to free variables in tgt when tgt is univariate
# and we could find an inverse, which helps us achieve this.
# This ensures we never "forget" about user defined ranges,
# even if they end up being defined on composite formulas
# like s0 + s1.
#
# - If the variable is unbacked, only substitute if the substitution
# would preserve the bounds also under size-like-ness conditions.
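# Illustrative sketch (hypothetical symbols and ranges): suppose the user
# constrained s0 to [4, 64] and we derived s0 == s1 + s2 with
# bound_sympy(s1 + s2) == [2, inf]. Since s1 + s2 has two free symbols,
# no inverse refinement happens, the subset check below fails, the
# substitution is skipped, and the [4, 64] range on s0 survives.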
if not tgt_bound.issubset(src_bound):
self.log.debug(
"skipped set_replacement %s = %s (%s) [%s not subset of %s]",
a,
tgt,
msg,
tgt_bound,
src_bound,
)
return
elif a in self.size_like:
tgt_bound_so = self.bound_sympy(tgt, size_oblivious=True)
src_bound_so = self.bound_sympy(a, size_oblivious=True)
if not tgt_bound_so.issubset(src_bound_so):
self.log.debug(
"skipped set_replacement %s = %s (%s) "
"[%s not subset of %s (size-oblivious conditions)]",
a,
tgt,
msg,
tgt_bound_so,
src_bound_so,
)
return
if isinstance(tgt, (sympy.Integer, sympy.Float)):
# specializing to a constant, which is likely unexpected (unless
# you specified dynamic=True)
user_tb = TracingContext.extract_stack()
trace_structured(
"symbolic_shape_specialization",
metadata_fn=lambda: {
"symbol": repr(a),
"sources": [s.name() for s in self.var_to_sources.get(a, [])],
"value": repr(tgt),
"reason": msg,
"stack": structured.from_traceback(
CapturedTraceback.extract(skip=1).summary()
),
"user_stack": (
structured.from_traceback(user_tb) if user_tb else None
),
},
)
for source in self.var_to_sources.get(a, []):
if user_tb:
self.specialization_stacks[source] = user_tb
if config.print_specializations:
self.log.warning(
"Specializing %s to %s", self.var_to_sources[a][0].name(), tgt
)
self.log.debug("SPECIALIZATION", stack_info=True)
log.info("set_replacement %s = %s (%s) %s", a, tgt, msg, tgt_bound)
self.replacements[a] = tgt
# NB: the replacement may get refined, but the user will find the
# FIRST one most useful (TODO: Maybe we could consider tracking all of
# them)
if a not in self.replacements_slocs:
self.replacements_slocs[a] = self._get_sloc()
self._update_version_counter()
# When specializing 'a == tgt', the equality should be also conveyed to
# Z3, in case an expression uses 'a'.
self._add_target_expr(sympy.Eq(a, tgt, evaluate=False))
def _add_divisible(self, expr: sympy.Expr) -> None:
self.divisible.add(expr)
self._update_version_counter()
@_lru_cache
@record_shapeenv_event()
def _find(self, a: sympy.Symbol) -> sympy.Expr:
"""
Implements a DSU-like algorithm to find the variable that represents a.
Also handles transitive non-identity replacements; e.g. given
replacements {a: b + c, c: d}, _find(a) resolves to b + d and compresses
the stored path.
"""
if a not in self.replacements:
return a
res = self.replacements[a]
cur_replace = {s: self._find(s) for s in res.free_symbols}
replaced, changed = self.replacements[a]._xreplace(cur_replace)
if changed:
self._set_replacement(a, replaced, "find")
return self.replacements[a]
@lru_cache(256)
def _maybe_guard_rel(self, expr: sympy.Expr) -> None:
"""
The relational guard is guarded to be true. Use this information to
simplify shapes (i.e. a == b or a % 5 == 0)
"""
if isinstance(expr, sympy.And):
for arg in expr.args:
self._maybe_guard_rel(arg)
return
elif not isinstance(expr, sympy.Rel):
return
# A good example of what goes wrong if you don't do this is
# python test/functorch/test_aotdispatch.py -k
# test_aot_autograd_symbolic_module_exhaustive_nn_LazyConv3d_cpu_float32
if isinstance(expr, sympy.Ne):
return
free = list(expr.free_symbols)
assert len(free) > 0, (
f"The expression should not be static by this point: {expr}"
)
# In case of really gnarly expression, we don't blow up
if len(free) > 5:
return
# Prioritize unbacked symints for solving by ordering them last.
# Prefer to simplify out lexicographically higher symbols (i.e. simplify out s4 over s3).
# (NB: this unfortunately isn't strictly equivalent to simplifying out newer symbols)
# Prefer to simplify out symbols with ephemeral sources.
def _smart_symbol_sort(x: sympy.Symbol) -> tuple[int, int, str]:
has_only_ephemeral_sources = x in self.var_to_sources and all(
s.is_ephemeral() for s in self.var_to_sources[x]
)
# NB: size_hint is int, not sympy.Expr, do not use int_oo here
hint_size = self.size_hint(x, allow_none=True)
if hint_size is None:
size = sys.maxsize
elif symbol_is_type(x, SymT.SIZE):
assert isinstance(hint_size, sympy.Expr)
size = int(hint_size)
else:
size = sys.maxsize
name = x.name
# 1 puts ephemeral sourced symbols first when sorting in reverse
return (1 if has_only_ephemeral_sources else 0, size, name)
free = sorted(free, key=_smart_symbol_sort, reverse=True) # type: ignore[attr-defined]
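# Illustrative sketch (hypothetical symbols and hints): with free symbols
# s3 (hint 3), s4 (hint 3) and unbacked u0 (no hint -> sys.maxsize), the
# reverse sort yields [u0, s4, s3], so try_solve below eliminates u0
# first and otherwise prefers to simplify out s4 over s3.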
lhs = expr.lhs
rhs = expr.rhs
self._refine_ranges(expr)
# The rest of this stuff is for equality only
if not isinstance(expr, sympy.Eq):
return
if not expr.has(Mod):
try:
floor_div_atoms = lhs.atoms(FloorDiv).union(rhs.atoms(FloorDiv))
if len(floor_div_atoms) > 0 and any(
a.divisor != 1 for a in floor_div_atoms
):
raise NotImplementedError
# Never replace unbacked symbols with other unbacked symbols that are
# not function arguments. (ex:mark_unbacked symbols are fine to replace
# other unbacked, but not those coming from .item() calls).
# This is error prone because you can cause references to
# unbacked symbols to time travel backwards. E.g.,
#
# u1 = x.item()
# ... use of u1 ...
# u2 = y.item()
# u3 = z.item()
# torch._check(u1 == u2 + u3)
#
# If you replace u1 with u2 + u3, then the use of u1 now
# references u2 and u3 prior to them actually being bound at
# runtime. It's pretty inconvenient to setup control
# dependencies for substitutions, so ban it entirely.
def trivial_solve(lhs: sympy.Expr, rhs: sympy.Expr) -> bool:
if isinstance(lhs, sympy.Symbol):
if free_unbacked_symbols(
lhs
) and not _free_non_source_unbacked_symbols(
rhs, self.unbacked_inputs
):
return True
if symbol_is_type(lhs, SymT.FLOAT):
return True
# TODO: Maybe trivial solutions for int should also be
# done?
return False
# short-circuit when no solving is needed
if trivial_solve(lhs, rhs):
self._set_replacement(lhs, self._find(rhs), "trivial_lhs")
elif trivial_solve(rhs, lhs):
self._set_replacement(rhs, self._find(lhs), "trivial_rhs")
else:
r = try_solve(expr, free[0], floordiv_inequality=False)
if r is not None and all(
t.is_integer for t in sympy.preorder_traversal(r[1])
):
new_var = self._find(r[1])
ok = len(free_unbacked_symbols(new_var)) == 0
if ok:
self._set_replacement(free[0], new_var, "solve")
except NotImplementedError:
pass
else:
# expression has mod.
mod_expr = next(iter(expr.atoms(Mod)))
try:
r = try_solve(expr, mod_expr, floordiv_inequality=False)
if r is not None and r[1] == 0:
self._add_divisible(mod_expr)
except NotImplementedError:
pass
return
# See: Note - On 0/1 specialization
def _default_value_range(
self, do_not_specialize_zero_one: bool = False
) -> ValueRanges:
lower = 0 if (do_not_specialize_zero_one or not self.specialize_zero_one) else 2
return ValueRanges(lower, int_oo)
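# Illustrative sketch: with the default specialize_zero_one=True, a fresh
# backed size symbol starts out constrained away from 0 and 1, which are
# specialized into the graph rather than left symbolic:
#   self._default_value_range()  # -> ValueRanges(2, int_oo)
#   self._default_value_range(do_not_specialize_zero_one=True)
#   # -> ValueRanges(0, int_oo)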
def _default_unspecified_value_range(self) -> ValueRanges:
return ValueRanges.unknown_int()
@_lru_cache
def _simplify_floor_div(self, expr: sympy.Expr) -> sympy.Expr:
floor_divs = tuple(expr.atoms(FloorDiv))
# we expect floor_divs to be exact,
# and thus add the guards for the exact floordivs,
# even if tracing doesn't require them otherwise
for fd in reversed(floor_divs):
base, divisor = fd.args
mod_expr = Mod(base, divisor)
eq_expr = sympy.Eq(mod_expr, 0)
# add necessary mod guards
self.evaluate_expr(eq_expr)
return self.simplify(expr)
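# Illustrative sketch (hypothetical expression): simplifying
# 4 * FloorDiv(s0, 4) first guards Eq(Mod(s0, 4), 0); once s0 % 4 == 0 is
# known divisible, self.simplify can rewrite the product back to s0
# exactly.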
# We're about to add a guard/runtime assert, check if the ShapeEnv is frozen
# and if so issue a warning
def _check_frozen(self, expr: sympy.Basic, concrete_val: sympy.Basic) -> None:
if self.frozen:
self.counter["ignored_backward_guard"] += 1
signpost_event(
"dynamic",
"evaluate_expr_frozen",
{
**self.co_fields,
"ignored_guard": f"{expr} == {concrete_val}",
# no version = original state (this signpost is expected)
# version 2 = dynamic backwards is eagerly compiled
"version": 2,
},
)
log.info(
"Ignored guard %s == %s, this could result in accuracy problems",
expr,
concrete_val,
# only print stack trace when debug mode is on (e.g. TORCH_LOGS="dynamic")
stack_info=log.getEffectiveLevel() < logging.WARNING,
)
def _get_user_frame(self) -> Optional[types.FrameType]:
frame = inspect.currentframe()
while frame is not None:
if frame.f_code.co_filename not in uninteresting_files():
return frame
frame = frame.f_back
return frame
def _get_stack_summary(
self, is_debug: bool = False, framework_loc: Optional[str] = None
) -> tuple[SLoc, str]:
floc: Optional[Union[str, traceback.FrameSummary]] = framework_loc
if floc is None:
frame = self._get_user_frame()
try:
if frame is not None:
floc = traceback.FrameSummary(
frame.f_code.co_filename,
frame.f_lineno,
frame.f_code.co_name,
)
finally:
del frame
# NB: this stack is truncated, but it's fine because the main
# stack_info will give you the rest of the info you need
maybe_user_loc = None
user_tb = TracingContext.extract_stack()
if user_tb:
idx = len(user_tb) - 1
while idx > 0 and user_tb[idx].filename in uninteresting_files():
idx -= 1
maybe_user_loc = format_frame(user_tb[idx], line=True)
maybe_extra_debug = ""
if is_debug and user_tb:
maybe_extra_debug = (
"\nUser Stack (most recent call last):\n"
+ " (snipped, see stack below for prefix)\n"
+ "".join(traceback.format_list(user_tb))
)
if is_debug and config.extended_debug_cpp:
cpp_stack = CapturedTraceback.extract(cpp=True)
maybe_extra_debug += "\nC++ stack trace:\n" + "".join(cpp_stack.format())
elif is_debug:
maybe_extra_debug += (
"\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1"
)
return SLoc(floc, maybe_user_loc), maybe_extra_debug
# Pass in framework_loc to override the framework location info
def _get_sloc(self, framework_loc: Optional[str] = None) -> SLoc:
sloc, _ = self._get_stack_summary(framework_loc=framework_loc)
return sloc
def _generate_unique_id(self, source_name: str) -> int:
attempt = int(hashlib.sha256(source_name.encode()).hexdigest(), 16) % 100
while attempt in self.unique_ids:
attempt += 1
self.unique_ids.add(attempt)
return attempt
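# Illustrative sketch (hypothetical source name):
#   self._generate_unique_id("L['x'].size()[0]")
# hashes the name into [0, 100) and then probes upward past any ids
# already handed out, so the result is deterministic for a given name and
# insertion order, and unique within this ShapeEnv.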
def _find_frame_locals(self) -> _FrameLocalResult:
"""
Given the current user code frame, finds the relevant lines of code,
values of symbolic locals, and free symbols involved.
"""
frame_locals: dict[str, Any] = {}
frame_symbols: dict[str, str] = {}
if (
frame := _find_user_code_frame()
) is None or frame.f_code.co_filename == "<string>":
return _FrameLocalResult()
# find bytecode instructions relevant to the frame
instructions = list(dis.Bytecode(frame.f_code))
co_lines, offset = inspect.getsourcelines(frame.f_code)
start, end, cur = None, None, None
# pyrefly: ignore [bad-assignment]
for i, instr in enumerate(instructions):
if instr.starts_line is not None:
cur = instr.starts_line
if cur != frame.f_lineno:
continue
if start is None:
start = end = i
else:
end = i
if start is None or end is None: # no instructions found
return _FrameLocalResult()
# track involved locals and free symbols
def go(x: Any) -> Optional[str]:
if isinstance(x, torch.Tensor):
for y in x.size():
go(y)
for y in x.stride():
go(y)
go(x.storage_offset())
return (
f"Tensor(shape: {x.size()}, "
f"stride: {x.stride()}, "
f"storage_offset: {x.storage_offset()})"
)
elif isinstance(x, (SymBool, SymInt, SymFloat)):
for s in x.node.expr.free_symbols:
if str(s) in frame_symbols: # type: ignore[operator]
continue
if s in self.var_to_sources:
frame_symbols[str(s)] = self.var_to_sources[s][0].name() # type: ignore[assignment]
return str(x)
return None
# go through instructions, seeing linenos & involved locals
last_lineno = frame.f_lineno
for instr in instructions[start : end + 1]:
if (lineno := instr.starts_line) is not None:
last_lineno = max(last_lineno, lineno)
if isinstance(instr.argval, str) and instr.argval in frame.f_locals:
flat_locals = pytree.tree_flatten(frame.f_locals[instr.argval])[0]
frame_locals[instr.argval] = [
go(flat_local) for flat_local in flat_locals
]
# store LOC
locs = co_lines[frame.f_lineno - offset : last_lineno + 1 - offset]
if not locs:
return _FrameLocalResult()
indent = len(locs[0]) - len(locs[0].lstrip())
frame_loc = "".join([loc[indent:] for loc in locs]).strip() # type: ignore[assignment]
return _FrameLocalResult(
loc=frame_loc, locals=frame_locals, symbols=frame_symbols
)
def _log_guard(self, prefix: str, g: SympyBoolean, forcing_spec: bool) -> None:
dtrace_structured(
"guard_added",
metadata_fn=lambda: {
"expr": str(g),
"prefix": prefix,
"expr_node_id": self._expr_sym_node_id,
"user_stack": structured.get_user_stack(3),
"stack": structured.get_framework_stack(3),
"symbol_to_sources": {
str(v): k
for k, v in self.source_to_var.items()
if v in g.free_symbols
},
"frame_locals": asdict(self._find_frame_locals()),
},
)
trace_structured(
"guard_added_fast",
metadata_fn=lambda: {
"expr": str(g),
"user_stack": structured.from_traceback(TracingContext.extract_stack()),
"stack": structured.from_traceback(
CapturedTraceback.extract(skip=1).summary()
),
},
)
if self.log.isEnabledFor(logging.INFO):
str_g = str(g)
is_debug = (
config.extended_debug_guard_added is not None
and str_g == config.extended_debug_guard_added
)
sloc, maybe_extra_debug = self._get_stack_summary(is_debug)
maybe_more_info = ""
if not is_debug:
maybe_more_info = (
", for more info run with "
f'TORCHDYNAMO_EXTENDED_DEBUG_GUARD_ADDED="{str_g}"'
)
self.log.info(
"%s %s [guard added] %s%s%s",
prefix if not forcing_spec else f"{prefix} (forcing_spec)",
str_g,
sloc,
maybe_more_info,
maybe_extra_debug,
stack_info=is_debug,
)
# A local variable to evaluate_expr stored in the class to avoid
# using it for the lru_cache that is on top of it since it does
# not affect the results. When needed it's read directly.
_expr_sym_node_id: Optional[int] = None
def evaluate_sym_node(
self,
sym_node: SymNode,
size_oblivious: bool = False,
fallback_value: Optional[bool] = None,
) -> sympy.Basic:
"""
Given a SymNode, evaluates sym_node.expr, adding guards if necessary.
"""
self._expr_sym_node_id = id(sym_node)
return self.evaluate_expr(
sym_node.expr,
sym_node.hint,
sym_node.fx_node,
size_oblivious,
fallback_value=fallback_value,
)
def _is_python_assert(self) -> bool:
# Check if this boolean is used in an assertion, bytecode pattern for
# assertions is pretty stable for Python 3.7--3.13, ported with minimal
# changes from torch/fx/proxy.py
# Bytecode pattern for `assert` statements:
# TO_BOOL / COMPARE_OP # Only for Python >= 3.13
# POP_JUMP_IF_TRUE
# LOAD_ASSERTION_ERROR
# RAISE_VARARGS
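# Illustrative sketch (CPython 3.12, hypothetical user source): a line
# such as
#   assert u0 == 0
# disassembles roughly to
#   COMPARE_OP (==); POP_JUMP_IF_TRUE; LOAD_ASSERTION_ERROR; RAISE_VARARGS 1
# which is the instruction sequence matched below.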
frame = self._get_user_frame()
assert frame is not None
insts = list(dis.get_instructions(frame.f_code))
if sys.version_info >= (3, 11):
# For Python >= 3.11, instructions can be 2-4 bytes long.
from bisect import bisect_left
cur = bisect_left(insts, frame.f_lasti, key=lambda x: x.offset)
else:
# For Python <= 3.10, instructions are always 2 bytes.
cur = frame.f_lasti // 2
if sys.version_info >= (3, 13):
if insts[cur].opname in ("TO_BOOL", "COMPARE_OP"):
# Peek 1 instruction further.
cur += 1
assert_insts = torch._dynamo.symbolic_convert.get_assert_bytecode_sequence(
False
)
cur_insts = insts[cur + 1 : cur + 1 + len(assert_insts)]
cur_insts = [inst.opname for inst in cur_insts]
return cur_insts == assert_insts
def _log_real_tensor_propagation(
self, orig_expr: sympy.Basic, unsound_result: sympy.Basic
) -> None:
log.warning(
"propagate_real_tensors evaluate_expr(%s) -> %s",
orig_expr,
unsound_result,
)
trace_structured(
"propagate_real_tensors",
metadata_fn=lambda: {
"expr": repr(orig_expr),
"result": repr(unsound_result),
"stack": structured.from_traceback(
CapturedTraceback.extract(skip=1).summary()
),
},
)
dtrace_structured(
"propagate_real_tensors_provenance",
metadata_fn=lambda: {
"expr": repr(orig_expr),
"result": repr(unsound_result),
"expr_node_id": self._expr_sym_node_id,
"user_stack": structured.get_user_stack(3),
"stack": structured.get_framework_stack(3),
"symbol_to_sources": {
str(v): k
for k, v in self.source_to_var.items()
if v in orig_expr.free_symbols
},
"frame_locals": asdict(self._find_frame_locals()),
},
)
def evaluate_expr(
self,
orig_expr: sympy.Basic,
hint: Optional[Union[int, bool, float]] = None,
fx_node: Optional[torch.fx.Node] = None,
size_oblivious: bool = False,
fallback_value: Optional[bool] = None,
*,
forcing_spec: bool = False,
) -> sympy.Basic:
"""
Given an expression, evaluates it, adding guards if necessary
When fallback_value is not None, the function returns fallback_value instead of failing with a data-dependent error.
"""
# Add extra state that evaluate_expr() depends on.
suppress_guards_tls = ShapeEnv._suppress_guards_tls()
return self._inner_evaluate_expr(
orig_expr,
hint,
fx_node,
size_oblivious,
forcing_spec,
suppress_guards_tls,
fallback_value,
)
@lru_cache(256)
@record_shapeenv_event(save_tracked_fakes=True, name="evaluate_expr")
def _inner_evaluate_expr(
self,
orig_expr: sympy.Basic,
hint: Optional[Union[int, bool, float]],
fx_node: Optional[torch.fx.Node],
size_oblivious: bool,
forcing_spec: bool,
_suppress_guards_tls: bool,
fallback_value: Optional[bool] = None,
) -> sympy.Basic:
try:
return self._evaluate_expr(
orig_expr,
hint,
fx_node,
size_oblivious,
fallback_value,
forcing_spec=forcing_spec,
)
except Exception as e:
if isinstance(e, GuardOnDataDependentSymNode):
pass
else:
self.log.warning(
"failed during evaluate_expr(%s, hint=%s, size_oblivious=%s, forcing_spec=%s",
orig_expr,
hint,
size_oblivious,
forcing_spec,
)
raise
def _log_suppressed_dde(self, a: SymBool, assumed_value: bool) -> None:
sloc, extra = self._get_stack_summary(True)
log.info(
"could not evaluate %s due to data dependency, it was assumed to be %s with no runtime assertions %s %s",
a,
assumed_value,
sloc,
extra,
)
def _evaluate_expr(
self,
orig_expr: sympy.Basic,
hint: Optional[Union[bool, int, float]] = None,
fx_node: Optional[torch.fx.Node] = None,
size_oblivious: bool = False,
fallback_value: Optional[bool] = None,
*,
forcing_spec: bool = False,
) -> sympy.Basic:
# TODO: split conjunctions and evaluate them separately
if isinstance(
orig_expr,
(sympy.logic.boolalg.BooleanTrue, sympy.logic.boolalg.BooleanFalse),
):
return orig_expr
# Don't track this one. (Because this cache is inside this function the
# cache only lasts for the invocation of this function call)
@functools.cache
def compute_concrete_val() -> sympy.Basic:
if hint is None:
# This is only ever called for expressions WITHOUT unbacked
# symbols
r = self.size_hint(orig_expr)
assert r is not None
return r
else:
return sympy.sympify(hint)
concrete_val: Optional[sympy.Basic]
# Check if:
# 1. 'translation_validation' is set
# 2. the corresponding 'fx_node' is not 'None'
# 3. the guard should not be suppressed
# 4. the guard doesn't contain backed symfloat symbols
# since z3 can't handle floats
# 5. fallback_value is None.
# If all of the above checks pass, we create an FX node representing the
# actual expression to be guarded.
node = None
fresh = False
if (
self._translation_validation_enabled
and fx_node is not None
and not self._suppress_guards_tls()
and not size_oblivious
and not any(symbol_is_type(s, SymT.FLOAT) for s in orig_expr.free_symbols)
and fallback_value is None
):
# TODO: does this even work with unbacked symbols?
concrete_val = compute_concrete_val()
if concrete_val is sympy.true:
node, fresh = self._create_fx_call_function(torch._assert, (fx_node,))
elif concrete_val is sympy.false:
neg, _ = self._create_fx_call_function(operator.not_, (fx_node,))
node, fresh = self._create_fx_call_function(torch._assert, (neg,))
else:
eql, _ = self._create_fx_call_function(
operator.eq, (fx_node, concrete_val)
)
node, fresh = self._create_fx_call_function(torch._assert, (eql,))
assert node is not None
# If this is a fresh node, we have to remember the event index that
# corresponds to this assertion node.
# Reason: so that, given an assertion node, we can replay the ShapeEnv
# events until the point where this assertion node was freshly created.
if fresh:
self._add_fx_node_metadata(node)
# After creating the FX node corresponding to orig_expr, we must make sure that
# no error will be raised until the end of this function.
#
# Reason: the translation validation may become invalid otherwise.
#
# If an error is raised before the end of this function, we remove the FX node
# inserted, and re-raise the error.
guard = None
try:
if orig_expr.is_number:
self.log.debug("eval %s [trivial]", orig_expr)
if hint is not None:
if isinstance(hint, bool):
assert orig_expr == hint, f"{orig_expr} != {hint}"
else:
assert sympy.Eq(orig_expr, hint), f"{orig_expr} != {hint}"
return orig_expr
expr = orig_expr
static_expr = self._maybe_evaluate_static(
expr, size_oblivious=size_oblivious
)
if static_expr is not None:
self.log.debug(
"eval %s == %s [statically known]",
(
f"size_oblivious({orig_expr})"
if size_oblivious
else orig_expr
),
static_expr,
)
if (
not size_oblivious
and config.backed_size_oblivious
and hint is not None
):
# TODO: maybe reconcile this with use of counterfactual hints
# in unbacked case
assert static_expr == hint, f"{static_expr} != {hint}"
return static_expr
transmute_into_runtime_assert = False
concrete_val = None
if not (expr.free_symbols <= self.var_to_val.keys()):
# TODO: dedupe this with _maybe_evaluate_static
# Attempt to eliminate the unbacked SymInt
new_expr = self._maybe_evaluate_static(expr, unbacked_only=True)
assert new_expr is not None
if not (new_expr.free_symbols <= self.var_to_val.keys()):
ok = False
# fallback_value is set when guard_or_true or guard_or_false are used.
if not ok and fallback_value is not None:
self._log_suppressed_dde(orig_expr, fallback_value)
return fallback_value
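# Illustrative sketch (hypothetical call site): the branch above is what
# guard_or_false(u0 == 0) hits for an undecidable unbacked u0; instead of
# raising a data-dependent error it logs the suppression and the caller
# receives False.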
# oblivious_var_to_val will be defined iff we have sizes with DimDynamic.OBLIVIOUS_SIZE type.
# See https://github.com/pytorch/pytorch/issues/137100#issuecomment-2495778113
if (
self.oblivious_var_to_val
and not (
correct_hint := orig_expr.xreplace(
self.oblivious_var_to_val
)
).free_symbols
and not (
counterfactual_hint := orig_expr.xreplace(
{
k: max(2, v)
for k, v in self.oblivious_var_to_val.items()
}
)
).free_symbols
and correct_hint == counterfactual_hint
):
# TODO: better logging
log.info(
"oblivious_size %s -> %s (passed counterfactual)",
orig_expr,
correct_hint,
)
concrete_val = correct_hint
# NB: do NOT transmute into runtime assert
ok = True
# unbacked_var_to_val is not None iff propagate_real_tensors is on.
# if propagate_real_tensors is on, we check the example values to generate (unsound_result)
# and if they pass we add a runtime assertion and continue.
if (
not ok
and self.unbacked_var_to_val
and not (
unsound_result := orig_expr.xreplace(
self.unbacked_var_to_val
).xreplace(self.var_to_val)
).free_symbols
):
self._log_real_tensor_propagation(orig_expr, unsound_result)
transmute_into_runtime_assert = True
concrete_val = unsound_result
ok = True
# Check if this is coming from a python assert statement, if so, convert it to a runtime assertion
# instead of failing.
if not ok and self.trace_asserts and self._is_python_assert():
concrete_val = sympy.true
transmute_into_runtime_assert = True
ok = True
if not ok:
raise self._make_data_dependent_error(
expr.xreplace(self.var_to_val),
expr,
expr_sym_node_id=self._expr_sym_node_id,
)
else:
expr = new_expr
if concrete_val is None:
concrete_val = compute_concrete_val()
self._check_frozen(expr, concrete_val)
if (
config.inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY
and isinstance(hint, bool)
and isinstance(expr, (sympy.Eq, sympy.Ne))
):
expr = sympy.Not(expr)
# Turn this into a boolean expression, no longer need to consult
# concrete_val
if concrete_val is sympy.true:
g = cast(SympyBoolean, expr)
elif concrete_val is sympy.false:
g = sympy.Not(expr)
else:
g = sympy.Eq(expr, concrete_val) # type: ignore[arg-type]
if transmute_into_runtime_assert:
self.guard_or_defer_runtime_assert(
g, f"propagate_real_tensors: {orig_expr} == {concrete_val}"
)
return concrete_val
if not self._suppress_guards_tls():
self._log_guard("eval", g, forcing_spec=forcing_spec)
# TODO: If we successfully eliminate a symbol via equality, it
# is not actually necessary to save a guard for the equality,
# as we will implicitly generate a guard when we match that
# input against the symbol. Probably the easiest way to
# implement this is to have maybe_guard_rel return a bool
# saying if it "subsumed" the guard (and therefore the guard
# is no longer necessary)
self._maybe_guard_rel(g)
if (
torch.compiler.is_exporting()
and self.prefer_deferred_runtime_asserts_over_guards
):
# it's fine to defer simple guards here without checking,
# the _maybe_guard_rel() call above will set replacements if possible,
# and so the result here will be statically known
self.guard_or_defer_runtime_assert(g, f"evaluate_expr: {orig_expr}")
else:
# at this point, we've evaluated the concrete expr value, and have
# flipped/negated the guard if necessary. Now we know what to guard
# or defer to runtime assert on.
guard = ShapeGuard(
g, self._get_sloc(), size_oblivious=size_oblivious
)
self.guards.append(guard)
self.axioms.update(dict(self.get_implications(self.simplify(g))))
else:
self._log_guard("eval [guard suppressed]", g, forcing_spec=forcing_spec)
except Exception:
if fresh:
self._remove_fx_node(node)
raise
if not self._suppress_guards_tls():
if guard is not None: # we might have deferred this to runtime assert
for s in g.free_symbols:
self.symbol_guard_counter[s] += 1
# Forcing_spec to avoid infinite recursion
if (
not forcing_spec
and config.symbol_guard_limit_before_specialize is not None
and self.symbol_guard_counter[s]
> config.symbol_guard_limit_before_specialize
):
# Force specialization
self.log.info(
"symbol_guard_limit_before_specialize=%s exceeded on %s",
config.symbol_guard_limit_before_specialize,
s,
)
self.evaluate_expr(s, forcing_spec=True)
return concrete_val
def cleanup(self) -> None:
"""
Break reference cycles.
This destroys the stacks. If you really want to keep them, we
just need some way to break references on code objects.
"""
for s in self.var_to_stack.values():
s.cleanup()
for ras in self.deferred_runtime_asserts.values():
for ra in ras:
ra.stack.cleanup()
@lru_cache(256)
@record_shapeenv_event(save_tracked_fakes=True)
def guard_or_defer_runtime_assert(
self, orig_expr: SympyBoolean, msg: str, fx_node: Optional[torch.fx.Node] = None
) -> bool:
"""
Adds a guard that orig_expr is True if we can, or falls back to adding
an assert that is checked at runtime.
Args:
orig_expr (sympy.Expr): Boolean expression to assert is true
msg (str): Message to display on assertion failure
fx_node (Optional, torch.fx.Node): node in ``self.graph`` corresponding
to the expression, if applicable
"""
expr = orig_expr
# TODO: split conjunctions and evaluate them separately
static_expr = self._maybe_evaluate_static(expr)
if static_expr is not None:
self.log.debug(
"runtime_assert %s == %s [statically known]", orig_expr, static_expr
)
# TODO: assert bool(static_expr)
return bool(static_expr)
# Attempt to eliminate the unbacked SymInt
new_expr = self._maybe_evaluate_static(expr, unbacked_only=True)
assert new_expr is not None
if (
not self.prefer_deferred_runtime_asserts_over_guards
and new_expr.free_symbols <= self.var_to_val.keys()
):
# Do a normal guard
return self.evaluate_expr(new_expr, fx_node=fx_node)
# NB: Don't use new_expr as expr; it could contain gunk like shape0
# which we don't want to guard on
if (
self._translation_validation_enabled
and fx_node is not None
and not self._suppress_guards_tls()
):
node, fresh = self._create_fx_call_function(torch._assert, (fx_node,))
assert node is not None
if fresh:
self._add_fx_node_metadata(node)
if not self._suppress_guards_tls():
self._log_guard("runtime_assert", orig_expr, forcing_spec=False)
# If you're here because of this assert, read Note [Backwards runtime asserts]
# in torch/_inductor/graph.py
if self.runtime_asserts_frozen:
log.debug("runtime_asserts_frozen but then got %s", expr)
self._check_frozen(expr, sympy.true)
# eliminate symbols on equality tests / refine ranges
self._maybe_guard_rel(expr)
# canonicalise to remove equations that are trivially equal
orig_expr = expr
expr = canonicalize_bool_expr(expr)
stack = CapturedTraceback.extract(skip=1)
ra = RuntimeAssert(expr, msg, stack)
# TODO: Do this in a way that is less janky than int(s.name[1:])
cands = sorted(
(s for s in expr.free_symbols if symbol_is_type(s, SymT.UNBACKED_INT)),
key=lambda s: int(s.name[1:]),
)
# Is None when prefer_deferred_runtime_asserts_over_guards=True
# and the guard in question has no unbacked SymInts in front
ix = cands[-1] if cands else None
self.deferred_runtime_asserts.setdefault(ix, []).append(ra)
self.axioms.update(dict(self.get_implications(self.simplify(expr))))
self.num_deferred_runtime_asserts += 1
self._update_version_counter()
else:
self._log_guard(
"runtime_assert [guard suppressed]", orig_expr, forcing_spec=False
)
return True
# Refines the ranges of the variables present in 'guard'.
#
# This function tries to refine the range of the variables inside
# 'guard' by reasoning about it. Specifically, when 'guard' is a
# 'sympy.Relational' operation.
#
# It does mainly 3 things:
# 1. Tries to isolate a variable in the left-hand side
# 2. Compute the value range of the right-hand side
# 3. Update the value range of the variable, if better
def _refine_ranges(self, expr: SympyBoolean) -> None:
expr = self.simplify(expr)
for symbol in expr.free_symbols:
assert isinstance(symbol, sympy.Symbol)
if isinstance(self.var_to_val.get(symbol, None), SingletonInt):
# Skip var_to_range logic for SingletonInt which is only used
# for jagged layout NestedTensors today
continue
r = try_solve(expr, symbol)
if r is None or not (symbol.is_integer and r[1].is_integer):
# Range refinement only supports integer symbols for now.
# There are lots of SymPy bugs when it comes to comparing
# reals and integers, so we skip that for now.
continue
r_expr, rhs = r
vr = self.var_to_range[symbol]
lower, upper = vr.lower, vr.upper
rhs_vr = bound_sympy(rhs, self.var_to_range)
# Let's suppose that we have a preexisting range for x [0, 100].
# Now, we issue a guard x > y, where the range for y is [50, 150].
# Then, lower = 0, rhs_vr.lower = 50 and therefore refinement can happen,
# refining x to [51, 100], since x must be greater than y, but the lowest
# y could be is 50.
#
# sympy.Eq may update both lower and upper bounds.
# sympy.G{t,e} may update the lower bound, only.
# sympy.L{t,e} may update the upper bound, only.
if lower <= rhs_vr.lower and isinstance(
r_expr, (sympy.Eq, sympy.Ge, sympy.Gt)
):
# Strictly greater relations allow us to refine a bit more, since
# x > y implies that the lower bound for x is: y + 1.
lower = rhs_vr.lower + int(isinstance(r_expr, sympy.Gt))
if upper >= rhs_vr.upper and isinstance(
r_expr, (sympy.Eq, sympy.Le, sympy.Lt)
):
upper = rhs_vr.upper - int(isinstance(r_expr, sympy.Lt))
# Do nothing if the new value range is no better than what we already have.
if vr == ValueRanges(lower, upper):
continue
# Updates the range and the guards corresponding to each bound of the symbol.
self._update_var_to_range(symbol, ValueRanges(lower, upper))
# If the range is refined to singleton, set replacement
if self.var_to_range[symbol].is_singleton():
self._set_replacement(
symbol,
self.var_to_range[symbol].lower,
"range_refined_to_singleton",
)
# Clears the cache, since this update can change the result.
self._maybe_evaluate_static.cache_clear()
@lru_cache(maxsize=None)
@record_shapeenv_event()
def constrain_symbol_range(
self, s: sympy.Symbol, compiler_min: int, compiler_max: int
) -> None:
upd_vr = ValueRanges(compiler_min, compiler_max)
old_vr = self.var_to_range.get(s, ValueRanges.unknown())
self._update_var_to_range(s, upd_vr)
if (new_vr := self.var_to_range[s]) != old_vr:
log.info(
"constrain_symbol_range %s [%s, %s]", s, new_vr.lower, new_vr.upper
)
def _is_int(expr: object) -> bool:
return isinstance(expr, SymInt) and expr.node.expr.is_number
# WARNING: This is legacy, DO NOT USE
def _is_dim_dynamic(t: torch.Tensor, d: int) -> bool:
return hasattr(t, "_dynamo_dynamic_indices") and d in t._dynamo_dynamic_indices
|
ShapeEnv
|
python
|
coleifer__peewee
|
tests/models.py
|
{
"start": 122616,
"end": 126775
}
|
class ____(BaseTestCase):
def test_table_name(self):
class Foo(Model):
class Meta:
def table_function(klass):
return 'xxx_%s' % klass.__name__.lower()
class Bar(Foo): pass
class Baze(Foo):
class Meta:
table_name = 'yyy_baze'
class Biz(Baze): pass
class Nug(Foo):
class Meta:
def table_function(klass):
return 'zzz_%s' % klass.__name__.lower()
self.assertEqual(Foo._meta.table_name, 'xxx_foo')
self.assertEqual(Bar._meta.table_name, 'xxx_bar')
self.assertEqual(Baze._meta.table_name, 'yyy_baze')
self.assertEqual(Biz._meta.table_name, 'xxx_biz')
self.assertEqual(Nug._meta.table_name, 'zzz_nug')
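# Resolution exercised above (clarifying comment, not in the original
# test): an explicit table_name wins over an inherited table_function but
# is not itself inherited (Biz falls back to Foo's table_function), while
# a table_function propagates to subclasses until overridden.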
def test_composite_key_inheritance(self):
class Foo(Model):
key = TextField()
value = TextField()
class Meta:
primary_key = CompositeKey('key', 'value')
class Bar(Foo): pass
class Baze(Foo):
value = IntegerField()
foo = Foo(key='k1', value='v1')
self.assertEqual(foo.__composite_key__, ('k1', 'v1'))
bar = Bar(key='k2', value='v2')
self.assertEqual(bar.__composite_key__, ('k2', 'v2'))
baze = Baze(key='k3', value=3)
self.assertEqual(baze.__composite_key__, ('k3', 3))
def test_no_primary_key_inheritable(self):
class Foo(Model):
data = TextField()
class Meta:
primary_key = False
class Bar(Foo): pass
class Baze(Foo):
pk = AutoField()
class Zai(Foo):
zee = TextField(primary_key=True)
self.assertFalse(Foo._meta.primary_key)
self.assertEqual(Foo._meta.sorted_field_names, ['data'])
self.assertFalse(Bar._meta.primary_key)
self.assertEqual(Bar._meta.sorted_field_names, ['data'])
self.assertTrue(Baze._meta.primary_key is Baze.pk)
self.assertEqual(Baze._meta.sorted_field_names, ['pk', 'data'])
self.assertTrue(Zai._meta.primary_key is Zai.zee)
self.assertEqual(Zai._meta.sorted_field_names, ['zee', 'data'])
def test_inheritance(self):
db = SqliteDatabase(':memory:')
class Base(Model):
class Meta:
constraints = ['c1', 'c2']
database = db
indexes = (
(('username',), True),
)
only_save_dirty = True
options = {'key': 'value'}
schema = 'magic'
class Child(Base): pass
class GrandChild(Child): pass
for ModelClass in (Child, GrandChild):
self.assertEqual(ModelClass._meta.constraints, ['c1', 'c2'])
self.assertTrue(ModelClass._meta.database is db)
self.assertEqual(ModelClass._meta.indexes, [(('username',), True)])
self.assertEqual(ModelClass._meta.options, {'key': 'value'})
self.assertTrue(ModelClass._meta.only_save_dirty)
self.assertEqual(ModelClass._meta.schema, 'magic')
class Overrides(Base):
class Meta:
constraints = None
indexes = None
only_save_dirty = False
options = {'foo': 'bar'}
schema = None
self.assertTrue(Overrides._meta.constraints is None)
self.assertEqual(Overrides._meta.indexes, [])
self.assertFalse(Overrides._meta.only_save_dirty)
self.assertEqual(Overrides._meta.options, {'foo': 'bar'})
self.assertTrue(Overrides._meta.schema is None)
def test_temporary_inheritance(self):
class T0(TestModel): pass
class T1(TestModel):
class Meta:
temporary = True
class T2(T1): pass
class T3(T1):
class Meta:
temporary = False
self.assertFalse(T0._meta.temporary)
self.assertTrue(T1._meta.temporary)
self.assertTrue(T2._meta.temporary)
self.assertFalse(T3._meta.temporary)
|
TestMetaInheritance
|