| language (string, 1 value) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python
|
spyder-ide__spyder
|
spyder/api/plugin_registration/_confpage.py
|
{
"start": 558,
"end": 5918
}
|
class ____(PluginConfigPage):
def setup_page(self):
newcb = self.create_checkbox
self.plugins_checkboxes = {}
header_label = QLabel(
_("Disable a Spyder plugin (external or built-in) to prevent it "
"from loading until re-enabled here, to simplify the interface "
"or in case it causes problems.")
)
header_label.setWordWrap(True)
# To save the plugin elements
internal_elements = []
external_elements = []
# ------------------ Internal plugins ---------------------------------
for plugin_name in self.plugin.all_internal_plugins:
(conf_section_name,
PluginClass) = self.plugin.all_internal_plugins[plugin_name]
if not getattr(PluginClass, 'CAN_BE_DISABLED', True):
# Do not list core plugins that can not be disabled
continue
plugin_state = self.get_option(
'enable', section=conf_section_name, default=True)
cb = newcb('', 'enable', default=True, section=conf_section_name,
restart=True)
internal_elements.append(
dict(
title=PluginClass.get_name(),
description=PluginClass.get_description(),
icon=PluginClass.get_icon(),
widget=cb,
additional_info=_("Built-in"),
additional_info_color=SpyderPalette.COLOR_TEXT_4,
)
)
self.plugins_checkboxes[plugin_name] = (cb.checkbox, plugin_state)
# ------------------ External plugins ---------------------------------
for plugin_name in self.plugin.all_external_plugins:
(conf_section_name,
PluginClass) = self.plugin.all_external_plugins[plugin_name]
if not getattr(PluginClass, 'CAN_BE_DISABLED', True):
# Do not list external plugins that can not be disabled
continue
plugin_state = self.get_option(
f'{conf_section_name}/enable',
section=self.plugin._external_plugins_conf_section,
default=True
)
cb = newcb('', f'{conf_section_name}/enable', default=True,
section=self.plugin._external_plugins_conf_section,
restart=True)
external_elements.append(
dict(
title=PluginClass.get_name(),
description=PluginClass.get_description(),
icon=PluginClass.get_icon(),
widget=cb
)
)
self.plugins_checkboxes[plugin_name] = (cb.checkbox, plugin_state)
# Sort elements by title for easy searching
collator = Collator()
internal_elements.sort(key=lambda e: collator.sort_key(e['title']))
external_elements.sort(key=lambda e: collator.sort_key(e['title']))
# Build plugins table, showing external plugins first.
self._plugins_table = ElementsTable(
self, add_padding_around_widgets=True
)
self._plugins_table.setup_elements(
external_elements + internal_elements
)
# Finder to filter plugins
finder = FinderWidget(
self,
find_on_change=True,
show_close_button=False,
set_min_width=False,
)
finder.sig_find_text.connect(self._do_find)
# Layout
layout = QVBoxLayout()
layout.addWidget(header_label)
layout.addSpacing(15)
layout.addWidget(self._plugins_table)
layout.addWidget(finder)
layout.addSpacing(15)
self.setLayout(layout)
def apply_settings(self):
for plugin_name in self.plugins_checkboxes:
cb, previous_state = self.plugins_checkboxes[plugin_name]
if cb.isChecked() and not previous_state:
self.plugin.set_plugin_enabled(plugin_name)
PluginClass = None
external = False
if plugin_name in self.plugin.all_internal_plugins:
(__,
PluginClass) = self.plugin.all_internal_plugins[plugin_name]
elif plugin_name in self.plugin.all_external_plugins:
(__,
PluginClass) = self.plugin.all_external_plugins[plugin_name]
external = True # noqa
# TODO: Once we can test that all plugins can be restarted
# without problems during runtime, we can enable the
# autorestart feature provided by the plugin registry:
# self.plugin.register_plugin(self.main, PluginClass,
# external=external)
elif not cb.isChecked() and previous_state:
# TODO: Once we can test that all plugins can be restarted
# without problems during runtime, we can enable the
# autorestart feature provided by the plugin registry:
# self.plugin.delete_plugin(plugin_name)
pass
return set({})
def _do_find(self, text):
self._plugins_table.do_find(text)
|
PluginsConfigPage
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/chart_line.py
|
{
"start": 323,
"end": 3860
}
|
class ____(chart.Chart):
"""
A class for writing the Excel XLSX Line charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options: Optional[Dict[str, Any]] = None) -> None:
"""
Constructor.
"""
super().__init__()
if options is None:
options = {}
self.subtype = options.get("subtype")
if not self.subtype:
self.subtype = "standard"
self.default_marker = {"type": "none"}
self.smooth_allowed = True
# Override and reset the default axis values.
if self.subtype == "percent_stacked":
self.y_axis["defaults"]["num_format"] = "0%"
# Set the available data label positions for this chart type.
self.label_position_default = "right"
self.label_positions = {
"center": "ctr",
"right": "r",
"left": "l",
"above": "t",
"below": "b",
# For backward compatibility.
"top": "t",
"bottom": "b",
}
self.set_y_axis({})
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args) -> None:
# Override the virtual superclass method with a chart specific method.
# Write the c:lineChart element.
self._write_line_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_line_chart(self, args) -> None:
# Write the <c:lineChart> element.
if args["primary_axes"]:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not series:
return
subtype = self.subtype
if subtype == "percent_stacked":
subtype = "percentStacked"
self._xml_start_tag("c:lineChart")
# Write the c:grouping element.
self._write_grouping(subtype)
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:dropLines element.
self._write_drop_lines()
# Write the c:hiLowLines element.
self._write_hi_low_lines()
# Write the c:upDownBars element.
self._write_up_down_bars()
# Write the c:marker element.
self._write_marker_value()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag("c:lineChart")
def _write_d_pt_point(self, index, point) -> None:
# Write an individual <c:dPt> element. Override the parent method to
# add markers.
self._xml_start_tag("c:dPt")
# Write the c:idx element.
self._write_idx(index)
self._xml_start_tag("c:marker")
# Write the c:spPr element.
self._write_sp_pr(point)
self._xml_end_tag("c:marker")
self._xml_end_tag("c:dPt")
def _write_marker_value(self) -> None:
# Write the <c:marker> element without a sub-element.
attributes = [("val", 1)]
self._xml_empty_tag("c:marker", attributes)
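# Illustrative usage sketch (editor addition, not part of the original module):
# in practice a line chart of this subtype is obtained through xlsxwriter's
# public Workbook.add_chart() API rather than by instantiating the class above
# directly. The file name and data below are made up for illustration.
if __name__ == "__main__":
    import xlsxwriter

    workbook = xlsxwriter.Workbook("line_chart_demo.xlsx")
    worksheet = workbook.add_worksheet()
    # Write some sample data and plot it as a 100%-stacked line chart.
    worksheet.write_column("A1", [10, 40, 50, 20, 10, 50])
    chart = workbook.add_chart({"type": "line", "subtype": "percent_stacked"})
    chart.add_series({"values": "=Sheet1!$A$1:$A$6"})
    worksheet.insert_chart("C1", chart)
    workbook.close()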
|
ChartLine
|
python
|
apache__airflow
|
devel-common/src/tests_common/test_utils/fake_datetime.py
|
{
"start": 855,
"end": 1058
}
|
class ____(datetime):
"""A fake replacement for datetime that can be mocked for testing."""
def __new__(cls, *args, **kwargs):
return datetime.__new__(datetime, *args, **kwargs)
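# Illustrative, self-contained sketch (editor addition; the names below are
# hypothetical and not part of the original helper). A datetime subclass like
# the one above exists so that classmethods such as utcnow() can be stubbed in
# tests, which is not possible on CPython's built-in datetime type.
if __name__ == "__main__":
    from datetime import datetime as _real_datetime
    from unittest import mock

    class _ExampleFakeDatetime(_real_datetime):
        def __new__(cls, *args, **kwargs):
            return _real_datetime.__new__(_real_datetime, *args, **kwargs)

    # Patching works because the attribute lives on the Python-level subclass.
    with mock.patch.object(
        _ExampleFakeDatetime, "utcnow", return_value=_real_datetime(2024, 1, 1)
    ):
        assert _ExampleFakeDatetime.utcnow() == _real_datetime(2024, 1, 1)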
|
FakeDatetime
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/ddl.py
|
{
"start": 37979,
"end": 38187
}
|
class ____(_CreateDropBase["Table"]):
"""Represent a COMMENT ON TABLE '' statement.
Note this varies a lot across database backends.
"""
__visit_name__ = "drop_table_comment"
|
DropTableComment
|
python
|
PyCQA__pylint
|
tests/pyreverse/functional/class_diagrams/attributes/duplicates.py
|
{
"start": 420,
"end": 620
}
|
class ____:
def __init__(self) -> None:
self.val: str | int = "1"
self.lav: list[str] = []
def bar(self) -> None:
self.val = "2"
self.lav = []
|
DuplicateAnnotations
|
python
|
apache__airflow
|
airflow-core/tests/unit/jobs/test_scheduler_job.py
|
{
"start": 7311,
"end": 323847
}
|
class ____:
@staticmethod
def clean_db():
clear_db_dags()
clear_db_runs()
clear_db_backfills()
clear_db_pools()
clear_db_import_errors()
clear_db_jobs()
clear_db_assets()
clear_db_deadline()
clear_db_callbacks()
clear_db_triggers()
@pytest.fixture(autouse=True)
def per_test(self) -> Generator:
self.clean_db()
self.job_runner: SchedulerJobRunner | None = None
yield
self.clean_db()
@pytest.fixture(autouse=True)
def set_instance_attrs(self) -> Generator:
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec: MockExecutor | None = MockExecutor()
yield
self.null_exec = None
@pytest.fixture
def mock_executors(self):
mock_jwt_generator = MagicMock(spec=JWTGenerator)
mock_jwt_generator.generate.return_value = "mock-token"
default_executor = mock.MagicMock(name="DefaultExecutor", slots_available=8, slots_occupied=0)
default_executor.name = ExecutorName(alias="default_exec", module_path="default.exec.module.path")
default_executor.jwt_generator = mock_jwt_generator
default_executor.team_name = None # Global executor
default_executor.sentry_integration = ""
second_executor = mock.MagicMock(name="SeconadaryExecutor", slots_available=8, slots_occupied=0)
second_executor.name = ExecutorName(alias="secondary_exec", module_path="secondary.exec.module.path")
second_executor.jwt_generator = mock_jwt_generator
second_executor.team_name = None # Global executor
second_executor.sentry_integration = ""
# TODO: Task-SDK Make it look like a bound method. Needed until we remove the old queue_workload
# interface from executors
default_executor.queue_workload.__func__ = BaseExecutor.queue_workload
second_executor.queue_workload.__func__ = BaseExecutor.queue_workload
with mock.patch("airflow.jobs.job.Job.executors", new_callable=PropertyMock) as executors_mock:
executors_mock.return_value = [default_executor, second_executor]
yield [default_executor, second_executor]
@pytest.fixture
def mock_executor(self, mock_executors):
default_executor = mock_executors[0]
with mock.patch("airflow.jobs.job.Job.executors", new_callable=PropertyMock) as executors_mock:
executors_mock.return_value = [default_executor]
yield default_executor
def test_is_alive(self):
scheduler_job = Job(heartrate=10, state=State.RUNNING)
self.job_runner = SchedulerJobRunner(scheduler_job)
assert scheduler_job.is_alive()
scheduler_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
assert scheduler_job.is_alive()
scheduler_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
assert not scheduler_job.is_alive()
# regression check: .seconds was used here before instead of total_seconds();
# timedelta's internal repr is (days, seconds), so .seconds of a 1-day delta is 0
scheduler_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(days=1)
assert not scheduler_job.is_alive()
scheduler_job.state = State.SUCCESS
scheduler_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
assert not scheduler_job.is_alive(), "Completed jobs even with recent heartbeat should not be alive"
@pytest.mark.parametrize(
"heartrate",
[10, 5],
)
def test_heartrate(self, heartrate):
with conf_vars({("scheduler", "scheduler_heartbeat_sec"): str(heartrate)}):
scheduler_job = Job(executor=self.null_exec)
_ = SchedulerJobRunner(job=scheduler_job)
assert scheduler_job.heartrate == heartrate
def test_no_orphan_process_will_be_left(self):
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler_job = Job(
executor=MockExecutor(do_update=False),
)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(old_children)
assert not current_children
@mock.patch("airflow.jobs.scheduler_job_runner.TaskCallbackRequest")
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_events(self, mock_stats_incr, mock_task_callback, dag_maker):
dag_id = "test_process_executor_events"
task_id_1 = "dummy_task"
session = settings.Session()
with dag_maker(dag_id=dag_id, fileloc="/test_path1/"):
task1 = EmptyOperator(task_id=task_id_1)
ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id)
mock_stats_incr.reset_mock()
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock(spec=TaskCallbackRequest)
mock_task_callback.return_value = task_callback
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.FAILED, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.FAILED
scheduler_job.executor.callback_sink.send.assert_not_called()
# ti in success state
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.SUCCESS
scheduler_job.executor.callback_sink.send.assert_not_called()
mock_stats_incr.assert_has_calls(
[
mock.call(
"scheduler.tasks.killed_externally",
tags={"dag_id": dag_id, "task_id": ti1.task_id},
),
mock.call("operator_failures_EmptyOperator", tags={"dag_id": dag_id, "task_id": ti1.task_id}),
mock.call("ti_failures", tags={"dag_id": dag_id, "task_id": ti1.task_id}),
],
any_order=True,
)
@mock.patch("airflow.jobs.scheduler_job_runner.TaskCallbackRequest", spec=TaskCallbackRequest)
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_events_restarting_cleared_task(
self, mock_stats_incr, mock_task_callback, dag_maker
):
"""
Test processing of RESTARTING task instances by scheduler's _process_executor_events.
Simulates the complete flow when a running task is cleared:
1. Task is RUNNING and has exhausted retries (try_number > max_tries)
2. User clears the task → state becomes RESTARTING
3. Executor successfully terminates the task → reports SUCCESS
4. Scheduler processes the event and sets task to None (scheduled)
5. max_tries is adjusted to allow retry beyond normal limits
This test prevents regression of issue #55045 where RESTARTING tasks
would get stuck due to scheduler not processing executor events.
"""
dag_id = "test_restarting_max_tries"
task_id = "test_task"
session = settings.Session()
with dag_maker(dag_id=dag_id, fileloc="/test_path1/", max_active_runs=1):
task1 = EmptyOperator(task_id=task_id, retries=2)
ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id)
# Set up exhausted task scenario: try_number > max_tries
ti1.state = TaskInstanceState.RESTARTING # Simulates cleared running task
ti1.try_number = 4 # Already tried 4 times
ti1.max_tries = 3 # Originally only allowed 3 tries
session.merge(ti1)
session.commit()
# Verify task is in RESTARTING state and eligible for retry
assert ti1.state == TaskInstanceState.RESTARTING
assert ti1.is_eligible_to_retry() is True, "RESTARTING should bypass max_tries"
# Set up scheduler and executor
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock(spec=TaskCallbackRequest)
mock_task_callback.return_value = task_callback
scheduler_job = Job(executor=executor)
job_runner = SchedulerJobRunner(scheduler_job)
# Simulate executor reporting task completion (this triggers the bug scenario)
executor.event_buffer[ti1.key] = State.SUCCESS, None
# Process the executor event
job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state is None, "Task should be set to None (scheduled) state after RESTARTING processing"
# Verify max_tries was adjusted to allow retry
expected_max_tries = 4 + 2
assert ti1.max_tries == expected_max_tries, (
f"max_tries should be adjusted to {expected_max_tries}, got {ti1.max_tries}"
)
# Verify task is now eligible for retry despite being previously exhausted
assert ti1.is_eligible_to_retry() is True, (
"Task should be eligible for retry after max_tries adjustment"
)
# Verify try_number wasn't changed (scheduler doesn't increment it here)
assert ti1.try_number == 4, "try_number should remain unchanged"
@mock.patch("airflow.jobs.scheduler_job_runner.TaskCallbackRequest")
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_events_with_no_callback(self, mock_stats_incr, mock_task_callback, dag_maker):
dag_id = "test_process_executor_events_with_no_callback"
task_id = "test_task"
run_id = "test_run"
mock_stats_incr.reset_mock()
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock(spec=TaskCallbackRequest)
mock_task_callback.return_value = task_callback
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
session = settings.Session()
with dag_maker(dag_id=dag_id, fileloc="/test_path1/"):
task1 = EmptyOperator(task_id=task_id, retries=1)
ti1 = dag_maker.create_dagrun(
run_id=run_id, logical_date=DEFAULT_DATE + timedelta(hours=1)
).get_task_instance(task1.task_id)
mock_stats_incr.reset_mock()
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock()
mock_task_callback.return_value = task_callback
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.FAILED, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.UP_FOR_RETRY
scheduler_job.executor.callback_sink.send.assert_not_called()
# ti in success state
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.SUCCESS
scheduler_job.executor.callback_sink.send.assert_not_called()
mock_stats_incr.assert_has_calls(
[
mock.call(
"scheduler.tasks.killed_externally",
tags={"dag_id": dag_id, "task_id": task_id},
),
mock.call("operator_failures_EmptyOperator", tags={"dag_id": dag_id, "task_id": task_id}),
mock.call("ti_failures", tags={"dag_id": dag_id, "task_id": task_id}),
],
any_order=True,
)
@mock.patch("airflow.jobs.scheduler_job_runner.TaskCallbackRequest")
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_events_with_callback(
self, mock_stats_incr, mock_task_callback, dag_maker, session
):
dag_id = "test_process_executor_events_with_callback"
task_id_1 = "dummy_task"
with dag_maker(dag_id=dag_id, fileloc="/test_path1/") as dag:
EmptyOperator(task_id=task_id_1, on_failure_callback=lambda x: print("hi"))
dr = dag_maker.create_dagrun()
ti1 = dr.task_instances[0]
mock_stats_incr.reset_mock()
task_callback = mock.MagicMock()
mock_task_callback.return_value = task_callback
executor = MockExecutor(do_update=False)
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.FAILED, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db()
assert ti1.state == State.FAILED
mock_task_callback.assert_called_once_with(
filepath=dag.relative_fileloc,
ti=mock.ANY,
bundle_name="dag_maker",
bundle_version=None,
msg=f"Executor {executor} reported that the task instance "
"<TaskInstance: test_process_executor_events_with_callback.dummy_task test [queued]> "
"finished with state failed, but the task instance's state attribute is queued. "
"Learn more: https://airflow.apache.org/docs/apache-airflow/stable/troubleshooting.html#task-state-changed-externally",
context_from_server=mock.ANY,
task_callback_type=TaskInstanceState.FAILED,
)
scheduler_job.executor.callback_sink.send.assert_called_once_with(task_callback)
scheduler_job.executor.callback_sink.reset_mock()
mock_stats_incr.assert_any_call(
"scheduler.tasks.killed_externally",
tags={
"dag_id": "test_process_executor_events_with_callback",
"task_id": "dummy_task",
},
)
@mock.patch("airflow.jobs.scheduler_job_runner.TaskCallbackRequest")
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_event_missing_dag(self, mock_stats_incr, mock_task_callback, dag_maker, caplog):
dag_id = "test_process_executor_events_with_callback"
task_id_1 = "dummy_task"
with dag_maker(dag_id=dag_id, fileloc="/test_path1/"):
task1 = EmptyOperator(task_id=task_id_1, on_failure_callback=lambda x: print("hi"))
ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id)
mock_stats_incr.reset_mock()
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock()
mock_task_callback.return_value = task_callback
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
self.job_runner.scheduler_dag_bag = mock.MagicMock()
self.job_runner.scheduler_dag_bag.get_dag_for_run.side_effect = Exception("failed")
session = settings.Session()
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.FAILED, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db()
assert ti1.state == State.FAILED
@mock.patch("airflow.jobs.scheduler_job_runner.TaskCallbackRequest")
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_events_ti_requeued(self, mock_stats_incr, mock_task_callback, dag_maker):
dag_id = "test_process_executor_events_ti_requeued"
task_id_1 = "dummy_task"
session = settings.Session()
with dag_maker(dag_id=dag_id, fileloc="/test_path1/"):
task1 = EmptyOperator(task_id=task_id_1)
ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id)
mock_stats_incr.reset_mock()
executor = MockExecutor(do_update=False)
task_callback = mock.MagicMock()
mock_task_callback.return_value = task_callback
scheduler_job = Job(executor=executor)
session.add(scheduler_job)
session.flush()
self.job_runner = SchedulerJobRunner(scheduler_job)
# ti is queued with another try number - do not fail it
ti1.state = State.QUEUED
ti1.queued_by_job_id = scheduler_job.id
ti1.try_number = 2
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key.with_try_number(1)] = State.SUCCESS, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.QUEUED
scheduler_job.executor.callback_sink.send.assert_not_called()
# ti is queued by another scheduler - do not fail it
ti1.state = State.QUEUED
ti1.queued_by_job_id = scheduler_job.id - 1
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.QUEUED
scheduler_job.executor.callback_sink.send.assert_not_called()
# ti is queued by this scheduler but it is handed back to the executor - do not fail it
ti1.state = State.QUEUED
ti1.queued_by_job_id = 1
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS, None
executor.has_task = mock.MagicMock(return_value=True)
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.QUEUED
scheduler_job.executor.callback_sink.send.assert_not_called()
mock_stats_incr.assert_not_called()
@pytest.mark.usefixtures("testing_dag_bundle")
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.incr")
def test_process_executor_events_with_asset_events(self, mock_stats_incr, session, dag_maker):
"""
Test that _process_executor_events handles asset events without DetachedInstanceError.
Regression test for scheduler crashes when task callbacks are built with
consumed_asset_events that weren't eager-loaded.
"""
asset1 = Asset(uri="test://asset1", name="test_asset_executor", group="test_group")
asset_model = AssetModel(name=asset1.name, uri=asset1.uri, group=asset1.group)
session.add(asset_model)
session.flush()
with dag_maker(dag_id="test_executor_events_with_assets", schedule=[asset1], fileloc="/test_path1/"):
EmptyOperator(task_id="dummy_task", on_failure_callback=lambda ctx: None)
dag = dag_maker.dag
sync_dag_to_db(dag)
DagVersion.get_latest_version(dag.dag_id)
dr = dag_maker.create_dagrun()
# Create asset event and attach to dag run
asset_event = AssetEvent(
asset_id=asset_model.id,
source_task_id="upstream_task",
source_dag_id="upstream_dag",
source_run_id="upstream_run",
source_map_index=-1,
)
session.add(asset_event)
session.flush()
dr.consumed_asset_events.append(asset_event)
session.add(dr)
session.flush()
executor = MockExecutor(do_update=False)
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
ti1 = dr.get_task_instance("dummy_task")
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.FAILED, None
# This should not raise DetachedInstanceError
self.job_runner._process_executor_events(executor=executor, session=session)
ti1.refresh_from_db(session=session)
assert ti1.state == State.FAILED
# Verify callback was created with asset event data
scheduler_job.executor.callback_sink.send.assert_called_once()
callback_request = scheduler_job.executor.callback_sink.send.call_args.args[0]
assert callback_request.context_from_server is not None
assert len(callback_request.context_from_server.dag_run.consumed_asset_events) == 1
assert callback_request.context_from_server.dag_run.consumed_asset_events[0].asset.uri == asset1.uri
def test_execute_task_instances_is_paused_wont_execute(self, session, dag_maker):
dag_id = "SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute"
task_id_1 = "dummy_task"
with dag_maker(dag_id=dag_id, session=session) as dag:
EmptyOperator(task_id=task_id_1)
assert isinstance(dag, SerializedDAG)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB)
(ti1,) = dr1.task_instances
ti1.state = State.SCHEDULED
self.job_runner._critical_section_enqueue_task_instances(session)
session.flush()
ti1.refresh_from_db(session=session)
assert ti1.state == State.SCHEDULED
session.rollback()
@pytest.mark.usefixtures("testing_dag_bundle")
def test_find_and_purge_task_instances_without_heartbeats_with_asset_events(
self, session, dag_maker, create_dagrun
):
"""
Test that heartbeat purge succeeds when DagRun has consumed_asset_events.
Regression test for DetachedInstanceError when building TaskCallbackRequest
with asset event data after session expunge.
"""
asset1 = Asset(uri="test://asset1", name="test_asset", group="test_group")
asset_model = AssetModel(name=asset1.name, uri=asset1.uri, group=asset1.group)
session.add(asset_model)
session.flush()
with dag_maker(dag_id="test_heartbeat_with_assets", schedule=[asset1]):
EmptyOperator(task_id="dummy_task")
dag = dag_maker.dag
scheduler_dag = sync_dag_to_db(dag)
dag_v = DagVersion.get_latest_version(dag.dag_id)
data_interval = infer_automated_data_interval(scheduler_dag.timetable, DEFAULT_LOGICAL_DATE)
dag_run = create_dagrun(
scheduler_dag,
logical_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
data_interval=data_interval,
)
# Create asset alias and event with full relationships
asset_alias = AssetAliasModel(name="test_alias", group="test_group")
session.add(asset_alias)
session.flush()
asset_event = AssetEvent(
asset_id=asset_model.id,
source_task_id="upstream_task",
source_dag_id="upstream_dag",
source_run_id="upstream_run",
source_map_index=-1,
)
session.add(asset_event)
session.flush()
# Attach alias to event and event to dag run
asset_event.source_aliases.append(asset_alias)
dag_run.consumed_asset_events.append(asset_event)
session.add_all([asset_event, dag_run])
session.flush()
executor = MockExecutor()
scheduler_job = Job(executor=executor)
with mock.patch("airflow.executors.executor_loader.ExecutorLoader.load_executor") as loader_mock:
loader_mock.return_value = executor
self.job_runner = SchedulerJobRunner(job=scheduler_job)
ti = dag_run.get_task_instance("dummy_task")
assert ti is not None # sanity check: dag_maker.create_dagrun created the TI
ti.state = State.RUNNING
ti.last_heartbeat_at = timezone.utcnow() - timedelta(minutes=6)
ti.start_date = timezone.utcnow() - timedelta(minutes=10)
ti.queued_by_job_id = scheduler_job.id
ti.dag_version = dag_v
session.merge(ti)
session.flush()
executor.running.add(ti.key)
tis_without_heartbeats = self.job_runner._find_task_instances_without_heartbeats(session=session)
assert len(tis_without_heartbeats) == 1
ti_from_query = tis_without_heartbeats[0]
ti_key = ti_from_query.key
# Detach all ORM objects to mirror scheduler behaviour after session closes
session.expunge_all()
# This should not raise DetachedInstanceError now that eager loads are in place
self.job_runner._purge_task_instances_without_heartbeats(tis_without_heartbeats, session=session)
assert ti_key not in executor.running
executor.callback_sink.send.assert_called_once()
callback_request = executor.callback_sink.send.call_args.args[0]
assert callback_request.context_from_server is not None
assert len(callback_request.context_from_server.dag_run.consumed_asset_events) == 1
consumed_event = callback_request.context_from_server.dag_run.consumed_asset_events[0]
assert consumed_event.asset.uri == asset1.uri
assert len(consumed_event.source_aliases) == 1
assert consumed_event.source_aliases[0].name == "test_alias"
# @pytest.mark.usefixtures("mock_executor")
def test_execute_task_instances_backfill_tasks_will_execute(self, dag_maker):
"""
Tests that backfill tasks will get executed (queued by the scheduler).
"""
dag_id = "SchedulerJobTest.test_execute_task_instances_backfill_tasks_will_execute"
task_id_1 = "dummy_task"
with dag_maker(dag_id=dag_id):
task1 = EmptyOperator(task_id=task_id_1)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr1 = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB)
dag_version = DagVersion.get_latest_version(dr1.dag_id)
ti1 = TaskInstance(task1, run_id=dr1.run_id, dag_version_id=dag_version.id)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.flush()
assert dr1.run_type == DagRunType.BACKFILL_JOB
self.job_runner._critical_section_enqueue_task_instances(session)
session.flush()
ti1.refresh_from_db()
assert ti1.state == TaskInstanceState.QUEUED
session.rollback()
def test_setup_callback_sink_standalone_dag_processor(self, mock_executors):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
assert isinstance(scheduler_job.executor.callback_sink, DatabaseCallbackSink)
def test_setup_callback_sink_standalone_dag_processor_multiple_executors(self, mock_executors):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
for executor in scheduler_job.executors:
assert isinstance(executor.callback_sink, DatabaseCallbackSink)
def test_executor_start_called(self, mock_executors):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
scheduler_job.executor.start.assert_called_once()
for executor in scheduler_job.executors:
executor.start.assert_called_once()
def test_executor_job_id_assigned(self, mock_executors, configure_testing_dag_bundle):
with configure_testing_dag_bundle(os.devnull):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
assert scheduler_job.executor.job_id == scheduler_job.id
for executor in scheduler_job.executors:
assert executor.job_id == scheduler_job.id
def test_executor_heartbeat(self, mock_executors, configure_testing_dag_bundle):
with configure_testing_dag_bundle(os.devnull):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
for executor in scheduler_job.executors:
executor.heartbeat.assert_called_once()
def test_executor_events_processed(self, mock_executors, configure_testing_dag_bundle):
with configure_testing_dag_bundle(os.devnull):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
for executor in scheduler_job.executors:
executor.get_event_buffer.assert_called_once()
@patch("traceback.extract_stack")
def test_executor_debug_dump(self, patch_traceback_extract_stack, mock_executors):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._debug_dump(1, mock.MagicMock())
for executor in scheduler_job.executors:
executor.debug_dump.assert_called_once()
patch_traceback_extract_stack.assert_called()
def test_find_executable_task_instances_backfill(self, dag_maker):
dag_id = "SchedulerJobTest.test_find_executable_task_instances_backfill"
task_id_1 = "dummy"
with dag_maker(dag_id=dag_id, max_active_tasks=16):
task1 = EmptyOperator(task_id=task_id_1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr_non_backfill = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr_backfill = dag_maker.create_dagrun_after(
dr_non_backfill, run_type=DagRunType.BACKFILL_JOB, state=State.RUNNING
)
ti_backfill = dr_backfill.get_task_instance(task1.task_id)
ti_non_backfill = dr_non_backfill.get_task_instance(task1.task_id)
ti_backfill.state = State.SCHEDULED
ti_non_backfill.state = State.SCHEDULED
session.merge(dr_backfill)
session.merge(ti_backfill)
session.merge(ti_non_backfill)
session.flush()
queued_tis = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(queued_tis) == 2
assert {x.key for x in queued_tis} == {ti_non_backfill.key, ti_backfill.key}
session.rollback()
def test_find_executable_task_instances_pool(self, dag_maker):
dag_id = "SchedulerJobTest.test_find_executable_task_instances_pool"
task_id_1 = "dummy"
task_id_2 = "dummydummy"
session = settings.Session()
with dag_maker(dag_id=dag_id, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id_1, pool="a", priority_weight=2)
EmptyOperator(task_id=task_id_2, pool="b", priority_weight=1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
tis = [
dr1.get_task_instance(task_id_1, session=session),
dr1.get_task_instance(task_id_2, session=session),
dr2.get_task_instance(task_id_1, session=session),
dr2.get_task_instance(task_id_2, session=session),
]
tis.sort(key=lambda ti: ti.key)
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = Pool(pool="a", slots=1, description="haha", include_deferred=False)
pool2 = Pool(pool="b", slots=100, description="haha", include_deferred=False)
session.add(pool)
session.add(pool2)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
assert len(res) == 3
res_keys = []
for ti in res:
res_keys.append(ti.key)
assert tis[0].key in res_keys
assert tis[2].key in res_keys
assert tis[3].key in res_keys
session.rollback()
@pytest.mark.parametrize(
("state", "total_executed_ti"),
[
(DagRunState.SUCCESS, 0),
(DagRunState.FAILED, 0),
(DagRunState.RUNNING, 2),
(DagRunState.QUEUED, 0),
],
)
def test_find_executable_task_instances_only_running_dagruns(
self, state, total_executed_ti, dag_maker, session
):
"""Test that only task instances of 'running' dagruns are executed"""
dag_id = "SchedulerJobTest.test_find_executable_task_instances_only_running_dagruns"
task_id_1 = "dummy"
task_id_2 = "dummydummy"
with dag_maker(dag_id=dag_id, session=session):
EmptyOperator(task_id=task_id_1)
EmptyOperator(task_id=task_id_2)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun(state=state)
tis = dr.task_instances
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
assert total_executed_ti == len(res)
def test_find_executable_task_instances_order_logical_date(self, dag_maker):
"""
Test that task instances follow logical_date order priority. If two dagruns with
different logical dates are scheduled, tasks from the dagrun with the earliest
logical date will be executed first
"""
dag_id_1 = "SchedulerJobTest.test_find_executable_task_instances_order_logical_date-a"
dag_id_2 = "SchedulerJobTest.test_find_executable_task_instances_order_logical_date-b"
task_id = "task-a"
session = settings.Session()
with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id)
dr1 = dag_maker.create_dagrun(logical_date=DEFAULT_DATE + timedelta(hours=1))
with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id)
dr2 = dag_maker.create_dagrun()
dr1 = session.merge(dr1, load=False)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
tis = dr1.task_instances + dr2.task_instances
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
session.flush()
assert [ti.key for ti in res] == [tis[1].key]
session.rollback()
def test_find_executable_task_instances_order_priority(self, dag_maker):
dag_id_1 = "SchedulerJobTest.test_find_executable_task_instances_order_priority-a"
dag_id_2 = "SchedulerJobTest.test_find_executable_task_instances_order_priority-b"
task_id = "task-a"
session = settings.Session()
with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id, priority_weight=1)
dr1 = dag_maker.create_dagrun()
with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id, priority_weight=4)
dr2 = dag_maker.create_dagrun()
dr1 = session.merge(dr1, load=False)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
tis = dr1.task_instances + dr2.task_instances
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
session.flush()
assert [ti.key for ti in res] == [tis[1].key]
session.rollback()
def test_find_executable_task_instances_executor(self, dag_maker, mock_executors):
"""
Test that tasks for all executors are set to queued, if space allows it
"""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = "SchedulerJobTest.test_find_executable_task_instances_executor"
with dag_maker(dag_id=dag_id):
op1 = EmptyOperator(task_id="dummy1") # No executor specified, runs on default executor
op2 = EmptyOperator(task_id="dummy2", executor="default_exec")
op3 = EmptyOperator(task_id="dummy3", executor="default.exec.module.path")
op4 = EmptyOperator(task_id="dummy4", executor="secondary_exec")
op5 = EmptyOperator(task_id="dummy5", executor="secondary.exec.module.path")
dag_run = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti1 = dag_run.get_task_instance(op1.task_id, session)
ti2 = dag_run.get_task_instance(op2.task_id, session)
ti3 = dag_run.get_task_instance(op3.task_id, session)
ti4 = dag_run.get_task_instance(op4.task_id, session)
ti5 = dag_run.get_task_instance(op5.task_id, session)
tis_tuple = (ti1, ti2, ti3, ti4, ti5)
for ti in tis_tuple:
ti.state = State.SCHEDULED
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 5
res_ti_keys = [res_ti.key for res_ti in res]
for ti in tis_tuple:
assert ti.key in res_ti_keys
@conf_vars({("core", "multi_team"): "true"})
def test_find_executable_task_instances_executor_with_teams(self, dag_maker, mock_executors, session):
"""
Test that tasks are correctly routed to team-specific executors when multi-team is enabled
"""
clear_db_teams()
clear_db_dag_bundles()
team1 = Team(name="team_a")
team2 = Team(name="team_b")
session.add_all([team1, team2])
session.flush()
bundle1 = DagBundleModel(name="bundle_a")
bundle2 = DagBundleModel(name="bundle_b")
bundle1.teams.append(team1)
bundle2.teams.append(team2)
session.add_all([bundle1, bundle2])
session.flush()
mock_executors[0].team_name = "team_a"
mock_executors[1].team_name = "team_b"
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
op1 = EmptyOperator(task_id="task_a_default") # No explicit executor - should use team's default
op2 = EmptyOperator(
task_id="task_a_explicit", executor="default_exec"
) # Team-specific explicit executor
dr1 = dag_maker.create_dagrun()
with dag_maker(dag_id="dag_b", bundle_name="bundle_b", session=session):
op3 = EmptyOperator(task_id="task_b_default") # Team b's default
op4 = EmptyOperator(task_id="task_b_explicit", executor="secondary_exec") # Team b explicit
dr2 = dag_maker.create_dagrun()
# DAG with no team (global)
with dag_maker(dag_id="dag_global", session=session):
op5 = EmptyOperator(task_id="task_global") # Global task - any executor
dr3 = dag_maker.create_dagrun()
tis = [
dr1.get_task_instance(op1.task_id, session),
dr1.get_task_instance(op2.task_id, session),
dr2.get_task_instance(op3.task_id, session),
dr2.get_task_instance(op4.task_id, session),
dr3.get_task_instance(op5.task_id, session),
]
for ti in tis:
ti.state = State.SCHEDULED
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
# All tasks should be queued since they have valid executor mappings
assert len(res) == 5
# Verify that each task is routed to the correct executor
executor_to_tis = self.job_runner._executor_to_tis(res, session)
# Team a tasks should go to mock_executors[0] (configured for team_a)
a_tis_in_executor = [ti for ti in executor_to_tis.get(mock_executors[0], []) if ti.dag_id == "dag_a"]
assert len(a_tis_in_executor) == 2
# Team b tasks should go to mock_executors[1] (configured for team_b)
b_tis_in_executor = [ti for ti in executor_to_tis.get(mock_executors[1], []) if ti.dag_id == "dag_b"]
assert len(b_tis_in_executor) == 2
# Global task should go to the default executor (scheduler_job.executor)
global_tis_in_executor = [
ti for ti in executor_to_tis.get(scheduler_job.executor, []) if ti.dag_id == "dag_global"
]
assert len(global_tis_in_executor) == 1
# Verify no cross-contamination: team a tasks should not be in team b's executor and vice versa
a_tis_in_wrong_executor = [
ti for ti in executor_to_tis.get(mock_executors[1], []) if ti.dag_id == "dag_a"
]
assert len(a_tis_in_wrong_executor) == 0
b_tis_in_wrong_executor = [
ti for ti in executor_to_tis.get(mock_executors[0], []) if ti.dag_id == "dag_b"
]
assert len(b_tis_in_wrong_executor) == 0
def test_find_executable_task_instances_order_priority_with_pools(self, dag_maker):
"""
The scheduler job should pick tasks with higher priority for execution
even if different pools are involved.
"""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = "SchedulerJobTest.test_find_executable_task_instances_order_priority_with_pools"
session.add(Pool(pool="pool1", slots=32, include_deferred=False))
session.add(Pool(pool="pool2", slots=32, include_deferred=False))
with dag_maker(dag_id=dag_id, max_active_tasks=2):
op1 = EmptyOperator(task_id="dummy1", priority_weight=1, pool="pool1")
op2 = EmptyOperator(task_id="dummy2", priority_weight=2, pool="pool2")
op3 = EmptyOperator(task_id="dummy3", priority_weight=3, pool="pool1")
dag_run = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti1 = dag_run.get_task_instance(op1.task_id, session)
ti2 = dag_run.get_task_instance(op2.task_id, session)
ti3 = dag_run.get_task_instance(op3.task_id, session)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 2
assert ti3.key == res[0].key
assert ti2.key == res[1].key
session.rollback()
def test_find_executable_task_instances_order_logical_date_and_priority(self, dag_maker):
dag_id_1 = "SchedulerJobTest.test_find_executable_task_instances_order_logical_date_and_priority-a"
dag_id_2 = "SchedulerJobTest.test_find_executable_task_instances_order_logical_date_and_priority-b"
task_id = "task-a"
session = settings.Session()
with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id, priority_weight=1)
dr1 = dag_maker.create_dagrun()
with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session):
EmptyOperator(task_id=task_id, priority_weight=4)
dr2 = dag_maker.create_dagrun(logical_date=DEFAULT_DATE + timedelta(hours=1))
dr1 = session.merge(dr1, load=False)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
tis = dr1.task_instances + dr2.task_instances
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
session.flush()
assert [ti.key for ti in res] == [tis[1].key]
session.rollback()
def test_find_executable_task_instances_in_default_pool(self, dag_maker, mock_executor):
set_default_pool_slots(1)
dag_id = "SchedulerJobTest.test_find_executable_task_instances_in_default_pool"
with dag_maker(dag_id=dag_id):
op1 = EmptyOperator(task_id="dummy1")
op2 = EmptyOperator(task_id="dummy2")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
session = settings.Session()
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED, state=State.RUNNING)
ti1 = dr1.get_task_instance(op1.task_id, session)
ti2 = dr2.get_task_instance(op2.task_id, session)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
# Two tasks w/o pool up for execution and our default pool size is 1
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 1
ti2.state = State.RUNNING
session.flush()
# One task w/o pool up for execution and one task running
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 0
session.rollback()
session.close()
def test_queued_task_instances_fails_with_missing_dag(self, dag_maker, session):
"""Check that task instances of missing DAGs are failed"""
dag_id = "SchedulerJobTest.test_find_executable_task_instances_not_in_dagbag"
task_id_1 = "dummy"
task_id_2 = "dummydummy"
with dag_maker(dag_id=dag_id, session=session, default_args={"max_active_tis_per_dag": 1}):
EmptyOperator(task_id=task_id_1)
EmptyOperator(task_id=task_id_2)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner.scheduler_dag_bag = mock.MagicMock()
self.job_runner.scheduler_dag_bag.get_dag_for_run.return_value = None
dr = dag_maker.create_dagrun(state=DagRunState.RUNNING)
tis = dr.task_instances
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
assert len(res) == 0
tis = dr.get_task_instances(session=session)
assert len(tis) == 2
assert all(ti.state == State.FAILED for ti in tis)
def test_nonexistent_pool(self, dag_maker):
dag_id = "SchedulerJobTest.test_nonexistent_pool"
with dag_maker(dag_id=dag_id, max_active_tasks=16):
EmptyOperator(task_id="dummy_wrong_pool", pool="this_pool_doesnt_exist")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
assert len(res) == 0
session.rollback()
def test_infinite_pool(self, dag_maker):
dag_id = "SchedulerJobTest.test_infinite_pool"
with dag_maker(dag_id=dag_id, max_active_tasks=16):
EmptyOperator(task_id="dummy", pool="infinite_pool")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.state = State.SCHEDULED
session.merge(ti)
infinite_pool = Pool(
pool="infinite_pool",
slots=-1,
description="infinite pool",
include_deferred=False,
)
session.add(infinite_pool)
session.commit()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
session.flush()
assert len(res) == 1
session.rollback()
def test_not_enough_pool_slots(self, caplog, dag_maker):
dag_id = "SchedulerJobTest.test_test_not_enough_pool_slots"
with dag_maker(dag_id=dag_id, max_active_tasks=16):
EmptyOperator(task_id="cannot_run", pool="some_pool", pool_slots=4)
EmptyOperator(task_id="can_run", pool="some_pool", pool_slots=1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.state = State.SCHEDULED
session.merge(ti)
ti = dr.task_instances[1]
ti.state = State.SCHEDULED
session.merge(ti)
some_pool = Pool(pool="some_pool", slots=2, description="my pool", include_deferred=False)
session.add(some_pool)
session.commit()
with caplog.at_level(logging.WARNING):
self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert (
"Not executing <TaskInstance: "
"SchedulerJobTest.test_test_not_enough_pool_slots.cannot_run test [scheduled]>. "
"Requested pool slots (4) are greater than total pool slots: '2' for pool: some_pool"
in caplog.text
)
assert (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == dag_id, TaskInstance.state == State.SCHEDULED)
.count()
== 1
)
assert (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == dag_id, TaskInstance.state == State.QUEUED)
.count()
== 1
)
session.flush()
session.rollback()
def test_find_executable_task_instances_none(self, dag_maker):
dag_id = "SchedulerJobTest.test_find_executable_task_instances_none"
task_id_1 = "dummy"
with dag_maker(dag_id=dag_id, max_active_tasks=16):
EmptyOperator(task_id=task_id_1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
assert len(self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)) == 0
session.rollback()
def test_tis_for_queued_dagruns_are_not_run(self, dag_maker):
"""
This tests that tis from queued dagruns are not queued
"""
dag_id = "test_tis_for_queued_dagruns_are_not_run"
task_id_1 = "dummy"
with dag_maker(dag_id):
task1 = EmptyOperator(task_id=task_id_1)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
ti1 = dr1.get_task_instance(task1.task_id)
ti2 = dr2.get_task_instance(task1.task_id)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 1
assert ti2.key == res[0].key
ti1.refresh_from_db()
ti2.refresh_from_db()
assert ti1.state == State.SCHEDULED
assert ti2.state == State.QUEUED
@pytest.mark.parametrize("active_state", [TaskInstanceState.RUNNING, TaskInstanceState.QUEUED])
def test_find_executable_task_instances_concurrency(self, dag_maker, active_state, session):
"""
We verify here that, with varying amounts of queued / running / scheduled tasks,
the correct number of TIs are queued
"""
dag_id = "check_MAT_dag"
with dag_maker(dag_id=dag_id, max_active_tasks=2, session=session):
EmptyOperator(task_id="task_1")
EmptyOperator(task_id="task_2")
EmptyOperator(task_id="task_3")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, run_id="run_1", session=session)
dr2 = dag_maker.create_dagrun_after(
dr1, run_type=DagRunType.SCHEDULED, run_id="run_2", session=session
)
dr3 = dag_maker.create_dagrun_after(
dr2, run_type=DagRunType.SCHEDULED, run_id="run_3", session=session
)
# set 2 tis in dr1 to running
# no more can be queued
t1, t2, t3 = dr1.get_task_instances(session=session)
t1.state = active_state
t2.state = active_state
t3.state = State.SCHEDULED
session.merge(t1)
session.merge(t2)
session.merge(t3)
# set 1 ti in dr2 to running
# one can be queued
t1, t2, t3 = dr2.get_task_instances(session=session)
t1.state = active_state
t2.state = State.SCHEDULED
t3.state = State.SCHEDULED
session.merge(t1)
session.merge(t2)
session.merge(t3)
# set 0 tis in dr3 to running
# two can be queued
t1, t2, t3 = dr3.get_task_instances(session=session)
t1.state = State.SCHEDULED
t2.state = State.SCHEDULED
t3.state = State.SCHEDULED
session.merge(t1)
session.merge(t2)
session.merge(t3)
session.flush()
queued_tis = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
queued_runs = Counter([x.run_id for x in queued_tis])
assert queued_runs["run_1"] == 0
assert queued_runs["run_2"] == 1
assert queued_runs["run_3"] == 2
session.commit()
session.query(TaskInstance).all()
# now we still have max tis running so no more will be queued
queued_tis = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert queued_tis == []
session.rollback()
# TODO: This is a hack, I think I need to just remove the setting and have it on always
def test_find_executable_task_instances_max_active_tis_per_dag(self, dag_maker):
dag_id = "SchedulerJobTest.test_find_executable_task_instances_max_active_tis_per_dag"
with dag_maker(dag_id=dag_id, max_active_tasks=16):
task1 = EmptyOperator(task_id="dummy", max_active_tis_per_dag=2)
task2 = EmptyOperator(task_id="dummy2")
executor = MockExecutor(do_update=True)
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
dr3 = dag_maker.create_dagrun_after(dr2, run_type=DagRunType.SCHEDULED)
ti1_1 = dr1.get_task_instance(task1.task_id)
ti2 = dr1.get_task_instance(task2.task_id)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.flush()
with mock.patch("airflow.executors.executor_loader.ExecutorLoader.load_executor") as loader_mock:
loader_mock.side_effect = executor.get_mock_loader_side_effect()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 2
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = dr2.get_task_instance(task1.task_id)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 1
ti1_2.state = State.RUNNING
ti1_3 = dr3.get_task_instance(task1.task_id)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 0
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 2
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.flush()
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 1
session.rollback()
def test_change_state_for_executable_task_instances_no_tis_with_state(self, dag_maker):
dag_id = "SchedulerJobTest.test_change_state_for__no_tis_with_state"
task_id_1 = "dummy"
with dag_maker(dag_id=dag_id, max_active_tasks=2):
task1 = EmptyOperator(task_id=task_id_1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
dr3 = dag_maker.create_dagrun_after(dr2, run_type=DagRunType.SCHEDULED)
ti1 = dr1.get_task_instance(task1.task_id)
ti2 = dr2.get_task_instance(task1.task_id)
ti3 = dr3.get_task_instance(task1.task_id)
ti1.state = State.RUNNING
ti2.state = State.RUNNING
ti3.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.flush()
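# All TIs are RUNNING rather than SCHEDULED, so there is nothing to queue.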
res = self.job_runner._executable_task_instances_to_queued(max_tis=100, session=session)
assert len(res) == 0
session.rollback()
def test_find_executable_task_instances_not_enough_pool_slots_for_first(self, dag_maker):
set_default_pool_slots(1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = "SchedulerJobTest.test_find_executable_task_instances_not_enough_pool_slots_for_first"
with dag_maker(dag_id=dag_id):
op1 = EmptyOperator(task_id="dummy1", priority_weight=2, pool_slots=2)
op2 = EmptyOperator(task_id="dummy2", priority_weight=1, pool_slots=1)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti1 = dr1.get_task_instance(op1.task_id, session)
ti2 = dr1.get_task_instance(op2.task_id, session)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
# Schedule ti with lower priority,
# because the one with higher priority is limited by a concurrency limit
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 1
assert res[0].key == ti2.key
session.rollback()
def test_find_executable_task_instances_not_enough_dag_concurrency_for_first(self, dag_maker):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id_1 = (
"SchedulerJobTest.test_find_executable_task_instances_not_enough_dag_concurrency_for_first-a"
)
dag_id_2 = (
"SchedulerJobTest.test_find_executable_task_instances_not_enough_dag_concurrency_for_first-b"
)
with dag_maker(dag_id=dag_id_1, max_active_tasks=1):
op1a = EmptyOperator(task_id="dummy1-a", priority_weight=2)
op1b = EmptyOperator(task_id="dummy1-b", priority_weight=2)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
with dag_maker(dag_id=dag_id_2):
op2 = EmptyOperator(task_id="dummy2", priority_weight=1)
dr2 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti1a = dr1.get_task_instance(op1a.task_id, session)
ti1b = dr1.get_task_instance(op1b.task_id, session)
ti2 = dr2.get_task_instance(op2.task_id, session)
ti1a.state = State.RUNNING
ti1b.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
# Schedule ti with lower priority,
# because the one with higher priority is limited by a concurrency limit
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
assert len(res) == 1
assert res[0].key == ti2.key
session.rollback()
def test_find_executable_task_instances_not_enough_task_concurrency_for_first(self, dag_maker):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = "SchedulerJobTest.test_find_executable_task_instances_not_enough_task_concurrency_for_first"
with dag_maker(dag_id=dag_id):
op1a = EmptyOperator(task_id="dummy1-a", priority_weight=2, max_active_tis_per_dag=1)
op1b = EmptyOperator(task_id="dummy1-b", priority_weight=1)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
ti1a = dr1.get_task_instance(op1a.task_id, session)
ti1b = dr1.get_task_instance(op1b.task_id, session)
ti2a = dr2.get_task_instance(op1a.task_id, session)
ti1a.state = State.RUNNING
ti1b.state = State.SCHEDULED
ti2a.state = State.SCHEDULED
session.flush()
# Schedule ti with lower priority,
# because the one with higher priority is limited by a concurrency limit
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
assert len(res) == 1
assert res[0].key == ti1b.key
session.rollback()
def test_find_executable_task_instances_task_concurrency_per_dagrun_for_first(self, dag_maker):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = "SchedulerJobTest.test_find_executable_task_instances_task_concurrency_per_dagrun_for_first"
with dag_maker(dag_id=dag_id):
op1a = EmptyOperator(task_id="dummy1-a", priority_weight=2, max_active_tis_per_dagrun=1)
op1b = EmptyOperator(task_id="dummy1-b", priority_weight=1)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
ti1a = dr1.get_task_instance(op1a.task_id, session)
ti1b = dr1.get_task_instance(op1b.task_id, session)
ti2a = dr2.get_task_instance(op1a.task_id, session)
ti1a.state = State.RUNNING
ti1b.state = State.SCHEDULED
ti2a.state = State.SCHEDULED
session.flush()
# Schedule the ti with higher priority,
# because it is in a different DAG run where the task has 0 active TIs
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
assert len(res) == 1
assert res[0].key == ti2a.key
session.rollback()
def test_find_executable_task_instances_not_enough_task_concurrency_per_dagrun_for_first(self, dag_maker):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = (
"SchedulerJobTest"
".test_find_executable_task_instances_not_enough_task_concurrency_per_dagrun_for_first"
)
with dag_maker(dag_id=dag_id):
op1a = EmptyOperator.partial(
task_id="dummy1-a", priority_weight=2, max_active_tis_per_dagrun=1
).expand_kwargs([{"inputs": 1}, {"inputs": 2}])
op1b = EmptyOperator(task_id="dummy1-b", priority_weight=1)
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti1a0 = dr.get_task_instance(op1a.task_id, session, map_index=0)
ti1a1 = dr.get_task_instance(op1a.task_id, session, map_index=1)
ti1b = dr.get_task_instance(op1b.task_id, session)
ti1a0.state = State.RUNNING
ti1a1.state = State.SCHEDULED
ti1b.state = State.SCHEDULED
session.flush()
# Schedule ti with lower priority,
# because the one with higher priority is limited by a concurrency limit
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
assert len(res) == 1
assert res[0].key == ti1b.key
session.rollback()
def test_find_executable_task_instances_negative_open_pool_slots(self, dag_maker):
"""
Pools with negative open slots should not block other pools.
Negative open slots can happen when reducing the number of total slots in a pool
while tasks are running in that pool.
"""
set_default_pool_slots(0)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
pool1 = Pool(pool="pool1", slots=1, include_deferred=False)
pool2 = Pool(pool="pool2", slots=1, include_deferred=False)
session.add(pool1)
session.add(pool2)
dag_id = "SchedulerJobTest.test_find_executable_task_instances_negative_open_pool_slots"
with dag_maker(dag_id=dag_id):
op1 = EmptyOperator(task_id="op1", pool="pool1")
op2 = EmptyOperator(task_id="op2", pool="pool2", pool_slots=2)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti1 = dr1.get_task_instance(op1.task_id, session)
ti2 = dr1.get_task_instance(op2.task_id, session)
ti1.state = State.SCHEDULED
ti2.state = State.RUNNING
session.flush()
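# pool2 now has negative open slots (1 slot, 2 occupied by the running TI), but that must not block ti1 in pool1.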
res = self.job_runner._executable_task_instances_to_queued(max_tis=1, session=session)
assert len(res) == 1
assert res[0].key == ti1.key
session.rollback()
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.gauge")
def test_emit_pool_starving_tasks_metrics(self, mock_stats_gauge, dag_maker):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dag_id = "SchedulerJobTest.test_emit_pool_starving_tasks_metrics"
with dag_maker(dag_id=dag_id):
op = EmptyOperator(task_id="op", pool_slots=2)
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti = dr.get_task_instance(op.task_id, session)
ti.state = State.SCHEDULED
set_default_pool_slots(1)
session.flush()
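# The default pool has only 1 slot while the task needs 2, so the task starves and nothing is queued.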
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 0
mock_stats_gauge.assert_has_calls(
[
mock.call("scheduler.tasks.starving", 1),
mock.call(f"pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}", 1),
mock.call("pool.starving_tasks", 1, tags={"pool_name": Pool.DEFAULT_POOL_NAME}),
],
any_order=True,
)
mock_stats_gauge.reset_mock()
set_default_pool_slots(2)
session.flush()
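# With the default pool raised to 2 slots the task now fits, so the starving gauges drop back to 0.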
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
assert len(res) == 1
mock_stats_gauge.assert_has_calls(
[
mock.call("scheduler.tasks.starving", 0),
mock.call(f"pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}", 0),
mock.call("pool.starving_tasks", 0, tags={"pool_name": Pool.DEFAULT_POOL_NAME}),
],
any_order=True,
)
session.rollback()
session.close()
def test_enqueue_task_instances_with_queued_state(self, dag_maker, session):
dag_id = "SchedulerJobTest.test_enqueue_task_instances_with_queued_state"
task_id_1 = "dummy"
session = settings.Session()
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session):
task1 = EmptyOperator(task_id=task_id_1)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr1 = dag_maker.create_dagrun()
ti1 = dr1.get_task_instance(task1.task_id, session)
with patch.object(BaseExecutor, "queue_workload") as mock_queue_workload:
self.job_runner._enqueue_task_instances_with_queued_state(
[ti1], executor=scheduler_job.executor, session=session
)
assert mock_queue_workload.called
session.rollback()
@pytest.mark.parametrize("state", [State.FAILED, State.SUCCESS])
def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):
"""This tests that task instances whose dagrun is in finished state are not queued"""
dag_id = "SchedulerJobTest.test_enqueue_task_instances_with_queued_state"
task_id_1 = "dummy"
session = settings.Session()
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session):
task1 = EmptyOperator(task_id=task_id_1)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr1 = dag_maker.create_dagrun(state=state)
ti = dr1.get_task_instance(task1.task_id, session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
with patch.object(BaseExecutor, "queue_workload") as mock_queue_workload:
self.job_runner._enqueue_task_instances_with_queued_state(
[ti], executor=scheduler_job.executor, session=session
)
session.flush()
ti.refresh_from_db(session=session)
assert ti.state == State.NONE
mock_queue_workload.assert_not_called()
@pytest.mark.parametrize(
("task1_exec", "task2_exec"),
[
("default_exec", "default_exec"),
("default_exec", "secondary_exec"),
("secondary_exec", "secondary_exec"),
],
)
@pytest.mark.usefixtures("mock_executors")
def test_critical_section_enqueue_task_instances(self, task1_exec, task2_exec, dag_maker, session):
dag_id = "SchedulerJobTest.test_execute_task_instances"
# It is important that len(tasks) is less than max_active_tasks,
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if max_active_tasks was 3
# we could execute arbitrarily many tasks in the second run
with dag_maker(dag_id=dag_id, max_active_tasks=3, session=session):
task1 = EmptyOperator(task_id="t1", executor=task1_exec)
task2 = EmptyOperator(task_id="t2", executor=task2_exec)
task3 = EmptyOperator(task_id="t3", executor=task2_exec)
task4 = EmptyOperator(task_id="t4", executor=task2_exec)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# create first dag run with 3 running tasks
dr1 = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, session=session)
dr1_ti1 = dr1.get_task_instance(task1.task_id, session)
dr1_ti2 = dr1.get_task_instance(task2.task_id, session)
dr1_ti3 = dr1.get_task_instance(task3.task_id, session)
dr1_ti4 = dr1.get_task_instance(task4.task_id, session)
dr1_ti1.state = State.RUNNING
dr1_ti2.state = State.RUNNING
dr1_ti3.state = State.RUNNING
dr1_ti4.state = State.SCHEDULED
session.flush()
def _count_tis(states):
return session.scalar(
select(func.count(TaskInstance.task_id)).where(
TaskInstance.dag_id == dag_id,
TaskInstance.state.in_(states),
)
)
assert dr1.state == State.RUNNING
assert _count_tis([TaskInstanceState.RUNNING]) == 3
# create second dag run
dr2 = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED, session=session)
dr2_ti1 = dr2.get_task_instance(task1.task_id, session)
dr2_ti2 = dr2.get_task_instance(task2.task_id, session)
dr2_ti3 = dr2.get_task_instance(task3.task_id, session)
dr2_ti4 = dr2.get_task_instance(task4.task_id, session)
# manually set to scheduled so we can pick them up
dr2_ti1.state = State.SCHEDULED
dr2_ti2.state = State.SCHEDULED
dr2_ti3.state = State.SCHEDULED
dr2_ti4.state = State.SCHEDULED
session.flush()
assert dr2.state == State.RUNNING
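# dr1 already has 3 RUNNING TIs (== max_active_tasks), so only TIs from dr2 can be queued in this pass.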
num_queued = self.job_runner._critical_section_enqueue_task_instances(session=session)
assert num_queued == 3
# check that max_active_tasks is respected
assert _count_tis([TaskInstanceState.RUNNING, TaskInstanceState.QUEUED]) == 6
# this doesn't really tell us anything since we set these values manually, but hey
dr1_counter = Counter(x.state for x in dr1.get_task_instances(session=session))
assert dr1_counter[State.RUNNING] == 3
assert dr1_counter[State.SCHEDULED] == 1
# this is the more meaningful bit
# three of dr2's tasks should be queued since that's max active tasks
# and max active tasks is evaluated per-dag-run
dr2_counter = Counter(x.state for x in dr2.get_task_instances(session=session))
assert dr2_counter[State.QUEUED] == 3
assert dr2_counter[State.SCHEDULED] == 1
num_queued = self.job_runner._critical_section_enqueue_task_instances(session=session)
assert num_queued == 0
def test_execute_task_instances_limit_second_executor(self, dag_maker, mock_executors):
dag_id = "SchedulerJobTest.test_execute_task_instances_limit"
task_id_1 = "dummy_task"
task_id_2 = "dummy_task_2"
session = settings.Session()
# It is important that len(tasks) is less than max_active_tasks,
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if max_active_tasks was 3
# we could execute arbitrarily many tasks in the second run
with dag_maker(dag_id=dag_id, max_active_tasks=16, session=session):
task1 = EmptyOperator(task_id=task_id_1, executor="default_exec")
task2 = EmptyOperator(task_id=task_id_2, executor="secondary_exec")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
def _create_dagruns():
dagrun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.RUNNING)
yield dagrun
for _ in range(3):
dagrun = dag_maker.create_dagrun_after(
dagrun,
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
)
yield dagrun
tis1 = []
tis2 = []
for dr in _create_dagruns():
ti1 = dr.get_task_instance(task1.task_id, session)
tis1.append(ti1)
ti2 = dr.get_task_instance(task2.task_id, session)
tis2.append(ti2)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
scheduler_job.max_tis_per_query = 6
# First pass we'll grab 6 of the 8 tasks (limited by max_tis_per_query)
res = self.job_runner._critical_section_enqueue_task_instances(session)
assert res == 6
session.flush()
for ti in tis1[:3] + tis2[:3]:
ti.refresh_from_db(session)
assert ti.state == TaskInstanceState.QUEUED
for ti in tis1[3:] + tis2[3:]:
ti.refresh_from_db(session)
assert ti.state == TaskInstanceState.SCHEDULED
# The remaining TIs are queued
res = self.job_runner._critical_section_enqueue_task_instances(session)
assert res == 2
session.flush()
for ti in tis1 + tis2:
ti.refresh_from_db(session)
assert ti.state == State.QUEUED
@pytest.mark.parametrize(
("task1_exec", "task2_exec"),
[
("default_exec", "default_exec"),
("default_exec", "secondary_exec"),
("secondary_exec", "secondary_exec"),
],
)
def test_execute_task_instances_limit(self, task1_exec, task2_exec, dag_maker, mock_executors):
dag_id = "SchedulerJobTest.test_execute_task_instances_limit"
task_id_1 = "dummy_task"
task_id_2 = "dummy_task_2"
session = settings.Session()
# It is important that len(tasks) is less than max_active_tasks,
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if max_active_tasks was 3
# we could execute arbitrarily many tasks in the second run
with dag_maker(dag_id=dag_id, max_active_tasks=16, session=session):
task1 = EmptyOperator(task_id=task_id_1, executor=task1_exec)
task2 = EmptyOperator(task_id=task_id_2, executor=task2_exec)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
def _create_dagruns():
dagrun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.RUNNING)
yield dagrun
for _ in range(3):
dagrun = dag_maker.create_dagrun_after(
dagrun,
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
)
yield dagrun
tis = []
for dr in _create_dagruns():
ti1 = dr.get_task_instance(task1.task_id, session)
tis.append(ti1)
ti2 = dr.get_task_instance(task2.task_id, session)
tis.append(ti2)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
scheduler_job.max_tis_per_query = 2
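# Even though 8 TIs (4 runs x 2 tasks) are schedulable, max_tis_per_query caps a single pass at 2.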
total_enqueued = self.job_runner._critical_section_enqueue_task_instances(session)
assert total_enqueued == 2
def test_execute_task_instances_limit_slots(self, dag_maker, mock_executors):
dag_id = "SchedulerJobTest.test_execute_task_instances_limit"
task_id_1 = "dummy_task"
task_id_2 = "dummy_task_2"
session = settings.Session()
# It is important that len(tasks) is less than max_active_tasks,
# because previously scheduler._execute_task_instances would only
# check the number of tasks once, so if max_active_tasks was 3
# we could execute arbitrarily many tasks in the second run
with dag_maker(dag_id=dag_id, max_active_tasks=16, session=session):
task1 = EmptyOperator(task_id=task_id_1, executor="default_exec")
task2 = EmptyOperator(task_id=task_id_2, executor="secondary_exec")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
def _create_dagruns():
dagrun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.RUNNING)
yield dagrun
for _ in range(3):
dagrun = dag_maker.create_dagrun_after(
dagrun,
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
)
yield dagrun
tis = []
for dr in _create_dagruns():
ti1 = dr.get_task_instance(task1.task_id, session)
tis.append(ti1)
ti2 = dr.get_task_instance(task2.task_id, session)
tis.append(ti2)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
scheduler_job.max_tis_per_query = 8
scheduler_job.executor.slots_available = 2 # Limit only the default executor to 2 slots.
# Check that we don't "overfill" an executor when the max tis per query is larger than slots
# available. Of the 8 tasks returned by the query, the default executor will only take 2 and the
# secondary executor will take 4 (since only 4 of the 8 TIs in the result will be for that executor)
res = self.job_runner._critical_section_enqueue_task_instances(session)
assert res == 6
scheduler_job.executor.slots_available = 6 # The default executor has more slots freed now and
# will take the other two TIs.
res = self.job_runner._critical_section_enqueue_task_instances(session)
assert res == 2
def test_execute_task_instances_unlimited(self, dag_maker, mock_executor):
"""Test that max_tis_per_query=0 is unlimited"""
dag_id = "SchedulerJobTest.test_execute_task_instances_unlimited"
task_id_1 = "dummy_task"
task_id_2 = "dummy_task_2"
session = settings.Session()
with dag_maker(dag_id=dag_id, max_active_tasks=1024, session=session):
task1 = EmptyOperator(task_id=task_id_1)
task2 = EmptyOperator(task_id=task_id_2)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
def _create_dagruns():
dagrun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.RUNNING)
yield dagrun
for _ in range(19):
dagrun = dag_maker.create_dagrun_after(
dagrun,
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
)
yield dagrun
for dr in _create_dagruns():
ti1 = dr.get_task_instance(task1.task_id, session)
ti2 = dr.get_task_instance(task2.task_id, session)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
scheduler_job.max_tis_per_query = 0
scheduler_job.executor.parallelism = 32
scheduler_job.executor.slots_available = 31
res = self.job_runner._critical_section_enqueue_task_instances(session)
# 20 dag runs * 2 tasks each = 40, but limited by number of slots available
assert res == 31
session.rollback()
@pytest.mark.parametrize(
("task1_exec", "task2_exec"),
[
("default_exec", "default_exec"),
("default_exec", "secondary_exec"),
("secondary_exec", "secondary_exec"),
],
)
def test_execute_task_instances_unlimited_multiple_executors(
self, task1_exec, task2_exec, dag_maker, mock_executors
):
"""Test that max_tis_per_query=0 is unlimited"""
dag_id = "SchedulerJobTest.test_execute_task_instances_unlimited"
task_id_1 = "dummy_task"
task_id_2 = "dummy_task_2"
session = settings.Session()
with dag_maker(dag_id=dag_id, max_active_tasks=1024, session=session):
task1 = EmptyOperator(task_id=task_id_1, executor=task1_exec)
task2 = EmptyOperator(task_id=task_id_2, executor=task2_exec)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
def _create_dagruns():
dagrun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.RUNNING)
yield dagrun
for _ in range(40):
dagrun = dag_maker.create_dagrun_after(
dagrun,
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
)
yield dagrun
for dr in _create_dagruns():
ti1 = dr.get_task_instance(task1.task_id, session)
ti2 = dr.get_task_instance(task2.task_id, session)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
scheduler_job.max_tis_per_query = 0
for executor in mock_executors:
executor.parallelism = 32
executor.slots_available = 31
total_enqueued = 0
with conf_vars({("core", "parallelism"): "40"}):
# 40 dag runs * 2 tasks each = 80. Two executors have capacity for 62 concurrent jobs, but they
# together respect core.parallelism and will not run more in aggregate than that allows.
total_enqueued += self.job_runner._critical_section_enqueue_task_instances(session)
if task1_exec != task2_exec:
# Two executors will execute up to core parallelism
assert total_enqueued == 40
else:
# A single executor will only run up to its available slots
assert total_enqueued == 31
session.rollback()
def test_adopt_or_reset_orphaned_tasks(self, dag_maker, session):
with dag_maker("test_execute_helper_reset_orphaned_tasks", session=session):
op1 = EmptyOperator(task_id="op1")
scheduler_job = Job()
session.add(scheduler_job)
session.flush()
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.QUEUED
ti.queued_by_job_id = scheduler_job.id
session.flush()
dr2 = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.QUEUED
ti2.queued_by_job_id = scheduler_job.id
session.flush()
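# A new scheduler job with a different id should treat the TIs queued by the old job as orphaned and reset them.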
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=0)
self.job_runner.adopt_or_reset_orphaned_tasks()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
assert ti.state == State.NONE
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
assert ti2.state == State.NONE, "Tasks run by Backfill Jobs should be treated the same"
def test_adopt_or_reset_orphaned_tasks_multiple_executors(self, dag_maker, mock_executors):
"""
Test that with multiple executors configured tasks are sorted correctly and handed off to the
correct executor for adoption.
"""
session = settings.Session()
with dag_maker("test_execute_helper_reset_orphaned_tasks_multiple_executors"):
op1 = EmptyOperator(task_id="op1")
op2 = EmptyOperator(task_id="op2", executor="default_exec")
op3 = EmptyOperator(task_id="op3", executor="secondary_exec")
dr = dag_maker.create_dagrun()
scheduler_job = Job()
session.add(scheduler_job)
session.commit()
ti1 = dr.get_task_instance(task_id=op1.task_id, session=session)
ti2 = dr.get_task_instance(task_id=op2.task_id, session=session)
ti3 = dr.get_task_instance(task_id=op3.task_id, session=session)
tis = [ti1, ti2, ti3]
for ti in tis:
ti.state = State.QUEUED
ti.queued_by_job_id = scheduler_job.id
session.commit()
new_scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=new_scheduler_job, num_runs=0)
self.job_runner.adopt_or_reset_orphaned_tasks()
# The default executor is called for ti1 (no explicit executor, so it falls back to the default) and
# ti2 (which we explicitly marked for execution by the default executor)
try:
mock_executors[0].try_adopt_task_instances.assert_called_once_with([ti1, ti2])
except AssertionError:
# The order of the TIs given to try_adopt_task_instances is not consistent, so check the other
# order first before allowing AssertionError to fail the test
mock_executors[0].try_adopt_task_instances.assert_called_once_with([ti2, ti1])
# Second executor called for ti3
mock_executors[1].try_adopt_task_instances.assert_called_once_with([ti3])
def test_adopt_sets_last_heartbeat_on_adopt(self, dag_maker, session, mock_executor):
with dag_maker("test_adopt_sets_last_heartbeat_on_adopt", session=session):
op1 = EmptyOperator(task_id="op1")
old_scheduler_job = Job()
session.add(old_scheduler_job)
session.flush()
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.QUEUED
ti.queued_by_job_id = old_scheduler_job.id
ti.last_heartbeat_at = None
session.commit()
# Executor adopts all TIs (returns an empty list of TIs to reset), so the TI is adopted
mock_executor.try_adopt_task_instances.return_value = []
new_scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=new_scheduler_job, num_runs=0)
self.job_runner.adopt_or_reset_orphaned_tasks(session=session)
ti.refresh_from_db(session=session)
assert ti.state == State.QUEUED
assert ti.queued_by_job_id == new_scheduler_job.id
assert ti.last_heartbeat_at is not None
def test_adopt_sets_dagrun_conf_when_none(self, dag_maker, session, mock_executor):
with dag_maker("test_adopt_sets_dagrun_conf_when_none", session=session):
op1 = EmptyOperator(task_id="op1")
old_scheduler_job = Job()
session.add(old_scheduler_job)
session.flush()
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
# Ensure conf starts as None
dr.conf = None
session.merge(dr)
session.flush()
dr = session.scalar(select(DagRun).where(DagRun.id == dr.id))
assert dr.conf is None
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.QUEUED
ti.queued_by_job_id = old_scheduler_job.id
session.commit()
# Executor adopts all TIs (returns an empty list of TIs to reset), so the TI is adopted
mock_executor.try_adopt_task_instances.return_value = []
new_scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=new_scheduler_job, num_runs=0)
self.job_runner.adopt_or_reset_orphaned_tasks(session=session)
# DagRun.conf should be set to {} on adoption when it was None
session.refresh(dr)
assert dr.conf == {}
def test_purge_without_heartbeat_skips_when_missing_dag_version(self, dag_maker, session, caplog):
with dag_maker("test_purge_without_heartbeat_skips_when_missing_dag_version", session=session):
EmptyOperator(task_id="task")
dag_run = dag_maker.create_dagrun(run_id="test_run", state=DagRunState.RUNNING)
mock_executor = MagicMock()
scheduler_job = Job(executor=mock_executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
ti = dag_run.get_task_instance(task_id="task", session=session)
ti.state = TaskInstanceState.RUNNING
ti.queued_by_job_id = scheduler_job.id
ti.last_heartbeat_at = timezone.utcnow() - timedelta(hours=1)
# Simulate missing dag_version
ti.dag_version_id = None
session.merge(ti)
session.commit()
with caplog.at_level("WARNING", logger="airflow.jobs.scheduler_job_runner"):
self.job_runner._purge_task_instances_without_heartbeats([ti], session=session)
# Should log a warning and skip processing
assert any("DAG Version not found for TaskInstance" in rec.message for rec in caplog.records)
mock_executor.send_callback.assert_not_called()
# State should be unchanged (not failed)
ti.refresh_from_db(session=session)
assert ti.state == TaskInstanceState.RUNNING
@staticmethod
def mock_failure_callback(context):
pass
@conf_vars({("scheduler", "num_stuck_in_queued_retries"): "2"})
def test_handle_stuck_queued_tasks_multiple_attempts(self, dag_maker, session, mock_executors):
"""Verify that tasks stuck in queued will be rescheduled up to N times."""
with dag_maker("test_fail_stuck_queued_tasks_multiple_executors"):
EmptyOperator(task_id="op1", on_failure_callback=TestSchedulerJob.mock_failure_callback)
EmptyOperator(task_id="op2", executor="default_exec")
def _queue_tasks(tis):
for ti in tis:
ti.state = "queued"
ti.queued_dttm = timezone.utcnow()
session.commit()
run_id = str(uuid4())
dr = dag_maker.create_dagrun(run_id=run_id)
tis = dr.get_task_instances(session=session)
_queue_tasks(tis=tis)
scheduler_job = Job()
scheduler = SchedulerJobRunner(job=scheduler_job, num_runs=0)
# job_runner._reschedule_stuck_task = MagicMock()
scheduler._task_queued_timeout = -300 # always in violation of timeout
with _loader_mock(mock_executors):
scheduler._handle_tasks_stuck_in_queued()
# If the task gets stuck in queued once, we reset it to scheduled
tis = dr.get_task_instances(session=session)
assert [x.state for x in tis] == ["scheduled", "scheduled"]
assert [x.queued_dttm for x in tis] == [None, None]
_queue_tasks(tis=tis)
log_events = [
x.event for x in session.scalars(select(Log).where(Log.run_id == run_id).order_by(Log.id)).all()
]
assert log_events == [
"stuck in queued reschedule",
"stuck in queued reschedule",
]
with _loader_mock(mock_executors):
scheduler._handle_tasks_stuck_in_queued()
log_events = [
x.event for x in session.scalars(select(Log).where(Log.run_id == run_id).order_by(Log.id)).all()
]
assert log_events == [
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
]
mock_executors[0].fail.assert_not_called()
tis = dr.get_task_instances(session=session)
assert [x.state for x in tis] == ["scheduled", "scheduled"]
_queue_tasks(tis=tis)
with _loader_mock(mock_executors):
scheduler._handle_tasks_stuck_in_queued()
log_events = [
x.event for x in session.scalars(select(Log).where(Log.run_id == run_id).order_by(Log.id)).all()
]
assert log_events == [
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued tries exceeded",
"stuck in queued tries exceeded",
]
mock_executors[
0
].send_callback.assert_called_once() # this should only be called for the task that has a callback
states = [x.state for x in dr.get_task_instances(session=session)]
assert states == ["failed", "failed"]
mock_executors[0].fail.assert_called()
@conf_vars({("scheduler", "num_stuck_in_queued_retries"): "2"})
def test_handle_stuck_queued_tasks_reschedule_sensors(self, dag_maker, session, mock_executors):
"""Reschedule sensors go in and out of running repeatedly using the same try_number
Make sure that they get three attempts per reschedule, not 3 attempts per try_number"""
with dag_maker("test_fail_stuck_queued_tasks_multiple_executors"):
EmptyOperator(task_id="op1", on_failure_callback=TestSchedulerJob.mock_failure_callback)
EmptyOperator(task_id="op2", executor="default_exec")
def _queue_tasks(tis):
for ti in tis:
ti.state = "queued"
ti.queued_dttm = timezone.utcnow()
session.commit()
def _add_running_event(tis):
for ti in tis:
updated_entry = Log(
dttm=timezone.utcnow(),
dag_id=ti.dag_id,
task_id=ti.task_id,
map_index=ti.map_index,
event="running",
run_id=ti.run_id,
try_number=ti.try_number,
)
session.add(updated_entry)
run_id = str(uuid4())
dr = dag_maker.create_dagrun(run_id=run_id)
tis = dr.get_task_instances(session=session)
_queue_tasks(tis=tis)
scheduler_job = Job()
scheduler = SchedulerJobRunner(job=scheduler_job, num_runs=0)
# job_runner._reschedule_stuck_task = MagicMock()
scheduler._task_queued_timeout = -300 # always in violation of timeout
with _loader_mock(mock_executors):
scheduler._handle_tasks_stuck_in_queued()
# If the task gets stuck in queued once, we reset it to scheduled
tis = dr.get_task_instances(session=session)
assert [x.state for x in tis] == ["scheduled", "scheduled"]
assert [x.queued_dttm for x in tis] == [None, None]
_queue_tasks(tis=tis)
log_events = [
x.event for x in session.scalars(select(Log).where(Log.run_id == run_id).order_by(Log.id)).all()
]
assert log_events == [
"stuck in queued reschedule",
"stuck in queued reschedule",
]
with _loader_mock(mock_executors):
scheduler._handle_tasks_stuck_in_queued()
log_events = [
x.event for x in session.scalars(select(Log).where(Log.run_id == run_id).order_by(Log.id)).all()
]
assert log_events == [
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
]
mock_executors[0].fail.assert_not_called()
tis = dr.get_task_instances(session=session)
assert [x.state for x in tis] == ["scheduled", "scheduled"]
_add_running_event(tis) # This should "reset" the count of stuck queued
for _ in range(3): # Should be able to be stuck 3 more times before failing
_queue_tasks(tis=tis)
with _loader_mock(mock_executors):
scheduler._handle_tasks_stuck_in_queued()
tis = dr.get_task_instances(session=session)
log_events = [
x.event for x in session.scalars(select(Log).where(Log.run_id == run_id).order_by(Log.id)).all()
]
assert log_events == [
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"running",
"running",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued reschedule",
"stuck in queued tries exceeded",
"stuck in queued tries exceeded",
]
mock_executors[
0
].send_callback.assert_called_once() # this should only be called for the task that has a callback
states = [x.state for x in dr.get_task_instances(session=session)]
assert states == ["failed", "failed"]
mock_executors[0].fail.assert_called()
def test_revoke_task_not_imp_tolerated(self, dag_maker, session, caplog):
"""Test that if executor no implement revoke_task then we don't blow up."""
with dag_maker("test_fail_stuck_queued_tasks"):
op1 = EmptyOperator(task_id="op1")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.QUEUED
ti.queued_dttm = timezone.utcnow() - timedelta(minutes=15)
session.commit()
from airflow.executors.local_executor import LocalExecutor
assert "revoke_task" in BaseExecutor.__dict__
# this is just verifying that LocalExecutor is good enough for this test
# in that it does not implement revoke_task
assert "revoke_task" not in LocalExecutor.__dict__
scheduler_job = Job(executor=LocalExecutor())
job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=0)
job_runner._task_queued_timeout = 300
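# Should not raise even though LocalExecutor does not implement revoke_task.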
job_runner._handle_tasks_stuck_in_queued()
def test_executor_end_called(self, mock_executors):
"""
Test to make sure executor.end gets called with a successful scheduler loop run
"""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
scheduler_job.executor.end.assert_called_once()
def test_executor_end_called_multiple_executors(self, mock_executors):
"""
Test to make sure executor.end gets called on all executors with a successful scheduler loop run
"""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
scheduler_job.executor.end.assert_called_once()
for executor in scheduler_job.executors:
executor.end.assert_called_once()
def test_cleanup_methods_all_called(self):
"""
Test to make sure all cleanup methods are called when the scheduler loop has an exception
"""
scheduler_job = Job(executor=mock.MagicMock(slots_available=8))
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._run_scheduler_loop = mock.MagicMock(side_effect=RuntimeError("oops"))
scheduler_job.executor.end = mock.MagicMock(side_effect=RuntimeError("triple oops"))
with pytest.raises(RuntimeError, match="oops"):
run_job(scheduler_job, execute_callable=self.job_runner._execute)
scheduler_job.executor.end.assert_called_once()
def test_cleanup_methods_all_called_multiple_executors(self, mock_executors):
"""
Test to make sure all cleanup methods are called when the scheduler loop has an exception
"""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._run_scheduler_loop = mock.MagicMock(side_effect=RuntimeError("oops"))
scheduler_job.executor.end = mock.MagicMock(side_effect=RuntimeError("triple oops"))
with pytest.raises(RuntimeError, match="oops"):
run_job(scheduler_job, execute_callable=self.job_runner._execute)
for executor in scheduler_job.executors:
executor.end.assert_called_once()
def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker):
"""This tests that queued dagruns stops creating once max_active_runs is reached"""
with dag_maker(max_active_runs=10) as dag:
EmptyOperator(task_id="mytask")
session = settings.Session()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
orm_dag = session.get(DagModel, dag.dag_id)
assert orm_dag is not None
for _ in range(20):
self.job_runner._create_dag_runs([orm_dag], session)
drs = session.query(DagRun).all()
assert len(drs) == 10
for dr in drs:
dr.state = State.RUNNING
session.merge(dr)
session.commit()
assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10
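# With 10 RUNNING runs (== max_active_runs), further scheduling passes must not create additional runs.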
for _ in range(20):
self.job_runner._create_dag_runs([orm_dag], session)
assert session.query(DagRun).count() == 10
assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10
assert session.query(DagRun.state).filter(DagRun.state == State.QUEUED).count() == 0
assert orm_dag.next_dagrun_create_after is None
def test_runs_are_created_after_max_active_runs_was_reached(self, dag_maker, session):
"""
Test that new runs are still created after max_active_runs was reached, i.e. the limit does not stick
"""
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Use catchup=True to ensure proper run creation behavior after max_active_runs is no longer reached
with dag_maker(max_active_runs=1, session=session, catchup=True) as dag:
# Need to use something that doesn't immediately get marked as success by the scheduler
BashOperator(task_id="task", bash_command="true")
dag_run = dag_maker.create_dagrun(state=State.RUNNING, session=session, run_type=DagRunType.SCHEDULED)
# Reach max_active_runs
for _ in range(3):
self.job_runner._do_scheduling(session)
# Complete the dagrun
# Add dag_run back into the session (_do_scheduling does an expunge_all)
dag_run = session.merge(dag_run)
session.refresh(dag_run)
dag_run.get_task_instance(task_id="task", session=session).state = State.SUCCESS
# create new run
for _ in range(3):
self.job_runner._do_scheduling(session)
# Assert that a new run has been created
dag_runs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(dag_runs) == 2
@pytest.mark.parametrize(
("ti_state", "final_ti_span_status"),
[
pytest.param(State.SUCCESS, SpanStatus.ENDED, id="dr_ended_successfully"),
pytest.param(State.RUNNING, SpanStatus.ACTIVE, id="dr_still_running"),
],
)
def test_recreate_unhealthy_scheduler_spans_if_needed(self, ti_state, final_ti_span_status, dag_maker):
with dag_maker(
dag_id="test_recreate_unhealthy_scheduler_spans_if_needed",
start_date=DEFAULT_DATE,
max_active_runs=1,
dagrun_timeout=datetime.timedelta(seconds=60),
):
EmptyOperator(task_id="dummy")
session = settings.Session()
old_job = Job()
old_job.job_type = SchedulerJobRunner.job_type
session.add(old_job)
session.commit()
assert old_job.is_alive() is False
new_job = Job()
new_job.job_type = SchedulerJobRunner.job_type
session.add(new_job)
session.flush()
self.job_runner = SchedulerJobRunner(job=new_job)
self.job_runner.active_spans = ThreadSafeDict()
assert len(self.job_runner.active_spans.get_all()) == 0
dr = dag_maker.create_dagrun()
dr.state = State.RUNNING
dr.span_status = SpanStatus.ACTIVE
dr.scheduled_by_job_id = old_job.id
ti = dr.get_task_instances(session=session)[0]
ti.state = ti_state
ti.start_date = timezone.utcnow()
ti.span_status = SpanStatus.ACTIVE
ti.queued_by_job_id = old_job.id
session.merge(ti)
session.merge(dr)
session.commit()
assert dr.scheduled_by_job_id != self.job_runner.job.id
assert dr.scheduled_by_job_id == old_job.id
assert dr.run_id is not None
assert dr.state == State.RUNNING
assert dr.span_status == SpanStatus.ACTIVE
assert self.job_runner.active_spans.get("dr:" + str(dr.id)) is None
assert self.job_runner.active_spans.get("ti:" + ti.id) is None
assert ti.state == ti_state
assert ti.span_status == SpanStatus.ACTIVE
self.job_runner._recreate_unhealthy_scheduler_spans_if_needed(dr, session)
assert self.job_runner.active_spans.get("dr:" + str(dr.id)) is not None
if final_ti_span_status == SpanStatus.ACTIVE:
assert self.job_runner.active_spans.get("ti:" + ti.id) is not None
assert len(self.job_runner.active_spans.get_all()) == 2
else:
assert self.job_runner.active_spans.get("ti:" + ti.id) is None
assert len(self.job_runner.active_spans.get_all()) == 1
assert dr.span_status == SpanStatus.ACTIVE
assert ti.span_status == final_ti_span_status
def test_end_spans_of_externally_ended_ops(self, dag_maker):
with dag_maker(
dag_id="test_end_spans_of_externally_ended_ops",
start_date=DEFAULT_DATE,
max_active_runs=1,
dagrun_timeout=datetime.timedelta(seconds=60),
):
EmptyOperator(task_id="dummy")
session = settings.Session()
job = Job()
job.job_type = SchedulerJobRunner.job_type
session.add(job)
self.job_runner = SchedulerJobRunner(job=job)
self.job_runner.active_spans = ThreadSafeDict()
assert len(self.job_runner.active_spans.get_all()) == 0
dr = dag_maker.create_dagrun()
dr.state = State.SUCCESS
dr.span_status = SpanStatus.SHOULD_END
ti = dr.get_task_instances(session=session)[0]
ti.state = State.SUCCESS
ti.span_status = SpanStatus.SHOULD_END
ti.context_carrier = {}
session.merge(ti)
session.merge(dr)
session.commit()
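# Create real spans and register them so the runner has active spans to end.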
dr_span = Trace.start_root_span(span_name="dag_run_span", start_as_current=False)
ti_span = Trace.start_child_span(span_name="ti_span", start_as_current=False)
self.job_runner.active_spans.set("dr:" + str(dr.id), dr_span)
self.job_runner.active_spans.set("ti:" + ti.id, ti_span)
assert dr.span_status == SpanStatus.SHOULD_END
assert ti.span_status == SpanStatus.SHOULD_END
assert self.job_runner.active_spans.get("dr:" + str(dr.id)) is not None
assert self.job_runner.active_spans.get("ti:" + ti.id) is not None
self.job_runner._end_spans_of_externally_ended_ops(session)
assert dr.span_status == SpanStatus.ENDED
assert ti.span_status == SpanStatus.ENDED
assert self.job_runner.active_spans.get("dr:" + str(dr.id)) is None
assert self.job_runner.active_spans.get("ti:" + ti.id) is None
@pytest.mark.parametrize(
("state", "final_span_status"),
[
pytest.param(State.SUCCESS, SpanStatus.ENDED, id="dr_ended_successfully"),
pytest.param(State.RUNNING, SpanStatus.NEEDS_CONTINUANCE, id="dr_still_running"),
],
)
def test_end_active_spans(self, state, final_span_status, dag_maker):
with dag_maker(
dag_id="test_end_active_spans",
start_date=DEFAULT_DATE,
max_active_runs=1,
dagrun_timeout=datetime.timedelta(seconds=60),
):
EmptyOperator(task_id="dummy")
session = settings.Session()
job = Job()
job.job_type = SchedulerJobRunner.job_type
self.job_runner = SchedulerJobRunner(job=job)
self.job_runner.active_spans = ThreadSafeDict()
assert len(self.job_runner.active_spans.get_all()) == 0
dr = dag_maker.create_dagrun()
dr.state = state
dr.span_status = SpanStatus.ACTIVE
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.span_status = SpanStatus.ACTIVE
ti.context_carrier = {}
session.merge(ti)
session.merge(dr)
session.commit()
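# Register spans for the dag run and the TI so _end_active_spans can end them or mark them for continuance.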
dr_span = Trace.start_root_span(span_name="dag_run_span", start_as_current=False)
ti_span = Trace.start_child_span(span_name="ti_span", start_as_current=False)
self.job_runner.active_spans.set("dr:" + str(dr.id), dr_span)
self.job_runner.active_spans.set("ti:" + ti.id, ti_span)
assert dr.span_status == SpanStatus.ACTIVE
assert ti.span_status == SpanStatus.ACTIVE
assert self.job_runner.active_spans.get("dr:" + str(dr.id)) is not None
assert self.job_runner.active_spans.get("ti:" + ti.id) is not None
assert len(self.job_runner.active_spans.get_all()) == 2
self.job_runner._end_active_spans(session)
assert dr.span_status == final_span_status
assert ti.span_status == final_span_status
assert self.job_runner.active_spans.get("dr:" + str(dr.id)) is None
assert self.job_runner.active_spans.get("ti:" + ti.id) is None
assert len(self.job_runner.active_spans.get_all()) == 0
def test_dagrun_timeout_verify_max_active_runs(self, dag_maker):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached
"""
with dag_maker(
dag_id="test_scheduler_verify_max_active_runs_and_dagrun_timeout",
start_date=DEFAULT_DATE,
max_active_runs=1,
dagrun_timeout=datetime.timedelta(seconds=60),
) as dag:
EmptyOperator(task_id="dummy")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
orm_dag = session.get(DagModel, dag.dag_id)
assert orm_dag is not None
self.job_runner._create_dag_runs([orm_dag], session)
self.job_runner._start_queued_dagruns(session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
assert orm_dag.next_dagrun_create_after is None
# But we should record the date of _what run_ it would be
assert isinstance(orm_dag.next_dagrun, datetime.datetime)
assert isinstance(orm_dag.next_dagrun_data_interval_start, datetime.datetime)
assert isinstance(orm_dag.next_dagrun_data_interval_end, datetime.datetime)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.flush()
callback = self.job_runner._schedule_dag_run(dr, session)
session.flush()
session.refresh(dr)
assert dr.state == State.FAILED
session.refresh(orm_dag)
assert isinstance(orm_dag.next_dagrun, datetime.datetime)
assert isinstance(orm_dag.next_dagrun_data_interval_start, datetime.datetime)
assert isinstance(orm_dag.next_dagrun_data_interval_end, datetime.datetime)
assert isinstance(orm_dag.next_dagrun_create_after, datetime.datetime)
expected_callback = DagCallbackRequest(
filepath=dr.dag.relative_fileloc,
dag_id=dr.dag_id,
is_failure_callback=True,
run_id=dr.run_id,
bundle_name=orm_dag.bundle_name,
bundle_version=orm_dag.bundle_version,
context_from_server=DagRunContext(
dag_run=dr,
last_ti=dr.get_last_ti(dag, session),
),
msg="timed_out",
)
# Verify dag failure callback request is sent
assert callback == expected_callback
session.rollback()
session.close()
def test_dagrun_timeout_fails_run(self, dag_maker):
"""
Test that a dagrun will be set to failed if it times out, even without max_active_runs
"""
session = settings.Session()
with dag_maker(
dag_id="test_scheduler_fail_dagrun_timeout",
dagrun_timeout=datetime.timedelta(seconds=60),
session=session,
):
EmptyOperator(task_id="dummy")
dr = dag_maker.create_dagrun(start_date=timezone.utcnow() - datetime.timedelta(days=1))
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
callback = self.job_runner._schedule_dag_run(dr, session)
session.flush()
session.refresh(dr)
assert dr.state == State.FAILED
assert isinstance(callback, DagCallbackRequest)
assert callback.dag_id == dr.dag_id
assert callback.run_id == dr.run_id
assert callback.msg == "timed_out"
session.rollback()
session.close()
def test_dagrun_timeout_fails_run_and_update_next_dagrun(self, dag_maker):
"""
Test that a dagrun timeout fails the run and updates the next dagrun
"""
session = settings.Session()
# Explicitly set catchup=True as test specifically expects runs to be created in date order
with dag_maker(
max_active_runs=1,
dag_id="test_scheduler_fail_dagrun_timeout",
dagrun_timeout=datetime.timedelta(seconds=60),
catchup=True,
):
EmptyOperator(task_id="dummy")
dr = dag_maker.create_dagrun(start_date=timezone.utcnow() - datetime.timedelta(days=1))
# check that next_dagrun is dr.logical_date
assert dag_maker.dag_model.next_dagrun == dr.logical_date
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._schedule_dag_run(dr, session)
session.flush()
session.refresh(dr)
assert dr.state == State.FAILED
# check that next_dagrun_create_after has been updated by calculate_dagrun_date_fields
assert dag_maker.dag_model.next_dagrun_create_after == dr.logical_date + timedelta(days=1)
# check that there are no running/queued runs yet
assert (
session.query(DagRun).filter(DagRun.state.in_([DagRunState.RUNNING, DagRunState.QUEUED])).count()
== 0
)
@pytest.mark.parametrize(
("state", "expected_callback_msg"), [(State.SUCCESS, "success"), (State.FAILED, "task_failure")]
)
def test_dagrun_callbacks_are_called(self, state, expected_callback_msg, dag_maker, session):
"""
Test that when the DagRun finishes, the corresponding success or failure callback is sent to the DagFileProcessor.
"""
with dag_maker(
dag_id="test_dagrun_callbacks_are_called",
on_success_callback=lambda x: print("success"),
on_failure_callback=lambda x: print("failed"),
session=session,
) as dag:
EmptyOperator(task_id="dummy")
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("dummy", session)
ti.set_state(state, session)
session.flush()
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
expected_callback = DagCallbackRequest(
filepath=dag.relative_fileloc,
dag_id=dr.dag_id,
is_failure_callback=bool(state == State.FAILED),
run_id=dr.run_id,
msg=expected_callback_msg,
bundle_name="dag_maker",
bundle_version=None,
context_from_server=DagRunContext(
dag_run=dr,
last_ti=ti,
),
)
# Verify the dag callback request is sent to the file processor
scheduler_job.executor.callback_sink.send.assert_called_once_with(expected_callback)
session.rollback()
session.close()
@pytest.mark.parametrize(
("state", "expected_callback_msg"), [(State.SUCCESS, "success"), (State.FAILED, "task_failure")]
)
def test_dagrun_plugins_are_notified(self, state, expected_callback_msg, dag_maker, session):
"""
Test that DAG listeners are notified when a DagRun reaches a terminal state.
"""
with dag_maker(
dag_id="test_dagrun_callbacks_are_called",
on_success_callback=lambda x: print("success"),
on_failure_callback=lambda x: print("failed"),
session=session,
):
EmptyOperator(task_id="dummy")
dag_listener.clear()
get_listener_manager().add_listener(dag_listener)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("dummy", session)
ti.set_state(state, session)
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
assert len(dag_listener.success) or len(dag_listener.failure)
dag_listener.success = []
dag_listener.failure = []
session.rollback()
def test_dagrun_timeout_callbacks_are_stored_in_database(self, dag_maker, session):
with dag_maker(
dag_id="test_dagrun_timeout_callbacks_are_stored_in_database",
on_failure_callback=lambda x: print("failed"),
dagrun_timeout=timedelta(hours=1),
session=session,
) as dag:
EmptyOperator(task_id="empty")
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
scheduler_job.executor.callback_sink = DatabaseCallbackSink()
dr = dag_maker.create_dagrun(start_date=DEFAULT_DATE)
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
callback = (
session.query(DbCallbackRequest)
.order_by(DbCallbackRequest.id.desc())
.first()
.get_callback_request()
)
expected_callback = DagCallbackRequest(
filepath=dag.relative_fileloc,
dag_id=dr.dag_id,
is_failure_callback=True,
run_id=dr.run_id,
msg="timed_out",
bundle_name="dag_maker",
bundle_version=None,
context_from_server=DagRunContext(
dag_run=dr,
last_ti=dr.get_last_ti(dag, session),
),
)
assert callback == expected_callback
def test_dagrun_callbacks_commited_before_sent(self, dag_maker):
"""
Tests that before any callbacks are sent to the processor, the session is committed. This ensures
that the dagrun details are up to date when the callbacks are run.
"""
with dag_maker(dag_id="test_dagrun_callbacks_commited_before_sent"):
EmptyOperator(task_id="dummy")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._send_dag_callbacks_to_processor = mock.Mock()
self.job_runner._schedule_dag_run = mock.Mock()
dr = dag_maker.create_dagrun()
session = settings.Session()
ti = dr.get_task_instance("dummy")
ti.set_state(State.SUCCESS, session)
with (
mock.patch.object(settings, "USE_JOB_SCHEDULE", False),
mock.patch("airflow.jobs.scheduler_job_runner.prohibit_commit") as mock_guard,
):
mock_guard.return_value.__enter__.return_value.commit.side_effect = session.commit
def mock_schedule_dag_run(*args, **kwargs):
mock_guard.reset_mock()
return None
def mock_send_dag_callbacks_to_processor(*args, **kwargs):
mock_guard.return_value.__enter__.return_value.commit.assert_called()
self.job_runner._send_dag_callbacks_to_processor.side_effect = (
mock_send_dag_callbacks_to_processor
)
self.job_runner._schedule_dag_run.side_effect = mock_schedule_dag_run
self.job_runner._do_scheduling(session)
# Verify the dag callback request is sent to the file processor
self.job_runner._send_dag_callbacks_to_processor.assert_called_once()
# and mock_send_dag_callbacks_to_processor has asserted the callback was sent after a commit
session.rollback()
session.close()
@pytest.mark.parametrize("state", [State.SUCCESS, State.FAILED])
def test_dagrun_callbacks_are_not_added_when_callbacks_are_not_defined(self, state, dag_maker, session):
"""
Test that if no on_*_callback is defined on the DAG, callbacks are not registered or sent to the DAG Processor
"""
with dag_maker(
dag_id="test_dagrun_callbacks_are_not_added_when_callbacks_are_not_defined",
session=session,
):
BashOperator(task_id="test_task", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._send_dag_callbacks_to_processor = mock.Mock()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("test_task", session)
ti.set_state(state, session)
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
# Verify the callback is not set (i.e. is None) when no callbacks are defined on the DAG
self.job_runner._send_dag_callbacks_to_processor.assert_called_once()
call_args = self.job_runner._send_dag_callbacks_to_processor.call_args.args
assert call_args[0].dag_id == dr.dag_id
assert call_args[1] is None
session.rollback()
@pytest.mark.parametrize(("state", "msg"), [[State.SUCCESS, "success"], [State.FAILED, "task_failure"]])
def test_dagrun_callbacks_are_added_when_callbacks_are_defined(self, state, msg, dag_maker):
"""
Test that if on_*_callback are defined on the DAG, callbacks ARE registered and sent to the DAG Processor
"""
with dag_maker(
dag_id="test_dagrun_callbacks_are_added_when_callbacks_are_defined",
on_failure_callback=lambda: True,
on_success_callback=lambda: True,
):
BashOperator(task_id="test_task", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._send_dag_callbacks_to_processor = mock.Mock()
session = settings.Session()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("test_task")
ti.set_state(state, session)
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
# Verify the callback is set (i.e. is not None) when callbacks are defined on the DAG
self.job_runner._send_dag_callbacks_to_processor.assert_called_once()
call_args = self.job_runner._send_dag_callbacks_to_processor.call_args.args
assert call_args[0].dag_id == dr.dag_id
assert call_args[1] is not None
assert call_args[1].msg == msg
session.rollback()
session.close()
def test_dagrun_notify_called_success(self, dag_maker):
with dag_maker(
dag_id="test_dagrun_notify_called",
on_success_callback=lambda x: print("success"),
on_failure_callback=lambda x: print("failed"),
):
EmptyOperator(task_id="dummy")
dag_listener.clear()
get_listener_manager().add_listener(dag_listener)
executor = MockExecutor(do_update=False)
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
session = settings.Session()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance("dummy")
ti.set_state(State.SUCCESS, session)
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
assert dag_listener.success[0].dag_id == dr.dag_id
assert dag_listener.success[0].run_id == dr.run_id
assert dag_listener.success[0].state == DagRunState.SUCCESS
def test_do_not_schedule_removed_task(self, dag_maker, session):
"""Test that scheduler doesn't schedule task instances for tasks removed from DAG."""
interval = datetime.timedelta(days=1)
dag_id = "test_scheduler_do_not_schedule_removed_task"
# Create initial DAG with a task
with dag_maker(
dag_id=dag_id,
schedule=interval,
start_date=DEFAULT_DATE,
):
EmptyOperator(task_id="dummy")
# Create a dagrun for the initial DAG
dr = dag_maker.create_dagrun()
assert dr is not None
# Verify the task instance was created
initial_tis = (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == dag_id, TaskInstance.task_id == "dummy")
.all()
)
assert len(initial_tis) == 1
# Update the DAG to remove the task (simulate DAG file change)
with dag_maker(
dag_id=dag_id,
schedule=interval,
start_date=DEFAULT_DATE,
):
pass # No tasks in the DAG now
# Create a new dagrun for the updated DAG
dr2 = dag_maker.create_dagrun(logical_date=DEFAULT_DATE + interval, run_id="test_run_2")
assert dr2 is not None
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Try to find executable task instances - should not find any for the removed task
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
# Should be empty because the task no longer exists in the DAG
assert res == []
# Verify no new task instances were created for the removed task in the new dagrun
new_tis = (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id == "dummy",
TaskInstance.run_id == "test_run_2",
)
.all()
)
assert len(new_tis) == 0
@pytest.mark.parametrize(
("ti_states", "run_state"),
[
(["failed", "success"], "failed"),
(["success", "success"], "success"),
],
)
def test_dagrun_state_correct(self, ti_states, run_state, dag_maker, session):
"""
        The DagRun should end up FAILED when any root task fails and SUCCESS when all root tasks succeed.
"""
with dag_maker():
@task
def my_task(): ...
for _ in ti_states:
my_task()
dr = dag_maker.create_dagrun(state="running", triggered_by=DagRunTriggeredByType.TIMETABLE)
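        # Force the task instances into the parametrized terminal states before scheduling.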
for idx, state in enumerate(ti_states):
dr.task_instances[idx].state = state
session.commit()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._do_scheduling(session)
assert (
session.query(DagRun).filter(DagRun.dag_id == dr.dag_id, DagRun.run_id == dr.run_id).one().state
== run_state
)
def test_dagrun_root_after_dagrun_unfinished(self, mock_executor, testing_dag_bundle):
"""
DagRuns with one successful and one future root task -> SUCCESS
        Note: the DagRun state could still be RUNNING during CI.
"""
dagbag = DagBag(TEST_DAG_FOLDER, include_examples=False)
sync_bag_to_db(dagbag, "testing", None)
dag_id = "test_dagrun_states_root_future"
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=2)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
first_run = DagRun.find(dag_id=dag_id)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
assert ti_ids == [("current", State.SUCCESS)]
assert first_run.state in [State.SUCCESS, State.RUNNING]
def test_scheduler_start_date(self, testing_dag_bundle):
"""
Test that the scheduler respects start_dates, even when DAGs have run
"""
dagbag = DagBag(TEST_DAG_FOLDER, include_examples=False)
with create_session() as session:
dag_id = "test_start_date_scheduling"
dag = dagbag.get_dag(dag_id)
# Deactivate other dags in this file
other_dag = dagbag.get_dag("test_task_start_date_scheduling")
other_dag.is_paused_upon_creation = True
scheduler_dag, _ = sync_dags_to_db([dag, other_dag])
scheduler_dag.clear()
assert scheduler_dag.start_date > datetime.datetime.now(timezone.utc)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
# zero tasks ran
assert len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()) == 0
session.commit()
assert self.null_exec.sorted_tasks == []
            # Previously, running this backfill would kick off the scheduler
            # because it would take the most recent run and start from there.
            # That behavior still exists, but now it will only do so if the run
            # is after the start date
data_interval_end = DEFAULT_DATE + timedelta(days=1)
scheduler_dag.create_dagrun(
state="success",
triggered_by=DagRunTriggeredByType.TIMETABLE,
run_id="abc123",
logical_date=DEFAULT_DATE,
run_type=DagRunType.BACKFILL_JOB,
data_interval=DataInterval(DEFAULT_DATE, data_interval_end),
run_after=data_interval_end,
)
# one task "ran"
assert len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()) == 1
session.commit()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
# still one task
assert len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()) == 1
session.commit()
assert self.null_exec.sorted_tasks == []
def test_scheduler_task_start_date_catchup_true(self, testing_dag_bundle):
"""
Test that with catchup=True, the scheduler respects task start dates that are different from DAG start dates
"""
dagbag = DagBag(
dag_folder=os.path.join(settings.DAGS_FOLDER, "test_scheduler_dags.py"),
include_examples=False,
)
dag_id = "test_task_start_date_scheduling"
dag = dagbag.get_dag(dag_id)
# Explicitly set catchup=True
dag.catchup = True
dag.is_paused_upon_creation = False
dagbag.bag_dag(dag=dag)
# Deactivate other dags in this file
other_dag = dagbag.get_dag("test_start_date_scheduling")
other_dag.is_paused_upon_creation = True
dagbag.bag_dag(dag=other_dag)
sync_bag_to_db(dagbag, "testing", None)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=3)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
session = settings.Session()
tiq = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id)
ti1s = tiq.filter(TaskInstance.task_id == "dummy1").all()
ti2s = tiq.filter(TaskInstance.task_id == "dummy2").all()
# With catchup=True, future task start dates are respected
assert len(ti1s) == 0, "Expected no instances for dummy1 (start date in future with catchup=True)"
assert len(ti2s) >= 2, "Expected multiple instances for dummy2"
for ti in ti2s:
assert ti.state == State.SUCCESS
def test_scheduler_task_start_date_catchup_false(self, testing_dag_bundle):
"""
Test that with catchup=False, the scheduler ignores task start dates and schedules for the most recent interval
"""
dagbag = DagBag(
dag_folder=os.path.join(settings.DAGS_FOLDER, "test_scheduler_dags.py"),
include_examples=False,
)
dag_id = "test_task_start_date_scheduling"
dag = dagbag.get_dag(dag_id)
dag.catchup = False
dag.is_paused_upon_creation = False
dagbag.bag_dag(dag=dag)
# Deactivate other dags in this file
other_dag = dagbag.get_dag("test_start_date_scheduling")
other_dag.is_paused_upon_creation = True
dagbag.bag_dag(dag=other_dag)
sync_bag_to_db(dagbag, "testing", None)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=3)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
session = settings.Session()
tiq = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id)
ti1s = tiq.filter(TaskInstance.task_id == "dummy1").all()
ti2s = tiq.filter(TaskInstance.task_id == "dummy2").all()
# With catchup=False, future task start dates are ignored
assert len(ti1s) >= 1, "Expected instances for dummy1 (ignoring future start date with catchup=False)"
assert len(ti2s) >= 1, "Expected instances for dummy2"
# Check that both tasks are scheduled for the same recent interval
if ti1s and ti2s:
recent_ti1 = ti1s[0]
recent_ti2 = ti2s[0]
assert recent_ti1.logical_date == recent_ti2.logical_date, (
"Both tasks should be scheduled for the same interval"
)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dagbag = DagBag(TEST_DAG_FOLDER, include_examples=False)
dag_ids = [
"test_start_date_scheduling",
"test_task_start_date_scheduling",
]
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
if not dag:
raise ValueError(f"could not find dag {dag_id}")
create_scheduler_dag(dag).clear()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
run_job(scheduler_job, execute_callable=self.job_runner._execute)
# zero tasks ran
dag_id = "test_start_date_scheduling"
session = settings.Session()
assert len(session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).all()) == 0
def test_scheduler_verify_pool_full(self, dag_maker, mock_executor):
"""
Test task instances not queued when pool is full
"""
with dag_maker(dag_id="test_scheduler_verify_pool_full"):
BashOperator(
task_id="dummy",
pool="test_scheduler_verify_pool_full",
bash_command="echo hi",
)
session = settings.Session()
pool = Pool(pool="test_scheduler_verify_pool_full", slots=1, include_deferred=False)
session.add(pool)
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Create 2 dagruns, which will create 2 task instances.
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
self.job_runner._schedule_dag_run(dr, session)
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)
self.job_runner._schedule_dag_run(dr, session)
session.flush()
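        # The pool has a single slot, so only one of the two scheduled TIs can be queued.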
task_instances_list = self.job_runner._executable_task_instances_to_queued(
max_tis=32, session=session
)
assert len(task_instances_list) == 1
@pytest.mark.need_serialized_dag
def test_scheduler_verify_pool_full_2_slots_per_task(self, dag_maker, session, mock_executor):
"""
Test task instances not queued when pool is full.
Variation with non-default pool_slots
"""
# Explicitly set catchup=True as tests expect runs to be created in date order
with dag_maker(
dag_id="test_scheduler_verify_pool_full_2_slots_per_task",
start_date=DEFAULT_DATE,
session=session,
catchup=True,
):
BashOperator(
task_id="dummy",
pool="test_scheduler_verify_pool_full_2_slots_per_task",
pool_slots=2,
bash_command="echo hi",
)
pool = Pool(pool="test_scheduler_verify_pool_full_2_slots_per_task", slots=6, include_deferred=False)
session.add(pool)
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Create 5 dagruns, which will create 5 task instances.
def _create_dagruns():
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
yield dr
for _ in range(4):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED)
yield dr
for dr in _create_dagruns():
self.job_runner._schedule_dag_run(dr, session)
task_instances_list = self.job_runner._executable_task_instances_to_queued(
max_tis=32, session=session
)
# As tasks require 2 slots, only 3 can fit into 6 available
assert len(task_instances_list) == 3
@pytest.mark.need_serialized_dag
def test_scheduler_keeps_scheduling_pool_full(self, dag_maker, mock_executor):
"""
        Test that task instances in a pool that isn't full keep getting scheduled even when another pool is full.
"""
session = settings.Session()
pool_p1 = Pool(pool="test_scheduler_keeps_scheduling_pool_full_p1", slots=1, include_deferred=False)
pool_p2 = Pool(pool="test_scheduler_keeps_scheduling_pool_full_p2", slots=10, include_deferred=False)
session.add(pool_p1)
session.add(pool_p2)
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# We'll use this to create 30 dagruns for each DAG.
# To increase the chances the TIs from the "full" pool will get
# retrieved first, we schedule all TIs from the first dag first.
def _create_dagruns(dag: SerializedDAG):
next_info = dag.next_dagrun_info(None)
assert next_info is not None
for i in range(30):
yield dag.create_dagrun(
run_id=f"scheduled_{i}",
run_type=DagRunType.SCHEDULED,
logical_date=next_info.logical_date,
data_interval=next_info.data_interval,
run_after=next_info.run_after,
state=DagRunState.RUNNING,
triggered_by=DagRunTriggeredByType.TEST,
session=session,
)
next_info = dag.next_dagrun_info(next_info.data_interval)
if next_info is None:
break
with dag_maker(
dag_id="test_scheduler_keeps_scheduling_pool_full_d1",
start_date=DEFAULT_DATE,
) as dag_d1:
BashOperator(
task_id="test_scheduler_keeps_scheduling_pool_full_t1",
pool="test_scheduler_keeps_scheduling_pool_full_p1",
bash_command="echo hi",
)
for dr in _create_dagruns(dag_d1):
self.job_runner._schedule_dag_run(dr, session)
with dag_maker(
dag_id="test_scheduler_keeps_scheduling_pool_full_d2",
start_date=DEFAULT_DATE,
) as dag_d2:
BashOperator(
task_id="test_scheduler_keeps_scheduling_pool_full_t2",
pool="test_scheduler_keeps_scheduling_pool_full_p2",
bash_command="echo hi",
)
for dr in _create_dagruns(dag_d2):
self.job_runner._schedule_dag_run(dr, session)
self.job_runner._executable_task_instances_to_queued(max_tis=2, session=session)
task_instances_list2 = self.job_runner._executable_task_instances_to_queued(
max_tis=2, session=session
)
# Make sure we get TIs from a non-full pool in the 2nd list
assert len(task_instances_list2) > 0
assert all(
task_instance.pool != "test_scheduler_keeps_scheduling_pool_full_p1"
for task_instance in task_instances_list2
)
def test_scheduler_verify_priority_and_slots(self, dag_maker, mock_executor):
"""
Test task instances with higher priority are not queued
when pool does not have enough slots.
Though tasks with lower priority might be executed.
"""
with dag_maker(dag_id="test_scheduler_verify_priority_and_slots"):
# Medium priority, not enough slots
BashOperator(
task_id="test_scheduler_verify_priority_and_slots_t0",
pool="test_scheduler_verify_priority_and_slots",
pool_slots=2,
priority_weight=2,
bash_command="echo hi",
)
# High priority, occupies first slot
BashOperator(
task_id="test_scheduler_verify_priority_and_slots_t1",
pool="test_scheduler_verify_priority_and_slots",
pool_slots=1,
priority_weight=3,
bash_command="echo hi",
)
# Low priority, occupies second slot
BashOperator(
task_id="test_scheduler_verify_priority_and_slots_t2",
pool="test_scheduler_verify_priority_and_slots",
pool_slots=1,
priority_weight=1,
bash_command="echo hi",
)
session = settings.Session()
pool = Pool(pool="test_scheduler_verify_priority_and_slots", slots=2, include_deferred=False)
session.add(pool)
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun()
for ti in dr.task_instances:
ti.state = State.SCHEDULED
session.merge(ti)
session.flush()
task_instances_list = self.job_runner._executable_task_instances_to_queued(
max_tis=32, session=session
)
# Only second and third
assert len(task_instances_list) == 2
ti0 = (
session.query(TaskInstance)
.filter(TaskInstance.task_id == "test_scheduler_verify_priority_and_slots_t0")
.first()
)
assert ti0.state == State.SCHEDULED
ti1 = (
session.query(TaskInstance)
.filter(TaskInstance.task_id == "test_scheduler_verify_priority_and_slots_t1")
.first()
)
assert ti1.state == State.QUEUED
ti2 = (
session.query(TaskInstance)
.filter(TaskInstance.task_id == "test_scheduler_verify_priority_and_slots_t2")
.first()
)
assert ti2.state == State.QUEUED
def test_verify_integrity_if_dag_not_changed(self, dag_maker, session):
# CleanUp
session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id == "test_verify_integrity_if_dag_not_changed"
).delete(synchronize_session=False)
with dag_maker(dag_id="test_verify_integrity_if_dag_not_changed") as dag:
BashOperator(task_id="dummy", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
orm_dag = dag_maker.dag_model
assert orm_dag is not None
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Verify that DagRun.verify_integrity is not called
with mock.patch("airflow.jobs.scheduler_job_runner.DagRun.verify_integrity") as mock_verify_integrity:
self.job_runner._schedule_dag_run(dr, session)
mock_verify_integrity.assert_not_called()
session.flush()
tis_count = (
session.query(func.count(TaskInstance.task_id))
.filter(
TaskInstance.dag_id == dr.dag_id,
TaskInstance.logical_date == dr.logical_date,
TaskInstance.task_id == dr.dag.tasks[0].task_id,
TaskInstance.state == State.SCHEDULED,
)
.scalar()
)
assert tis_count == 1
latest_dag_version = DagVersion.get_latest_version(dr.dag_id, session=session)
for ti in dr.task_instances:
assert ti.dag_version_id == latest_dag_version.id
session.rollback()
session.close()
def test_verify_integrity_if_dag_changed(self, dag_maker):
# CleanUp
with create_session() as session:
session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id == "test_verify_integrity_if_dag_changed"
).delete(synchronize_session=False)
with dag_maker(dag_id="test_verify_integrity_if_dag_changed", serialized=False) as dag:
BashOperator(task_id="dummy", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
orm_dag = dag_maker.dag_model
assert orm_dag is not None
SerializedDagModel.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="testing")
assert orm_dag.bundle_version is None
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
self.job_runner._schedule_dag_run(dag_run=dr, session=session)
        assert len(self.job_runner.scheduler_dag_bag.get_dag_for_run(dr, session).tasks) == 1
dag_version_1 = DagVersion.get_latest_version(dr.dag_id, session=session)
assert dr.dag_versions[-1].id == dag_version_1.id
# Now let's say the DAG got updated (new task got added)
BashOperator(task_id="bash_task_1", dag=dag, bash_command="echo hi")
SerializedDagModel.write_dag(
LazyDeserializedDAG.from_dag(dag), bundle_name="testing", session=session
)
session.commit()
dag_version_2 = DagVersion.get_latest_version(dr.dag_id, session=session)
assert dag_version_2 != dag_version_1
self.job_runner._schedule_dag_run(dr, session)
session.commit()
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
assert dr.dag_versions[-1].id == dag_version_2.id
assert len(self.job_runner.scheduler_dag_bag.get_dag_for_run(dr, session).tasks) == 2
if SQLALCHEMY_V_1_4:
tis_count = (
session.query(func.count(TaskInstance.task_id))
.filter(
TaskInstance.dag_id == dr.dag_id,
TaskInstance.logical_date == dr.logical_date,
TaskInstance.state == State.SCHEDULED,
)
.scalar()
)
if SQLALCHEMY_V_2_0:
tis_count = session.scalar(
select(func.count(TaskInstance.task_id)).where(
TaskInstance.dag_id == dr.dag_id,
TaskInstance.logical_date == dr.logical_date,
TaskInstance.state == State.SCHEDULED,
)
)
assert tis_count == 2
latest_dag_version = DagVersion.get_latest_version(dr.dag_id, session=session)
assert dr.dag_versions[-1].id == latest_dag_version.id
session.rollback()
session.close()
def test_verify_integrity_not_called_for_versioned_bundles(self, dag_maker, session):
with dag_maker("test_verify_integrity_if_dag_not_changed") as dag:
BashOperator(task_id="dummy", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
orm_dag = dag_maker.dag_model
assert orm_dag is not None
self.job_runner._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Simulate versioned bundle by adding a version to dr.bundle_version
dr.bundle_version = "1"
session.merge(dr)
session.commit()
drs = session.query(DagRun).options(joinedload(DagRun.task_instances)).all()
dr = drs[0]
assert dr.bundle_version == "1"
dag_version_1 = DagVersion.get_latest_version(dr.dag_id, session=session)
# Now let's say the DAG got updated (new task got added)
BashOperator(task_id="bash_task_1", dag=dag_maker.dag, bash_command="echo hi")
sync_dag_to_db(dag_maker.dag, bundle_name="dag_maker", session=session)
session.commit()
dag_version_2 = DagVersion.get_latest_version(dr.dag_id, session=session)
assert dag_version_2 != dag_version_1
# Verify that DagRun.verify_integrity is not called
with mock.patch("airflow.jobs.scheduler_job_runner.DagRun.verify_integrity") as mock_verify_integrity:
self.job_runner._schedule_dag_run(dr, session)
mock_verify_integrity.assert_not_called()
@pytest.mark.need_serialized_dag
def test_retry_still_in_executor(self, dag_maker, session):
"""
        Check that the scheduler does not put a task in limbo when the task is retried
        while still present in the executor.
"""
executor = MockExecutor(do_update=False)
with dag_maker(
dag_id="test_retry_still_in_executor",
schedule="@once",
session=session,
) as dag:
dag_task1 = BashOperator(
task_id="test_retry_handling_op",
bash_command="exit 1",
retries=1,
)
dag_maker.dag_model.calculate_dagrun_date_fields(dag, None)
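        # Compute the DagModel's next-dagrun fields so the scheduler can create a run for this DAG.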
@provide_session
def do_schedule(session):
            # Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
scheduler_job.heartrate = 0
# Since the DAG is not in the directory watched by scheduler job,
# it would've been marked as deleted and not being scheduled.
with mock.patch.object(DagModel, "deactivate_deleted_dags"):
with mock.patch(
"airflow.executors.executor_loader.ExecutorLoader.load_executor"
) as loader_mock:
loader_mock.side_effect = executor.get_mock_loader_side_effect()
run_job(scheduler_job, execute_callable=self.job_runner._execute)
do_schedule()
with create_session() as session:
ti = (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == "test_retry_still_in_executor",
TaskInstance.task_id == "test_retry_handling_op",
)
.first()
)
assert ti is not None, "Task not created by scheduler"
ti.task = dag_task1
def run_with_error(ti, ignore_ti_state=False):
with contextlib.suppress(AirflowException):
ti.run(ignore_ti_state=ignore_ti_state)
assert ti.try_number == 1
        # At this point, the scheduler has tried to schedule the task once and
        # heartbeated the executor once, which moved the state of the task from
        # SCHEDULED to QUEUED and then back to SCHEDULED. To fail the task execution
        # we need to ignore the TaskInstance state, since SCHEDULED is not a valid
        # state from which to start executing a task.
run_with_error(ti, ignore_ti_state=True)
assert ti.state == State.UP_FOR_RETRY
assert ti.try_number == 1
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# To verify that task does get re-queued.
executor.do_update = True
do_schedule()
ti.refresh_from_db()
assert ti.try_number == 1
assert ti.state == State.SUCCESS
def test_adopt_or_reset_orphaned_tasks_nothing(self):
"""Try with nothing."""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
assert self.job_runner.adopt_or_reset_orphaned_tasks(session=session) == 0
@pytest.mark.parametrize(
"adoptable_state",
list(sorted(State.adoptable_states)),
)
def test_adopt_or_reset_resettable_tasks(self, dag_maker, adoptable_state, session):
dag_id = "test_adopt_or_reset_adoptable_tasks_" + adoptable_state.name
with dag_maker(dag_id=dag_id, schedule="@daily"):
task_id = dag_id + "_task"
EmptyOperator(task_id=task_id)
old_job = Job()
session.add(old_job)
session.commit()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr1 = dag_maker.create_dagrun(run_type=DagRunType.MANUAL)
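        # Mark the TI as queued by a different (now defunct) job so it is eligible
        # for adoption or reset.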
ti = dr1.get_task_instances(session=session)[0]
ti.state = adoptable_state
ti.queued_by_job_id = old_job.id
session.merge(ti)
session.merge(dr1)
session.commit()
num_reset_tis = self.job_runner.adopt_or_reset_orphaned_tasks(session=session)
assert num_reset_tis == 1
def test_adopt_or_reset_orphaned_tasks_external_triggered_dag(self, dag_maker, session):
dag_id = "test_reset_orphaned_tasks_external_triggered_dag"
with dag_maker(dag_id=dag_id, schedule="@daily"):
task_id = dag_id + "_task"
EmptyOperator(task_id=task_id)
old_job = Job()
session.add(old_job)
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
dr1 = dag_maker.create_dagrun(run_type=DagRunType.MANUAL)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.QUEUED
ti.queued_by_job_id = old_job.id
session.merge(ti)
session.merge(dr1)
session.commit()
num_reset_tis = self.job_runner.adopt_or_reset_orphaned_tasks(session=session)
assert num_reset_tis == 1
def test_adopt_or_reset_orphaned_tasks_backfill_dag(self, dag_maker):
dag_id = "test_adopt_or_reset_orphaned_tasks_backfill_dag"
with dag_maker(dag_id=dag_id, schedule="@daily"):
task_id = dag_id + "_task"
EmptyOperator(task_id=task_id)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
session.add(scheduler_job)
session.flush()
dr1 = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
session.merge(ti)
session.merge(dr1)
session.flush()
assert dr1.run_type == DagRunType.BACKFILL_JOB
assert self.job_runner.adopt_or_reset_orphaned_tasks(session=session) == 0
session.rollback()
def test_reset_orphaned_tasks_no_orphans(self, dag_maker):
dag_id = "test_reset_orphaned_tasks_no_orphans"
with dag_maker(dag_id=dag_id, schedule="@daily"):
task_id = dag_id + "_task"
EmptyOperator(task_id=task_id)
scheduler_job = Job()
scheduler_job.state = "running"
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
session.add(scheduler_job)
session.flush()
dr1 = dag_maker.create_dagrun()
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
tis[0].queued_by_job_id = scheduler_job.id
session.merge(dr1)
session.merge(tis[0])
session.flush()
assert self.job_runner.adopt_or_reset_orphaned_tasks(session=session) == 0
tis[0].refresh_from_db()
assert tis[0].state == State.RUNNING
def test_reset_orphaned_tasks_non_running_dagruns(self, dag_maker):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = "test_reset_orphaned_tasks_non_running_dagruns"
with dag_maker(dag_id=dag_id, schedule="@daily"):
task_id = dag_id + "_task"
EmptyOperator(task_id=task_id)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
session.add(scheduler_job)
session.flush()
dr1 = dag_maker.create_dagrun()
dr1.state = State.QUEUED
tis = dr1.get_task_instances(session=session)
assert len(tis) == 1
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.flush()
assert self.job_runner.adopt_or_reset_orphaned_tasks(session=session) == 0
session.rollback()
def test_adopt_or_reset_orphaned_tasks_stale_scheduler_jobs(self, dag_maker):
dag_id = "test_adopt_or_reset_orphaned_tasks_stale_scheduler_jobs"
with dag_maker(dag_id=dag_id, schedule="@daily"):
EmptyOperator(task_id="task1")
EmptyOperator(task_id="task2")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
scheduler_job.state = State.RUNNING
scheduler_job.latest_heartbeat = timezone.utcnow()
session.add(scheduler_job)
old_job = Job()
old_job_runner = SchedulerJobRunner(job=old_job)
old_job_runner.job.state = State.RUNNING
old_job_runner.job.latest_heartbeat = timezone.utcnow() - timedelta(minutes=15)
session.add(old_job)
session.flush()
dr1 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
logical_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
)
ti1, ti2 = dr1.get_task_instances(session=session)
dr1.state = State.RUNNING
ti1.state = State.QUEUED
ti1.queued_by_job_id = old_job.id
session.merge(dr1)
session.merge(ti1)
ti2.state = State.QUEUED
ti2.queued_by_job_id = scheduler_job.id
session.merge(ti2)
session.flush()
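        # Only the TI queued by the stale scheduler job should be reset; the TI queued
        # by the live scheduler job must keep its state.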
num_reset_tis = self.job_runner.adopt_or_reset_orphaned_tasks(session=session)
assert num_reset_tis == 1
session.refresh(ti1)
assert ti1.state is None
session.refresh(ti2)
assert ti2.state == State.QUEUED
session.rollback()
def test_adopt_or_reset_orphaned_tasks_only_fails_scheduler_jobs(self, caplog):
"""Make sure we only set SchedulerJobs to failed, not all jobs"""
session = settings.Session()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
scheduler_job.state = State.RUNNING
scheduler_job.latest_heartbeat = timezone.utcnow()
session.add(scheduler_job)
session.flush()
old_job = Job()
self.job_runner = SchedulerJobRunner(job=old_job)
old_job.state = State.RUNNING
old_job.latest_heartbeat = timezone.utcnow() - timedelta(minutes=15)
session.add(old_job)
session.flush()
old_task_job = Job(state=State.RUNNING)
old_task_job.latest_heartbeat = timezone.utcnow() - timedelta(minutes=15)
session.add(old_task_job)
session.flush()
with caplog.at_level("INFO", logger="airflow.jobs.scheduler_job_runner"):
self.job_runner.adopt_or_reset_orphaned_tasks(session=session)
session.expire_all()
assert old_job.state == State.FAILED
assert old_task_job.state == State.RUNNING
assert "Marked 1 SchedulerJob instances as failed" in caplog.messages
@pytest.mark.parametrize(
"kwargs",
[
param(
dict(
schedule=None,
backfill_runs=0,
other_runs=2,
max_active_runs=2,
should_update=False,
),
id="no_dag_schedule",
),
param(
dict(
schedule="0 0 * * *",
backfill_runs=0,
other_runs=2,
max_active_runs=2,
should_update=False,
),
id="dag_schedule_at_capacity",
),
param(
dict(
schedule="0 0 * * *",
backfill_runs=0,
other_runs=1,
max_active_runs=2,
should_update=True,
),
id="dag_schedule_under_capacity",
),
param(
dict(
schedule="0 0 * * *",
backfill_runs=0,
other_runs=5,
max_active_runs=2,
should_update=False,
),
id="dag_schedule_over_capacity",
),
param(
dict(
schedule="0 0 * * *",
number_running=None,
backfill_runs=5,
other_runs=1,
max_active_runs=2,
should_update=True,
),
id="dag_schedule_under_capacity_many_backfill",
),
],
)
@pytest.mark.parametrize("provide_run_count", [True, False])
def test_should_update_dag_next_dagruns(self, provide_run_count: bool, kwargs: dict, session, dag_maker):
"""Test if really required to update next dagrun or possible to save run time"""
schedule: str | None = kwargs["schedule"]
backfill_runs: int = kwargs["backfill_runs"]
other_runs: int = kwargs["other_runs"]
max_active_runs: int = kwargs["max_active_runs"]
should_update: bool = kwargs["should_update"]
with dag_maker(schedule=schedule, max_active_runs=max_active_runs) as dag:
EmptyOperator(task_id="dummy")
index = 0
for index in range(other_runs):
dag_maker.create_dagrun(
run_id=f"run_{index}",
logical_date=(DEFAULT_DATE + timedelta(days=index)),
start_date=timezone.utcnow(),
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
session=session,
)
for index in range(index + 1, index + 1 + backfill_runs):
dag_maker.create_dagrun(
run_id=f"run_{index}",
logical_date=(DEFAULT_DATE + timedelta(days=index)),
start_date=timezone.utcnow(),
state=State.RUNNING,
run_type=DagRunType.BACKFILL_JOB,
session=session,
)
assert index == other_runs + backfill_runs - 1 # sanity check
session.commit()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
actual = self.job_runner._should_update_dag_next_dagruns(
dag=dag,
dag_model=dag_maker.dag_model,
active_non_backfill_runs=other_runs if provide_run_count else None, # exclude backfill here
session=session,
)
assert actual == should_update
@pytest.mark.parametrize(
("run_type", "expected"),
[
(DagRunType.MANUAL, False),
(DagRunType.SCHEDULED, True),
(DagRunType.BACKFILL_JOB, False),
(DagRunType.ASSET_TRIGGERED, False),
],
ids=[
DagRunType.MANUAL.name,
DagRunType.SCHEDULED.name,
DagRunType.BACKFILL_JOB.name,
DagRunType.ASSET_TRIGGERED.name,
],
)
def test_should_update_dag_next_dagruns_after_run_type(self, run_type, expected, session, dag_maker):
"""Test that whether next dag run is updated depends on run type"""
with dag_maker(
schedule="*/1 * * * *",
max_active_runs=3,
) as dag:
EmptyOperator(task_id="dummy")
dag_model = dag_maker.dag_model
run = dag_maker.create_dagrun(
run_id="run",
run_type=run_type,
logical_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.SUCCESS,
session=session,
)
session.flush()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
actual = self.job_runner._should_update_dag_next_dagruns(
dag=dag,
dag_model=dag_model,
last_dag_run=run,
session=session,
)
assert actual == expected
def test_create_dag_runs(self, dag_maker):
"""
Test various invariants of _create_dag_runs.
- That the run created has the creating_job_id set
        - That the run created is in QUEUED state
- That dag_model has next_dagrun
"""
with dag_maker(dag_id="test_create_dag_runs") as dag:
EmptyOperator(task_id="dummy")
dag_model = dag_maker.dag_model
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with create_session() as session:
self.job_runner._create_dag_runs([dag_model], session)
dr = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).one()
assert dr.state == State.QUEUED
assert dr.start_date is None
assert dr.creating_job_id == scheduler_job.id
@pytest.mark.need_serialized_dag
def test_create_dag_runs_assets(self, session, dag_maker):
"""
Test various invariants of _create_dag_runs.
- That the run created has the creating_job_id set
        - That the run created is in QUEUED state
- That dag_model has next_dagrun
"""
asset1 = Asset(uri="test://asset1", name="test_asset", group="test_group")
asset2 = Asset(uri="test://asset2", name="test_asset_2", group="test_group")
with dag_maker(dag_id="assets-1", start_date=timezone.utcnow(), session=session):
BashOperator(task_id="task", bash_command="echo 1", outlets=[asset1])
dr = dag_maker.create_dagrun(
run_id="run1",
logical_date=(DEFAULT_DATE + timedelta(days=100)),
data_interval=(DEFAULT_DATE + timedelta(days=10), DEFAULT_DATE + timedelta(days=11)),
)
asset1_id = session.query(AssetModel.id).filter_by(uri=asset1.uri).scalar()
event1 = AssetEvent(
asset_id=asset1_id,
source_task_id="task",
source_dag_id=dr.dag_id,
source_run_id=dr.run_id,
source_map_index=-1,
)
session.add(event1)
# Create a second event, creation time is more recent, but data interval is older
dr = dag_maker.create_dagrun(
run_id="run2",
logical_date=(DEFAULT_DATE + timedelta(days=101)),
data_interval=(DEFAULT_DATE + timedelta(days=5), DEFAULT_DATE + timedelta(days=6)),
)
event2 = AssetEvent(
asset_id=asset1_id,
source_task_id="task",
source_dag_id=dr.dag_id,
source_run_id=dr.run_id,
source_map_index=-1,
)
session.add(event2)
with dag_maker(dag_id="assets-consumer-multiple", schedule=[asset1, asset2]):
pass
dag2 = dag_maker.dag
with dag_maker(dag_id="assets-consumer-single", schedule=[asset1]):
pass
dag3 = dag_maker.dag
session = dag_maker.session
session.add_all(
[
AssetDagRunQueue(asset_id=asset1_id, target_dag_id=dag2.dag_id),
AssetDagRunQueue(asset_id=asset1_id, target_dag_id=dag3.dag_id),
]
)
session.flush()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with create_session() as session:
self.job_runner._create_dagruns_for_dags(session, session)
def dict_from_obj(obj):
"""Get dict of column attrs from SqlAlchemy object."""
return {k.key: obj.__dict__.get(k) for k in obj.__mapper__.column_attrs}
# dag3 should be triggered since it only depends on asset1, and it's been queued
created_run = session.query(DagRun).filter(DagRun.dag_id == dag3.dag_id).one()
assert created_run.state == State.QUEUED
assert created_run.start_date is None
        # We don't have __eq__ defined on AssetEvent because, given that in the future
        # we may register events from other systems, asset_id + timestamp might not be enough as a PK
assert list(map(dict_from_obj, created_run.consumed_asset_events)) == list(
map(dict_from_obj, [event1, event2])
)
assert created_run.data_interval_start is None
assert created_run.data_interval_end is None
# dag2 ADRQ record should still be there since the dag run was *not* triggered
assert session.query(AssetDagRunQueue).filter_by(target_dag_id=dag2.dag_id).one() is not None
# dag2 should not be triggered since it depends on both asset 1 and 2
assert session.query(DagRun).filter(DagRun.dag_id == dag2.dag_id).one_or_none() is None
# dag3 ADRQ record should be deleted since the dag run was triggered
assert session.query(AssetDagRunQueue).filter_by(target_dag_id=dag3.dag_id).one_or_none() is None
assert created_run.creating_job_id == scheduler_job.id
@pytest.mark.need_serialized_dag
def test_create_dag_runs_asset_alias_with_asset_event_attached(self, session, dag_maker):
"""
Test Dag Run trigger on AssetAlias includes the corresponding AssetEvent in `consumed_asset_events`.
"""
# Simulate an Asset created at runtime, and it is not an active asset
asset1 = Asset(uri="test://asset1", name="test_asset", group="test_group")
# Create an AssetAlias, and the Asset will be attached to this AssetAlias
asset_alias = AssetAlias(name="test_asset_alias_with_asset_event", group="test_group")
# Add it to the DB so the event can be created from this Asset
asm = AssetModel(name=asset1.name, uri=asset1.uri, group=asset1.group)
session.add(asm)
asam = AssetAliasModel(name=asset_alias.name, group=asset_alias.group)
# Simulate a Producer dag attach an asset event at runtime to an AssetAlias
        # Don't use outlets here because the test needs to associate an asset alias with an asset event in the association table
with dag_maker(dag_id="asset-alias-producer", start_date=timezone.utcnow(), session=session):
BashOperator(task_id="simulate-asset-alias-outlet", bash_command="echo 1")
dr = dag_maker.create_dagrun(run_id="asset-alias-producer-run")
asset1_id = session.query(AssetModel.id).filter_by(uri=asset1.uri).scalar()
# Create an AssetEvent, which is associated with the Asset, and it is attached to the AssetAlias
event = AssetEvent(
asset_id=asset1_id,
source_task_id="simulate-asset-alias-outlet",
source_dag_id=dr.dag_id,
source_run_id=dr.run_id,
source_map_index=-1,
)
# Attach the Asset and the AssetEvent to the Asset Alias
asam.assets.append(asm)
asam.asset_events.append(event)
session.add_all([asam, event])
session.flush()
# Create the Consumer DAG and Trigger it with scheduler
with dag_maker(dag_id="asset-alias-consumer", schedule=[asset_alias]):
pass
consumer_dag = dag_maker.dag
session = dag_maker.session
session.add_all(
[
AssetDagRunQueue(asset_id=asset1_id, target_dag_id=consumer_dag.dag_id),
]
)
session.flush()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with create_session() as session:
self.job_runner._create_dagruns_for_dags(session, session)
def dict_from_obj(obj):
"""Get dict of column attrs from SqlAlchemy object."""
return {k.key: obj.__dict__.get(k) for k in obj.__mapper__.column_attrs}
created_run = session.query(DagRun).filter(DagRun.dag_id == consumer_dag.dag_id).one()
assert created_run.state == State.QUEUED
assert created_run.start_date is None
# The AssetEvent should be included in the consumed_asset_events when the consumer DAG is
# triggered on AssetAlias
assert list(map(dict_from_obj, created_run.consumed_asset_events)) == list(
map(dict_from_obj, [event])
)
assert created_run.data_interval_start is None
assert created_run.data_interval_end is None
assert created_run.creating_job_id == scheduler_job.id
@pytest.mark.need_serialized_dag
@pytest.mark.parametrize(
("disable", "enable"),
[
pytest.param({"is_stale": True}, {"is_stale": False}, id="active"),
pytest.param({"is_paused": True}, {"is_paused": False}, id="paused"),
],
)
def test_no_create_dag_runs_when_dag_disabled(self, session, dag_maker, disable, enable):
asset = Asset(uri="test://asset_1", name="test_asset_1", group="test_group")
with dag_maker(dag_id="consumer", schedule=[asset], session=session):
pass
with dag_maker(dag_id="producer", schedule="@daily", session=session):
BashOperator(task_id="task", bash_command="echo 1", outlets=asset)
        asset_manager = AssetManager()
asset_id = session.scalars(select(AssetModel.id).filter_by(uri=asset.uri, name=asset.name)).one()
ase_q = select(AssetEvent).where(AssetEvent.asset_id == asset_id).order_by(AssetEvent.timestamp)
adrq_q = select(AssetDagRunQueue).where(
AssetDagRunQueue.asset_id == asset_id, AssetDagRunQueue.target_dag_id == "consumer"
)
# Simulate the consumer DAG being disabled.
session.execute(update(DagModel).where(DagModel.dag_id == "consumer").values(**disable))
# An ADRQ is not scheduled although an event is emitted.
dr1: DagRun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
        asset_manager.register_asset_change(
task_instance=dr1.get_task_instance("task", session=session),
asset=asset,
session=session,
)
session.flush()
assert session.scalars(ase_q).one().source_run_id == dr1.run_id
assert session.scalars(adrq_q).one_or_none() is None
# Simulate the consumer DAG being enabled.
session.execute(update(DagModel).where(DagModel.dag_id == "consumer").values(**enable))
# An ADRQ should be scheduled for the new event, but not the previous one.
dr2: DagRun = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
        asset_manager.register_asset_change(
task_instance=dr2.get_task_instance("task", session=session),
asset=asset,
session=session,
)
session.flush()
assert [e.source_run_id for e in session.scalars(ase_q)] == [dr1.run_id, dr2.run_id]
assert session.scalars(adrq_q).one().target_dag_id == "consumer"
@time_machine.travel(DEFAULT_DATE + datetime.timedelta(days=1, seconds=9), tick=False)
@mock.patch("airflow.jobs.scheduler_job_runner.Stats.timing")
def test_start_dagruns(self, stats_timing, dag_maker, session):
"""
        Test that _start_queued_dagruns:
        - moves runs to RUNNING state
        - emits the right DagRun metrics
"""
from airflow.models.dag import get_last_dagrun
with dag_maker(dag_id="test_start_dag_runs") as dag:
EmptyOperator(task_id="dummy")
dag_model = dag_maker.dag_model
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._create_dag_runs([dag_model], session)
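        # Newly created runs start out QUEUED; _start_queued_dagruns should move them to RUNNING.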
self.job_runner._start_queued_dagruns(session)
dr = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).first()
# Assert dr state is running
assert dr.state == State.RUNNING
stats_timing.assert_has_calls(
[
mock.call(
"dagrun.schedule_delay.test_start_dag_runs",
datetime.timedelta(seconds=9),
),
mock.call(
"dagrun.schedule_delay",
datetime.timedelta(seconds=9),
tags={"dag_id": "test_start_dag_runs"},
),
]
)
assert get_last_dagrun(dag.dag_id, session).creating_job_id == scheduler_job.id
def test_extra_operator_links_not_loaded_in_scheduler_loop(self, dag_maker):
"""
        Test that Operator links are not loaded inside the scheduling loop (which does not include
        DagFileProcessorProcess), especially the critical loop of the scheduler.
        This avoids running user code in the scheduler and prevents deadlocks.
"""
with dag_maker(dag_id="test_extra_operator_links_not_loaded_in_scheduler") as dag:
# This CustomOperator has Extra Operator Links registered via plugins
_ = CustomOperator(task_id="custom_task")
custom_task = dag.task_dict["custom_task"]
# Test that custom_task has >= 1 Operator Links (after de-serialization)
assert custom_task.operator_extra_links
session = settings.Session()
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._do_scheduling(session=session)
self.job_runner._start_queued_dagruns(session)
session.flush()
# assert len(self.job_runner.scheduler_dag_bag._dags) == 1 # sanity check
# Get serialized dag
dr = DagRun.find(dag_id=dag.dag_id)[0]
s_dag_2 = self.job_runner.scheduler_dag_bag.get_dag_for_run(dr, session=session)
custom_task = s_dag_2.task_dict["custom_task"]
# Test that custom_task has no Operator Links (after de-serialization) in the Scheduling Loop
assert not custom_task.operator_extra_links
def test_scheduler_create_dag_runs_does_not_raise_error_when_no_serdag(self, caplog, dag_maker):
"""
Test that scheduler._create_dag_runs does not raise an error when the DAG does not exist
in serialized_dag table
"""
with dag_maker(dag_id="test_scheduler_create_dag_runs_does_not_raise_error", serialized=False):
EmptyOperator(
task_id="dummy",
)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
caplog.set_level("FATAL")
caplog.clear()
with (
create_session() as session,
caplog.at_level(
"ERROR",
logger="airflow.jobs.scheduler_job_runner",
),
):
self._clear_serdags(dag_id=dag_maker.dag.dag_id, session=session)
self.job_runner._create_dag_runs([dag_maker.dag_model], session)
assert caplog.messages == [
"DAG 'test_scheduler_create_dag_runs_does_not_raise_error' not found in serialized_dag table",
]
def _clear_serdags(self, dag_id, session):
SDM = SerializedDagModel
sdms = session.scalars(select(SDM).where(SDM.dag_id == dag_id))
for sdm in sdms:
session.delete(sdm)
session.commit()
def test_bulk_write_to_db_external_trigger_dont_skip_scheduled_run(self, dag_maker, testing_dag_bundle):
"""
        Test that externally triggered dag runs do not cause the next scheduled
        DAG runs to be skipped
"""
with dag_maker(
dag_id="test_bulk_write_to_db_external_trigger_dont_skip_scheduled_run",
schedule="*/1 * * * *",
max_active_runs=5,
catchup=True,
) as dag:
EmptyOperator(task_id="dummy")
session = settings.Session()
# Verify that dag_model.next_dagrun is equal to next logical_date
dag_model = dag_maker.dag_model
assert dag_model.next_dagrun == DEFAULT_DATE
assert dag_model.next_dagrun_data_interval_start == DEFAULT_DATE
assert dag_model.next_dagrun_data_interval_end == DEFAULT_DATE + timedelta(minutes=1)
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Verify a DagRun is created with the correct dates
# when Scheduler._do_scheduling is run in the Scheduler Loop
self.job_runner._do_scheduling(session)
dr1 = session.scalar(
select(DagRun).where(DagRun.dag_id == dag_model.dag_id).order_by(DagRun.id.asc()).limit(1)
)
assert dr1 is not None
assert dr1.state == DagRunState.RUNNING
assert dr1.logical_date == DEFAULT_DATE
assert dr1.data_interval_start == DEFAULT_DATE
assert dr1.data_interval_end == DEFAULT_DATE + timedelta(minutes=1)
# Verify that dag_model.next_dagrun is set to next interval
dag_model = session.get(DagModel, dag.dag_id)
assert dag_model.next_dagrun == DEFAULT_DATE + timedelta(minutes=1)
assert dag_model.next_dagrun_data_interval_start == DEFAULT_DATE + timedelta(minutes=1)
assert dag_model.next_dagrun_data_interval_end == DEFAULT_DATE + timedelta(minutes=2)
# Trigger the Dag externally
data_interval = infer_automated_data_interval(dag.timetable, DEFAULT_LOGICAL_DATE)
dr = dag.create_dagrun(
run_id="test",
state=DagRunState.RUNNING,
logical_date=timezone.utcnow(),
run_type=DagRunType.MANUAL,
session=session,
data_interval=data_interval,
run_after=DEFAULT_LOGICAL_DATE,
triggered_by=DagRunTriggeredByType.TEST,
)
assert dr is not None
# Test that 'dag_model.next_dagrun' has not been changed because of newly created external
# triggered DagRun.
dag_model = session.get(DagModel, dag.dag_id)
assert dag_model.next_dagrun == DEFAULT_DATE + timedelta(minutes=1)
assert dag_model.next_dagrun_data_interval_start == DEFAULT_DATE + timedelta(minutes=1)
assert dag_model.next_dagrun_data_interval_end == DEFAULT_DATE + timedelta(minutes=2)
def test_scheduler_create_dag_runs_check_existing_run(self, dag_maker, session):
"""
        Test that if a dag run already exists, scheduler._create_dag_runs does not raise an error,
        and that if a dag run does not exist it creates the next dag run. In both cases the scheduler
        sets the next logical date as DagModel.next_dagrun
"""
# By setting catchup=True explicitly, we ensure the test behaves as originally intended
# using the historical date as the next_dagrun date.
with dag_maker(
dag_id="test_scheduler_create_dag_runs_check_existing_run",
schedule=timedelta(days=1),
catchup=True,
) as dag:
EmptyOperator(task_id="dummy")
assert get_last_dagrun(dag.dag_id, session) is None
dag_model = dag_maker.dag_model
# Assert dag_model.next_dagrun is set correctly
assert dag_model.next_dagrun == DEFAULT_DATE
dagrun = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
logical_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
session=session,
creating_job_id=2,
)
session.flush()
assert get_last_dagrun(dag.dag_id, session) == dagrun
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Test that this does not raise any error
self.job_runner._create_dag_runs([dag_model], session)
# Assert the next dagrun fields are set correctly to next logical date
assert dag_model.next_dagrun_data_interval_start == DEFAULT_DATE + timedelta(days=1)
assert dag_model.next_dagrun_data_interval_end == DEFAULT_DATE + timedelta(days=2)
assert dag_model.next_dagrun == DEFAULT_DATE + timedelta(days=1)
session.rollback()
@conf_vars({("scheduler", "use_job_schedule"): "false"})
def test_do_schedule_max_active_runs_dag_timed_out(self, dag_maker, session):
"""Test that tasks are set to a finished state when their DAG times out"""
with dag_maker(
dag_id="test_max_active_run_with_dag_timed_out",
schedule="@once",
max_active_runs=1,
catchup=True,
dagrun_timeout=datetime.timedelta(seconds=1),
) as dag:
task1 = BashOperator(
task_id="task1",
bash_command=' for((i=1;i<=600;i+=1)); do sleep "$i"; done',
)
data_interval = infer_automated_data_interval(dag.timetable, DEFAULT_LOGICAL_DATE)
run1 = dag.create_dagrun(
run_id="test1",
run_type=DagRunType.SCHEDULED,
logical_date=DEFAULT_DATE,
state=State.RUNNING,
start_date=timezone.utcnow() - timedelta(seconds=2),
session=session,
data_interval=data_interval,
run_after=DEFAULT_LOGICAL_DATE,
triggered_by=DagRunTriggeredByType.TEST,
)
run1_ti = run1.get_task_instance(task1.task_id, session)
run1_ti.state = State.RUNNING
logical_date_2 = DEFAULT_DATE + timedelta(seconds=10)
run2 = dag.create_dagrun(
run_id="test2",
run_type=DagRunType.SCHEDULED,
logical_date=logical_date_2,
state=State.QUEUED,
session=session,
data_interval=data_interval,
run_after=logical_date_2,
triggered_by=DagRunTriggeredByType.TEST,
)
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
my_dag = session.get(DagModel, dag.dag_id)
self.job_runner._create_dag_runs([my_dag], session)
        # Schedule run1; it should be marked failed because its dagrun_timeout has already elapsed
self.job_runner._schedule_dag_run(run1, session)
run1 = session.merge(run1)
session.refresh(run1)
assert run1.state == State.FAILED
assert run1_ti.state == State.SKIPPED
session.flush()
# Run relevant part of scheduling again to assert run2 has been scheduled
self.job_runner._start_queued_dagruns(session)
session.flush()
run2 = session.merge(run2)
session.refresh(run2)
assert run2.state == State.RUNNING
self.job_runner._schedule_dag_run(run2, session)
session.expunge_all()
run2_ti = run2.get_task_instance(task1.task_id, session)
assert run2_ti.state == State.SCHEDULED
def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker):
"""Test that tasks in removed state don't count as actively running."""
with dag_maker(
dag_id="test_do_schedule_max_active_runs_task_removed",
start_date=DEFAULT_DATE,
schedule="@once",
max_active_runs=1,
session=session,
):
# Can't use EmptyOperator as that goes straight to success
BashOperator(task_id="dummy1", bash_command="true")
run1 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
logical_date=DEFAULT_DATE + timedelta(hours=1),
state=State.RUNNING,
)
executor = MockExecutor(do_update=False)
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with mock.patch("airflow.executors.executor_loader.ExecutorLoader.load_executor") as loader_mock:
# The executor is mocked, so cannot be loaded/imported. Mock load_executor and return the
# correct object for the given input executor name.
loader_mock.side_effect = executor.get_mock_loader_side_effect()
num_queued = self.job_runner._do_scheduling(session)
assert num_queued == 1
session.flush()
ti = run1.task_instances[0]
ti.refresh_from_db(session=session)
assert ti.state == State.QUEUED
def test_more_runs_are_not_created_when_max_active_runs_is_reached(self, dag_maker, caplog):
"""
This tests that when max_active_runs is reached, _create_dag_runs doesn't create
more dagruns
"""
# Explicitly set catchup=True as test specifically expects historical dates to be respected
with dag_maker(max_active_runs=1, catchup=True):
EmptyOperator(task_id="task")
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
assert session.query(DagRun).count() == 0
query, _ = DagModel.dags_needing_dagruns(session)
dag_models = query.all()
self.job_runner._create_dag_runs(dag_models, session)
dr = session.query(DagRun).one()
        assert dr.state == DagRunState.QUEUED
assert session.query(DagRun).count() == 1
assert dag_maker.dag_model.next_dagrun_create_after is None
session.flush()
# dags_needing_dagruns query should not return any value
query, _ = DagModel.dags_needing_dagruns(session)
assert len(query.all()) == 0
self.job_runner._create_dag_runs(dag_models, session)
assert session.query(DagRun).count() == 1
assert dag_maker.dag_model.next_dagrun_create_after is None
assert dag_maker.dag_model.next_dagrun == DEFAULT_DATE
# set dagrun to success
dr = session.query(DagRun).one()
dr.state = DagRunState.SUCCESS
ti = dr.get_task_instance("task", session)
ti.state = TaskInstanceState.SUCCESS
session.merge(ti)
session.merge(dr)
session.flush()
# check that next_dagrun is set properly by Schedulerjob._update_dag_next_dagruns
self.job_runner._schedule_dag_run(dr, session)
session.flush()
query, _ = DagModel.dags_needing_dagruns(session)
assert len(query.all()) == 1
# assert next_dagrun has been updated correctly
assert dag_maker.dag_model.next_dagrun == DEFAULT_DATE + timedelta(days=1)
# assert no dagruns is created yet
assert (
session.query(DagRun).filter(DagRun.state.in_([DagRunState.RUNNING, DagRunState.QUEUED])).count()
== 0
)
def test_max_active_runs_creation_phasing(self, dag_maker, session):
"""
        Test that when runs are created after max_active_runs is reached, they come in the
        right order without gaps
"""
def complete_one_dagrun():
ti = (
session.query(TaskInstance)
.join(TaskInstance.dag_run)
.filter(TaskInstance.state != State.SUCCESS)
.order_by(DagRun.logical_date)
.first()
)
if ti:
ti.state = State.SUCCESS
session.flush()
self.clean_db()
# Explicitly set catchup=True as test specifically expects runs to be created in date order
with dag_maker(max_active_runs=3, session=session, catchup=True) as dag:
# Need to use something that doesn't immediately get marked as success by the scheduler
BashOperator(task_id="task", bash_command="true")
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
query, _ = DagModel.dags_needing_dagruns(session)
query.all()
for _ in range(3):
self.job_runner._do_scheduling(session)
model: DagModel = session.get(DagModel, dag.dag_id)
# Pre-condition
assert DagRun.active_runs_of_dags(dag_ids=["test_dag"], exclude_backfill=True, session=session) == {
"test_dag": 3
}
assert model.next_dagrun == timezone.DateTime(2016, 1, 3, tzinfo=timezone.utc)
assert model.next_dagrun_create_after is None
complete_one_dagrun()
assert DagRun.active_runs_of_dags(dag_ids=["test_dag"], exclude_backfill=True, session=session) == {
"test_dag": 3
}
for _ in range(5):
self.job_runner._do_scheduling(session)
complete_one_dagrun()
expected_logical_dates = [datetime.datetime(2016, 1, d, tzinfo=timezone.utc) for d in range(1, 6)]
dagrun_logical_dates = [
dr.logical_date for dr in session.query(DagRun).order_by(DagRun.logical_date).all()
]
assert dagrun_logical_dates == expected_logical_dates
@pytest.mark.usefixtures("testing_dag_bundle")
def test_do_schedule_max_active_runs_and_manual_trigger(self, dag_maker, mock_executors):
"""
        Make sure that when a DAG is already at max_active_runs, manually triggered
        dagruns don't start running.
"""
# Explicitly set catchup=True as test specifically expects runs to be created in date order
with dag_maker(
dag_id="test_max_active_run_plus_manual_trigger",
schedule="@once",
max_active_runs=1,
catchup=True,
) as dag:
# Can't use EmptyOperator as that goes straight to success
task1 = BashOperator(task_id="dummy1", bash_command="true")
task2 = BashOperator(task_id="dummy2", bash_command="true")
task1 >> task2
BashOperator(task_id="dummy3", bash_command="true")
session = settings.Session()
dag_run = dag_maker.create_dagrun(state=State.QUEUED, session=session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
num_queued = self.job_runner._do_scheduling(session)
# Add it back in to the session so we can refresh it. (_do_scheduling does an expunge_all to reduce
# memory)
dag_run = session.merge(dag_run)
session.refresh(dag_run)
assert num_queued == 2
assert dag_run.state == State.RUNNING
# Now that this one is running, manually trigger a dag.
dag_maker.create_dagrun(
run_type=DagRunType.MANUAL,
logical_date=DEFAULT_DATE + timedelta(hours=1),
state=State.QUEUED,
session=session,
)
session.flush()
self.job_runner._do_scheduling(session)
# Assert that only 1 dagrun is active
assert len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)) == 1
# Assert that the other one is queued
assert len(DagRun.find(dag_id=dag.dag_id, state=State.QUEUED, session=session)) == 1
def test_max_active_runs_in_a_dag_doesnt_stop_running_dag_runs_in_other_dags(self, dag_maker):
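        """
        Test that one DAG hitting max_active_runs does not stop queued runs of other DAGs
        from being started.
        """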
session = settings.Session()
# Explicitly set catchup=True as test specifically expects historical dates to be respected
with dag_maker(
"test_dag1",
start_date=DEFAULT_DATE,
schedule=timedelta(hours=1),
max_active_runs=1,
catchup=True,
):
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(29):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# Explicitly set catchup=True as test specifically expects historical dates to be respected
with dag_maker(
"test_dag2",
start_date=timezone.datetime(2020, 1, 1),
schedule=timedelta(hours=1),
catchup=True,
):
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(9):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._start_queued_dagruns(session)
session.flush()
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(DagRun.dag_id == "test_dag1", DagRun.state == State.RUNNING)
.scalar()
)
running_count = session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
assert dag1_running_count == 1
assert running_count == 11
def test_max_active_runs_in_a_dag_doesnt_prevent_backfill_from_running_catchup_true(self, dag_maker):
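        """Test that with catchup=True, backfill runs can still start even when max_active_runs is reached for normal DAG runs"""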
session = settings.Session()
with dag_maker(
"test_dag1",
start_date=DEFAULT_DATE,
schedule=timedelta(days=1),
max_active_runs=1,
catchup=True,
) as dag:
EmptyOperator(task_id="mytask")
dag1_dag_id = dag.dag_id
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(29):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
with dag_maker(
"test_dag2",
start_date=timezone.datetime(2020, 1, 1),
schedule=timedelta(days=1),
catchup=True,
):
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(9):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._start_queued_dagruns(session)
session.flush()
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(DagRun.dag_id == "test_dag1", DagRun.state == State.RUNNING)
.scalar()
)
running_count = session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
assert dag1_running_count == 1
assert running_count == 11
from_date = pendulum.parse("2021-01-01")
to_date = pendulum.parse("2021-01-06")
_create_backfill(
dag_id=dag1_dag_id,
from_date=from_date,
to_date=to_date,
max_active_runs=3,
reverse=False,
triggering_user_name="test_user",
dag_run_conf={},
)
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(DagRun.dag_id == "test_dag1", DagRun.state == State.RUNNING)
.scalar()
)
assert dag1_running_count == 1
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
assert total_running_count == 11
# scheduler will now mark backfill runs as running
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
)
.scalar()
)
assert dag1_running_count == 4
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
assert total_running_count == 14
# and doing it again does not change anything
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
)
.scalar()
)
assert dag1_running_count == 4
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
assert total_running_count == 14
def test_max_active_runs_in_a_dag_doesnt_prevent_backfill_from_running_catchup_false(self, dag_maker):
"""Test that with catchup=False, backfills can still run even when max_active_runs is reached for normal DAG runs"""
session = settings.Session()
with dag_maker(
"test_dag1",
start_date=DEFAULT_DATE,
schedule=timedelta(days=1),
max_active_runs=1,
catchup=False,
) as dag:
EmptyOperator(task_id="mytask")
dag1_dag_id = dag.dag_id
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# Fewer DAG runs since we're only testing recent dates with catchup=False
for _ in range(2):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
with dag_maker(
"test_dag2",
start_date=timezone.datetime(2020, 1, 1),
schedule=timedelta(days=1),
catchup=False,
) as dag:
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(2):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._start_queued_dagruns(session)
session.flush()
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(DagRun.dag_id == "test_dag1", DagRun.state == State.RUNNING)
.scalar()
)
running_count = session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
assert dag1_running_count == 1
# With catchup=False, only the most recent interval is scheduled for each DAG
assert (
running_count == 2
) # 1 from test_dag1 (limited by max_active_runs) + 1 from test_dag2 (only most recent with catchup=False)
# Test that backfills can still run despite max_active_runs being reached for normal runs
from_date = pendulum.parse("2021-01-01")
to_date = pendulum.parse("2021-01-06")
_backfill = _create_backfill(
dag_id=dag1_dag_id,
from_date=from_date,
to_date=to_date,
max_active_runs=3,
reverse=False,
triggering_user_name="test_user",
dag_run_conf={},
)
# scheduler will now mark backfill runs as running
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_running_count = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
)
.scalar()
)
# Even with catchup=False, backfill runs should start
assert dag1_running_count == 4
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
assert (
total_running_count == 5
) # 4 from test_dag1 + 1 from test_dag2 (only most recent with catchup=False)
def test_backfill_runs_are_started_with_lower_priority_catchup_true(self, dag_maker, session):
"""
Here we are going to create all the runs at the same time and see which
ones are scheduled first.
On the first scheduler run, I expect that backfill runs would not be started
due to being outside the limit in the queued runs query.
"""
dag1_dag_id = "test_dag1"
with dag_maker(
dag_id=dag1_dag_id,
start_date=DEFAULT_DATE,
schedule=timedelta(days=1),
max_active_runs=1,
catchup=True,
):
EmptyOperator(task_id="mytask")
def _running_counts():
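            # Return (dag1 non-backfill running, dag1 backfill running, total running) counts.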
dag1_non_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type != DagRunType.BACKFILL_JOB,
)
.scalar()
)
dag1_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type == DagRunType.BACKFILL_JOB,
)
.scalar()
)
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
return dag1_non_b_running, dag1_b_running, total_running_count
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
from_date = pendulum.parse("2021-01-01")
to_date = pendulum.parse("2021-01-06")
_create_backfill(
dag_id=dag1_dag_id,
from_date=from_date,
to_date=to_date,
max_active_runs=3,
reverse=False,
triggering_user_name="test_user",
dag_run_conf={},
)
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
# now let's create some "normal" dag runs and verify that they can run
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(29):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
with dag_maker(
"test_dag2",
start_date=timezone.datetime(2020, 1, 1),
schedule=timedelta(days=1),
catchup=True,
):
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(9):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# initial state -- nothing is running
assert dag1_non_b_running == 0
assert dag1_b_running == 0
assert total_running == 0
assert session.query(func.count(DagRun.id)).scalar() == 46
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
# now let's run it once
self.job_runner._start_queued_dagruns(session)
session.flush()
# after running the scheduler one time, observe that only one dag run is started
# this is because there are 30 runs for dag 1 so neither the backfills nor
# any runs for dag2 get started
assert DagRun.DEFAULT_DAGRUNS_TO_EXAMINE == 20
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 1
assert dag1_b_running == 0
assert total_running == 1
assert session.scalar(select(func.count()).select_from(DagRun)) == 46
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
# we run scheduler again and observe that now all the runs are created
# this must be because sorting is working
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 1
assert dag1_b_running == 3
assert total_running == 14
assert session.scalar(select(func.count()).select_from(DagRun)) == 46
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
# run it a 3rd time and nothing changes
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 1
assert dag1_b_running == 3
assert total_running == 14
assert session.scalar(select(func.count()).select_from(DagRun)) == 46
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
def test_backfill_runs_are_started_with_lower_priority_catchup_false(self, dag_maker, session):
"""
Test that with catchup=False, backfill runs are still started with lower priority than regular DAG runs,
but the scheduler processes fewer runs overall due to catchup=False behavior.
"""
dag1_dag_id = "test_dag1"
with dag_maker(
dag_id=dag1_dag_id,
start_date=DEFAULT_DATE,
schedule=timedelta(days=1),
max_active_runs=1,
catchup=False,
):
EmptyOperator(task_id="mytask")
def _running_counts():
dag1_non_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type != DagRunType.BACKFILL_JOB,
)
.scalar()
)
dag1_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type == DagRunType.BACKFILL_JOB,
)
.scalar()
)
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
return dag1_non_b_running, dag1_b_running, total_running_count
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
from_date = pendulum.parse("2021-01-01")
to_date = pendulum.parse("2021-01-06")
_create_backfill(
dag_id=dag1_dag_id,
from_date=from_date,
to_date=to_date,
max_active_runs=3,
reverse=False,
triggering_user_name="test_user",
dag_run_conf={},
)
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
# Create fewer DAG runs since we're only testing recent dates with catchup=False
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# With catchup=False, we only create a few runs instead of 29
for _ in range(4):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
with dag_maker(
"test_dag2",
start_date=timezone.datetime(2020, 1, 1),
schedule=timedelta(days=1),
catchup=False,
):
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# With catchup=False, we only create a few runs instead of 9
for _ in range(2):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# initial state -- nothing is running
assert dag1_non_b_running == 0
assert dag1_b_running == 0
assert total_running == 0
# Total 14 runs: 5 for dag1 + 3 for dag2 + 6 backfill runs (Jan 1-6 inclusive)
assert session.query(func.count(DagRun.id)).scalar() == 14
# now let's run it once
self.job_runner._start_queued_dagruns(session)
session.flush()
# With catchup=False, the scheduler behaves differently than with catchup=True
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
# One normal run starts due to max_active_runs=1
assert dag1_non_b_running == 1
# With catchup=False, backfill runs are started immediately alongside regular runs
assert dag1_b_running == 3
# Total running = 1 normal dag1 + 3 backfills + 1 from dag2
assert total_running == 5
# Running the scheduler again doesn't change anything since we've already reached
# the limits for both normal runs (max_active_runs=1) and backfill runs (default max_active_runs_per_dag=16)
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
# Still only one normal run due to max_active_runs=1
assert dag1_non_b_running == 1
# Backfill runs remain at 3 (the maximum allowed by our test configuration)
assert dag1_b_running == 3
# Total running count remains the same
assert total_running == 5
# Total runs remain the same
assert session.query(func.count(DagRun.id)).scalar() == 14
def test_backfill_maxed_out_no_prevent_non_backfill_max_out(self, dag_maker):
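        """
        Test that a backfill at its own max_active_runs limit does not prevent non-backfill runs
        of the same DAG, or runs of other DAGs, from being started.
        """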
session = settings.Session()
dag1_dag_id = "test_dag1"
with dag_maker(
dag_id=dag1_dag_id,
start_date=DEFAULT_DATE,
schedule=timedelta(days=1),
max_active_runs=1,
catchup=True,
):
EmptyOperator(task_id="mytask")
def _running_counts():
dag1_non_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type != DagRunType.BACKFILL_JOB,
)
.scalar()
)
dag1_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type == DagRunType.BACKFILL_JOB,
)
.scalar()
)
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
return dag1_non_b_running, dag1_b_running, total_running_count
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
from_date = pendulum.parse("2021-01-01")
to_date = pendulum.parse("2021-01-06")
_create_backfill(
dag_id=dag1_dag_id,
from_date=from_date,
to_date=to_date,
max_active_runs=3,
reverse=False,
triggering_user_name="test_user",
dag_run_conf={},
)
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 0
assert dag1_b_running == 0
assert total_running == 0
assert session.query(func.count(DagRun.id)).scalar() == 6
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 6
# scheduler will now mark backfill runs as running
# it should mark 3 of them running since that is backfill max active runs
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 0
assert dag1_b_running == 3
assert total_running == 3
assert session.scalar(select(func.count()).select_from(DagRun)) == 6
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 6
# and nothing should change if scheduler runs again
self.job_runner._start_queued_dagruns(session)
session.flush()
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 0
assert dag1_b_running == 3
assert total_running == 3
assert session.scalar(select(func.count()).select_from(DagRun)) == 6
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 6
# now let's create some "normal" dag runs and verify that they can run
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(29):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
with dag_maker(
"test_dag2",
start_date=timezone.datetime(2020, 1, 1),
schedule=timedelta(days=1),
catchup=True,
):
EmptyOperator(task_id="mytask")
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
for _ in range(9):
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
# ok at this point, there are new dag runs created, but no new running runs
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 0
assert dag1_b_running == 3
assert total_running == 3
# we created a lot of drs
assert session.scalar(select(func.count()).select_from(DagRun)) == 46
# and in particular there are 36 total runs for dag1
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
# but now let's run the scheduler once
self.job_runner._start_queued_dagruns(session)
session.flush()
# now we should see one more non-backfill run running, and 11 more in total
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 1
assert dag1_b_running == 3
# this should be 14 but it is not. why?
# answer: because dag2 got starved out by dag1
# if we run the scheduler again, dag2 should get queued
assert total_running == 4
assert session.scalar(select(func.count()).select_from(DagRun)) == 46
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
# run scheduler a second time
self.job_runner._start_queued_dagruns(session)
session.flush()
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 1
assert dag1_b_running == 3
# on the second try, dag 2's 10 runs now start running
assert total_running == 14
assert session.scalar(select(func.count()).select_from(DagRun)) == 46
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 36
@pytest.mark.parametrize(
("pause_it", "expected_running"),
[
(True, 0),
(False, 3),
],
)
def test_backfill_runs_not_started_when_backfill_paused(
self, pause_it, expected_running, dag_maker, session
):
"""
        When a backfill is paused, its runs are not started.
"""
dag1_dag_id = "test_dag1"
# Explicitly needs catchup True for backfill test
with dag_maker(
dag_id=dag1_dag_id,
start_date=DEFAULT_DATE,
schedule=timedelta(days=1),
max_active_runs=1,
catchup=True,
):
EmptyOperator(task_id="mytask")
def _running_counts():
dag1_non_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type != DagRunType.BACKFILL_JOB,
)
.scalar()
)
dag1_b_running = (
session.query(func.count(DagRun.id))
.filter(
DagRun.dag_id == dag1_dag_id,
DagRun.state == State.RUNNING,
DagRun.run_type == DagRunType.BACKFILL_JOB,
)
.scalar()
)
total_running_count = (
session.query(func.count(DagRun.id)).filter(DagRun.state == State.RUNNING).scalar()
)
return dag1_non_b_running, dag1_b_running, total_running_count
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
from_date = pendulum.parse("2021-01-01")
to_date = pendulum.parse("2021-01-06")
b = _create_backfill(
dag_id=dag1_dag_id,
from_date=from_date,
to_date=to_date,
max_active_runs=3,
reverse=False,
triggering_user_name="test_user",
dag_run_conf={},
)
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
# initial state -- nothing is running
assert dag1_non_b_running == 0
assert dag1_b_running == 0
assert total_running == 0
assert session.query(func.count(DagRun.id)).scalar() == 6
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 6
if pause_it:
b = session.get(Backfill, b.id)
b.is_paused = True
session.commit()
# now let's run scheduler once
self.job_runner._start_queued_dagruns(session)
session.flush()
assert DagRun.DEFAULT_DAGRUNS_TO_EXAMINE == 20
dag1_non_b_running, dag1_b_running, total_running = _running_counts()
assert dag1_non_b_running == 0
assert dag1_b_running == expected_running
assert total_running == expected_running
assert session.scalar(select(func.count()).select_from(DagRun)) == 6
assert session.scalar(select(func.count()).where(DagRun.dag_id == dag1_dag_id)) == 6
def test_start_queued_dagruns_do_follow_logical_date_order(self, dag_maker):
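        """
        Test that with max_active_runs=1, queued dag runs are started in logical_date order:
        the next run only starts after the previous one finishes.
        """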
session = settings.Session()
with dag_maker("test_dag1", max_active_runs=1):
EmptyOperator(task_id="mytask")
date = DEFAULT_DATE
for i in range(30):
dr = dag_maker.create_dagrun(
run_id=f"dagrun_{i}",
run_type=DagRunType.SCHEDULED,
state=State.QUEUED,
logical_date=date,
)
date = dr.logical_date + timedelta(hours=1)
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._start_queued_dagruns(session)
session.flush()
dr = DagRun.find(run_id="dagrun_0")
ti = dr[0].get_task_instance(task_id="mytask", session=session)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
assert dr[0].state == State.RUNNING
dr[0].state = State.SUCCESS
session.merge(dr[0])
session.flush()
assert dr[0].state == State.SUCCESS
self.job_runner._start_queued_dagruns(session)
session.flush()
dr = DagRun.find(run_id="dagrun_1")
assert len(session.query(DagRun).filter(DagRun.state == State.RUNNING).all()) == 1
assert dr[0].state == State.RUNNING
def test_no_dagruns_would_stuck_in_running(self, dag_maker):
# Test that running dagruns are not stuck in running.
# Create one dagrun in 'running' state and 1 in 'queued' state from one dag(max_active_runs=1)
# Create 16 dagruns in 'running' state and 16 in 'queued' state from another dag
# Create 16 dagruns in 'running' state and 16 in 'queued' state from yet another dag
# Finish the task of the first dag, and check that another dagrun starts running
# from the first dag.
session = settings.Session()
# first dag and dagruns
date = timezone.datetime(2016, 1, 1)
logical_date = timezone.coerce_datetime(date)
with dag_maker("test_dagrun_states_are_correct_1", max_active_runs=1, start_date=date) as dag:
task1 = EmptyOperator(task_id="dummy_task")
dr1_running = dag_maker.create_dagrun(run_id="dr1_run_1", logical_date=date)
data_interval = infer_automated_data_interval(dag.timetable, logical_date)
dag_maker.create_dagrun(
run_id="dr1_run_2",
state=State.QUEUED,
logical_date=dag.next_dagrun_info(
last_automated_dagrun=data_interval, restricted=False
).data_interval.start,
)
# second dag and dagruns
date = timezone.datetime(2020, 1, 1)
with dag_maker("test_dagrun_states_are_correct_2", start_date=date) as dag:
EmptyOperator(task_id="dummy_task")
for i in range(16):
dr = dag_maker.create_dagrun(
run_id=f"dr2_run_{i + 1}",
state=State.RUNNING,
logical_date=date,
)
date = dr.logical_date + timedelta(hours=1)
dr16 = DagRun.find(run_id="dr2_run_16")
date = dr16[0].logical_date + timedelta(hours=1)
for i in range(16, 32):
dr = dag_maker.create_dagrun(
run_id=f"dr2_run_{i + 1}",
state=State.QUEUED,
logical_date=date,
)
date = dr.logical_date + timedelta(hours=1)
# third dag and dagruns
date = timezone.datetime(2021, 1, 1)
with dag_maker("test_dagrun_states_are_correct_3", start_date=date) as dag:
EmptyOperator(task_id="dummy_task")
for i in range(16):
dr = dag_maker.create_dagrun(
run_id=f"dr3_run_{i + 1}",
state=State.RUNNING,
logical_date=date,
)
date = dr.logical_date + timedelta(hours=1)
dr16 = DagRun.find(run_id="dr3_run_16")
date = dr16[0].logical_date + timedelta(hours=1)
for i in range(16, 32):
dr = dag_maker.create_dagrun(
run_id=f"dr2_run_{i + 1}",
state=State.QUEUED,
logical_date=date,
)
date = dr.logical_date + timedelta(hours=1)
scheduler_job = Job(executor=MockExecutor(do_update=False))
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dag_version = DagVersion.get_latest_version(dag_id=dag.dag_id)
ti = TaskInstance(task=task1, run_id=dr1_running.run_id, dag_version_id=dag_version.id)
ti.refresh_from_db()
ti.state = State.SUCCESS
session.merge(ti)
session.flush()
# Run the scheduler loop
with mock.patch.object(settings, "USE_JOB_SCHEDULE", False):
self.job_runner._do_scheduling(session)
self.job_runner._do_scheduling(session)
assert DagRun.find(run_id="dr1_run_1")[0].state == State.SUCCESS
assert DagRun.find(run_id="dr1_run_2")[0].state == State.RUNNING
@pytest.mark.parametrize(
("state", "start_date", "end_date"),
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
],
)
def test_dag_file_processor_process_task_instances(self, state, start_date, end_date, dag_maker):
"""
        Test that _schedule_dag_run sets the task instance to the SCHEDULED state.
"""
with dag_maker(dag_id="test_scheduler_process_execute_task"):
BashOperator(task_id="dummy", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.job_runner._schedule_dag_run(dr, session)
assert session.query(TaskInstance).filter_by(state=State.SCHEDULED).count() == 1
session.refresh(ti)
assert ti.state == State.SCHEDULED
@pytest.mark.parametrize(
("state", "start_date", "end_date"),
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
],
)
def test_dag_file_processor_process_task_instances_with_max_active_tis_per_dag(
self, state, start_date, end_date, dag_maker
):
"""
        Test that _schedule_dag_run sets the task instance to the SCHEDULED state
        when the task has max_active_tis_per_dag set.
"""
with dag_maker(dag_id="test_scheduler_process_execute_task_with_max_active_tis_per_dag"):
BashOperator(task_id="dummy", max_active_tis_per_dag=2, bash_command="echo Hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.job_runner._schedule_dag_run(dr, session)
assert session.query(TaskInstance).filter_by(state=State.SCHEDULED).count() == 1
session.refresh(ti)
assert ti.state == State.SCHEDULED
@pytest.mark.parametrize(
("state", "start_date", "end_date"),
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
],
)
def test_dag_file_processor_process_task_instances_with_max_active_tis_per_dagrun(
self, state, start_date, end_date, dag_maker
):
"""
        Test that _schedule_dag_run sets the task instance to the SCHEDULED state
        when the task has max_active_tis_per_dagrun set.
"""
with dag_maker(dag_id="test_scheduler_process_execute_task_with_max_active_tis_per_dagrun"):
BashOperator(task_id="dummy", max_active_tis_per_dagrun=2, bash_command="echo Hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.job_runner._schedule_dag_run(dr, session)
assert session.query(TaskInstance).filter_by(state=State.SCHEDULED).count() == 1
session.refresh(ti)
assert ti.state == State.SCHEDULED
@pytest.mark.parametrize(
("state", "start_date", "end_date"),
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
],
)
def test_dag_file_processor_process_task_instances_depends_on_past(
self, state, start_date, end_date, dag_maker
):
"""
        Test that _schedule_dag_run sets both task instances to the SCHEDULED state
        when depends_on_past is set.
"""
with dag_maker(
dag_id="test_scheduler_process_execute_task_depends_on_past",
default_args={
"depends_on_past": True,
},
):
BashOperator(task_id="dummy1", bash_command="echo hi")
BashOperator(task_id="dummy2", bash_command="echo hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
assert dr is not None
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.job_runner._schedule_dag_run(dr, session)
assert session.query(TaskInstance).filter_by(state=State.SCHEDULED).count() == 2
session.refresh(tis[0])
session.refresh(tis[1])
assert tis[0].state == State.SCHEDULED
assert tis[1].state == State.SCHEDULED
def test_scheduler_job_add_new_task(self, dag_maker):
"""
Test if a task instance will be added if the dag is updated
"""
with dag_maker(dag_id="test_scheduler_add_new_task", serialized=False) as dag:
BashOperator(task_id="dummy", bash_command="echo test")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
session = settings.Session()
orm_dag = dag_maker.dag_model
assert orm_dag is not None
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._create_dag_runs([orm_dag], session)
drs = (
session.query(DagRun)
.options(joinedload(DagRun.task_instances).joinedload(TaskInstance.dag_version))
.all()
)
assert len(drs) == 1
dr = drs[0]
tis = dr.get_task_instances(session=session)
assert len(tis) == 1
BashOperator(task_id="dummy2", dag=dag, bash_command="echo test")
sync_dag_to_db(dag_maker.dag, bundle_name="dag_maker", session=session)
session.commit()
self.job_runner._schedule_dag_run(dr, session)
session.expunge_all()
assert session.query(TaskInstance).filter_by(state=State.SCHEDULED).count() == 2
session.flush()
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
tis = dr.get_task_instances(session=session)
assert len(tis) == 2
@pytest.mark.need_serialized_dag
def test_runs_respected_after_clear(self, dag_maker, session):
"""
Test dag after dag.clear, max_active_runs is respected
"""
with dag_maker(
dag_id="test_scheduler_max_active_runs_respected_after_clear",
start_date=DEFAULT_DATE,
max_active_runs=1,
) as dag:
BashOperator(task_id="dummy", bash_command="echo Hi")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, state=State.QUEUED)
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.QUEUED)
dag.clear(session=session)
assert len(DagRun.find(dag_id=dag.dag_id, state=State.QUEUED, session=session)) == 3
self.job_runner._start_queued_dagruns(session)
session.flush()
# Assert that only 1 dagrun is active
assert len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)) == 1
# Assert that the other two are queued
assert len(DagRun.find(dag_id=dag.dag_id, state=State.QUEUED, session=session)) == 2
def test_timeout_triggers(self, dag_maker):
"""
Tests that tasks in the deferred state, but whose trigger timeout
has expired, are correctly failed.
"""
session = settings.Session()
# Create the test DAG and task
with dag_maker(
dag_id="test_timeout_triggers",
start_date=DEFAULT_DATE,
schedule="@once",
max_active_runs=1,
session=session,
):
EmptyOperator(task_id="dummy1")
# Create a Task Instance for the task that is allegedly deferred
# but past its timeout, and one that is still good.
# We don't actually need a linked trigger here; the code doesn't check.
dr1 = dag_maker.create_dagrun()
dr2 = dag_maker.create_dagrun(
run_id="test2", logical_date=DEFAULT_DATE + datetime.timedelta(seconds=1)
)
ti1 = dr1.get_task_instance("dummy1", session)
ti2 = dr2.get_task_instance("dummy1", session)
ti1.state = State.DEFERRED
ti1.trigger_timeout = timezone.utcnow() - datetime.timedelta(seconds=60)
ti2.state = State.DEFERRED
ti2.trigger_timeout = timezone.utcnow() + datetime.timedelta(seconds=60)
session.flush()
# Boot up the scheduler and make it check timeouts
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner.check_trigger_timeouts(session=session)
# Make sure that TI1 is now scheduled to fail, and 2 wasn't touched
session.refresh(ti1)
session.refresh(ti2)
assert ti1.state == State.SCHEDULED
assert ti1.next_method == "__fail__"
assert ti2.state == State.DEFERRED
def test_retry_on_db_error_when_update_timeout_triggers(self, dag_maker, testing_dag_bundle, session):
"""
        Tests that it retries on DB errors such as deadlocks when updating timed-out triggers.
"""
from sqlalchemy.exc import OperationalError
retry_times = 3
# Create the test DAG and task
with dag_maker(
dag_id="test_retry_on_db_error_when_update_timeout_triggers",
start_date=DEFAULT_DATE,
schedule="@once",
max_active_runs=1,
session=session,
):
EmptyOperator(task_id="dummy1")
# Mock the db failure within retry times
might_fail_session = MagicMock(wraps=session)
def check_if_trigger_timeout(max_retries: int):
def make_side_effect():
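                # Build a side effect that raises OperationalError for the first
                # ``retry_times - 1`` calls and then delegates to the real session.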
call_count = 0
def side_effect(*args, **kwargs):
nonlocal call_count
if call_count < retry_times - 1:
call_count += 1
raise OperationalError("any_statement", "any_params", "any_orig")
return session.execute(*args, **kwargs)
return side_effect
might_fail_session.execute.side_effect = make_side_effect()
try:
# Create a Task Instance for the task that is allegedly deferred
# but past its timeout, and one that is still good.
# We don't actually need a linked trigger here; the code doesn't check.
sync_dag_to_db(dag_maker.dag, session=session)
dr1 = dag_maker.create_dagrun()
dr2 = dag_maker.create_dagrun(
run_id="test2", logical_date=DEFAULT_DATE + datetime.timedelta(seconds=1)
)
ti1 = dr1.get_task_instance("dummy1", session)
ti2 = dr2.get_task_instance("dummy1", session)
ti1.state = State.DEFERRED
ti1.trigger_timeout = timezone.utcnow() - datetime.timedelta(seconds=60)
ti2.state = State.DEFERRED
ti2.trigger_timeout = timezone.utcnow() + datetime.timedelta(seconds=60)
session.flush()
# Boot up the scheduler and make it check timeouts
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner.check_trigger_timeouts(max_retries=max_retries, session=might_fail_session)
# Make sure that TI1 is now scheduled to fail, and 2 wasn't touched
session.refresh(ti1)
session.refresh(ti2)
assert ti1.state == State.SCHEDULED
assert ti1.next_method == "__fail__"
assert ti2.state == State.DEFERRED
finally:
self.clean_db()
        # Positive case: retries until success before reaching the max retry count
check_if_trigger_timeout(retry_times)
# Negative case: no retries, execute only once.
with pytest.raises(OperationalError):
check_if_trigger_timeout(1)
def test_find_and_purge_task_instances_without_heartbeats_nothing(self):
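        """With no task instances missing heartbeats, no failure callbacks should be sent."""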
executor = MockExecutor(do_update=False)
scheduler_job = Job(executor=executor)
with mock.patch("airflow.executors.executor_loader.ExecutorLoader.load_executor") as loader_mock:
loader_mock.return_value = executor
self.job_runner = SchedulerJobRunner(scheduler_job)
self.job_runner._find_and_purge_task_instances_without_heartbeats()
executor.callback_sink.send.assert_not_called()
@pytest.mark.usefixtures("testing_dag_bundle")
def test_find_and_purge_task_instances_without_heartbeats(self, session, create_dagrun):
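        """
        Test that a running task instance queued by this scheduler whose heartbeat has timed out
        is removed from the executor and a failure callback with the expected details is sent.
        """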
dagfile = EXAMPLE_STANDARD_DAGS_FOLDER / "example_branch_operator.py"
dagbag = DagBag(dagfile)
dag = dagbag.get_dag("example_branch_operator")
scheduler_dag = sync_dag_to_db(dag)
dag_v = DagVersion.get_latest_version(dag.dag_id)
data_interval = infer_automated_data_interval(scheduler_dag.timetable, DEFAULT_LOGICAL_DATE)
dag_run = create_dagrun(
scheduler_dag,
logical_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
data_interval=data_interval,
)
executor = MockExecutor()
scheduler_job = Job(executor=executor)
with mock.patch("airflow.executors.executor_loader.ExecutorLoader.load_executor") as loader_mock:
loader_mock.return_value = executor
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# We will provision 2 tasks so we can check we only find task instances without heartbeats from this scheduler
tasks_to_setup = ["branching", "run_this_first"]
for task_id in tasks_to_setup:
task = dag.get_task(task_id=task_id)
ti = TaskInstance(task, run_id=dag_run.run_id, state=State.RUNNING, dag_version_id=dag_v.id)
ti.last_heartbeat_at = timezone.utcnow() - timedelta(minutes=6)
ti.start_date = timezone.utcnow() - timedelta(minutes=10)
ti.queued_by_job_id = 999
session.add(ti)
session.flush()
assert task.task_id == "run_this_first" # Make sure we have the task/ti we expect
ti.queued_by_job_id = scheduler_job.id
session.flush()
executor.running.add(ti.key) # The executor normally does this during heartbeat.
self.job_runner._find_and_purge_task_instances_without_heartbeats()
assert ti.key not in executor.running
executor.callback_sink.send.assert_called_once()
callback_requests = executor.callback_sink.send.call_args.args
assert len(callback_requests) == 1
callback_request = callback_requests[0]
assert callback_request.filepath == dag.relative_fileloc
assert callback_request.msg == str(
self.job_runner._generate_task_instance_heartbeat_timeout_message_details(ti)
)
assert callback_request.is_failure_callback is True
assert callback_request.ti.dag_id == ti.dag_id
assert callback_request.ti.task_id == ti.task_id
assert callback_request.ti.run_id == ti.run_id
assert callback_request.ti.map_index == ti.map_index
# Verify context_from_server is passed
assert callback_request.context_from_server is not None
assert callback_request.context_from_server.dag_run.logical_date == ti.dag_run.logical_date
assert callback_request.context_from_server.max_tries == ti.max_tries
@pytest.mark.usefixtures("testing_dag_bundle")
def test_task_instance_heartbeat_timeout_message(self, session, create_dagrun):
"""
Check that the task instance heartbeat timeout message comes out as expected
"""
dagfile = EXAMPLE_STANDARD_DAGS_FOLDER / "example_branch_operator.py"
dagbag = DagBag(dagfile)
dag = dagbag.get_dag("example_branch_operator")
scheduler_dag = sync_dag_to_db(dag, session=session)
session.query(Job).delete()
data_interval = infer_automated_data_interval(scheduler_dag.timetable, DEFAULT_LOGICAL_DATE)
dag_run = create_dagrun(
scheduler_dag,
logical_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
data_interval=data_interval,
)
scheduler_job = Job(executor=MockExecutor())
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# We will provision 2 tasks so we can check we only find task instance heartbeat timeouts from this scheduler
tasks_to_setup = ["branching", "run_this_first"]
dag_version = DagVersion.get_latest_version(dag.dag_id)
for task_id in tasks_to_setup:
task = dag.get_task(task_id=task_id)
ti = TaskInstance(task, run_id=dag_run.run_id, state=State.RUNNING, dag_version_id=dag_version.id)
ti.queued_by_job_id = 999
session.add(ti)
session.flush()
assert task.task_id == "run_this_first" # Make sure we have the task/ti we expect
ti.queued_by_job_id = scheduler_job.id
session.flush()
task_instance_heartbeat_timeout_message = (
self.job_runner._generate_task_instance_heartbeat_timeout_message_details(ti)
)
assert task_instance_heartbeat_timeout_message == {
"DAG Id": "example_branch_operator",
"Task Id": "run_this_first",
"Run Id": "scheduled__2016-01-01T00:00:00+00:00",
}
ti.hostname = "10.10.10.10"
ti.map_index = 2
ti.external_executor_id = "abcdefg"
task_instance_heartbeat_timeout_message = (
self.job_runner._generate_task_instance_heartbeat_timeout_message_details(ti)
)
assert task_instance_heartbeat_timeout_message == {
"DAG Id": "example_branch_operator",
"Task Id": "run_this_first",
"Run Id": "scheduled__2016-01-01T00:00:00+00:00",
"Hostname": "10.10.10.10",
"Map Index": 2,
"External Executor Id": "abcdefg",
}
@mock.patch.object(settings, "USE_JOB_SCHEDULE", False)
def run_scheduler_until_dagrun_terminal(self):
"""
Run a scheduler until any dag run reaches a terminal state, or the scheduler becomes "idle".
        This needs a DagRun to be pre-created (it can be in running or queued state), since no more will be
        created once creating new DagRuns is turned off by setting USE_JOB_SCHEDULE to False.
Note: This doesn't currently account for tasks that go into retry -- the scheduler would be detected
as idle in that circumstance
"""
# Spy on _do_scheduling and _process_executor_events so we can notice
# if nothing happened, and abort early! If there is nothing
# to schedule and no events, it means we have stalled.
def spy_on_return(orig, result):
def spy(*args, **kwargs):
ret = orig(*args, **kwargs)
result.append(ret)
return ret
return spy
num_queued_tis: deque[int] = deque([], 3)
num_finished_events: deque[int] = deque([], 3)
do_scheduling_spy = mock.patch.object(
self.job_runner,
"_do_scheduling",
side_effect=spy_on_return(self.job_runner._do_scheduling, num_queued_tis),
)
executor_events_spy = mock.patch.object(
self.job_runner,
"_process_executor_events",
side_effect=spy_on_return(self.job_runner._process_executor_events, num_finished_events),
)
orig_set_state = DagRun.set_state
def watch_set_state(dr: DagRun, state, **kwargs):
if state in (DagRunState.SUCCESS, DagRunState.FAILED):
# Stop the scheduler
self.job_runner.num_runs = 1 # type: ignore[union-attr]
orig_set_state(dr, state, **kwargs)
def watch_heartbeat(*args, **kwargs):
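            # After three scheduler loops, fail fast if none of them queued any TIs or
            # processed any executor events -- that means the scheduler has stalled.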
if len(num_queued_tis) < 3 or len(num_finished_events) < 3:
return
queued_any_tis = any(val > 0 for val in num_queued_tis)
finished_any_events = any(val > 0 for val in num_finished_events)
assert queued_any_tis or finished_any_events, (
"Scheduler has stalled without setting the DagRun state!"
)
set_state_spy = mock.patch.object(DagRun, "set_state", new=watch_set_state)
heartbeat_spy = mock.patch.object(self.job_runner.job, "heartbeat", new=watch_heartbeat)
with heartbeat_spy, set_state_spy, do_scheduling_spy, executor_events_spy:
run_job(self.job_runner.job, execute_callable=self.job_runner._execute)
@pytest.mark.long_running
@pytest.mark.parametrize("dag_id", ["test_mapped_classic", "test_mapped_taskflow"])
def test_mapped_dag(self, dag_id, session, testing_dag_bundle):
"""End-to-end test of a simple mapped dag"""
from airflow.executors.local_executor import LocalExecutor
dagbag = DagBag(dag_folder=TEST_DAGS_FOLDER, include_examples=False)
sync_bag_to_db(dagbag, "testing", None)
dagbag.process_file(str(TEST_DAGS_FOLDER / f"{dag_id}.py"))
dag = dagbag.get_dag(dag_id)
assert dag
logical_date = timezone.coerce_datetime(timezone.utcnow() - datetime.timedelta(days=2))
data_interval = infer_automated_data_interval(dag.timetable, logical_date)
dr = dag.create_dagrun(
run_id=f"{dag_id}_1",
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
state=State.RUNNING,
session=session,
logical_date=logical_date,
data_interval=data_interval,
run_after=data_interval,
triggered_by=DagRunTriggeredByType.TEST,
)
executor = LocalExecutor()
job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(job=job)
self.run_scheduler_until_dagrun_terminal()
dr.refresh_from_db(session)
assert dr.state == DagRunState.SUCCESS
def test_should_mark_empty_task_as_success(self, testing_dag_bundle):
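        """
        Test that empty tasks with no callbacks or outlets are marked success directly by the
        scheduler (duration 0.0), while empty tasks with callbacks or outlets are scheduled normally.
        """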
dag_file = Path(__file__).parents[1] / "dags/test_only_empty_tasks.py"
# Write DAGs to dag and serialized_dag table
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
sync_bag_to_db(dagbag, "testing", None)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Create DagRun
session = settings.Session()
orm_dag = session.get(DagModel, "test_only_empty_tasks")
self.job_runner._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id="test_only_empty_tasks", session=session)
assert len(drs) == 1
dr = drs[0]
# Schedule TaskInstances
self.job_runner._schedule_dag_run(dr, session)
session.expunge_all()
with create_session() as session:
tis = session.query(TaskInstance).all()
dags = self.job_runner.scheduler_dag_bag._dags.values()
assert [dag.dag_id for dag in dags] == ["test_only_empty_tasks"]
assert len(tis) == 6
assert {
("test_task_a", "success"),
("test_task_b", None),
("test_task_c", "success"),
("test_task_on_execute", "scheduled"),
("test_task_on_success", "scheduled"),
("test_task_outlets", "scheduled"),
} == {(ti.task_id, ti.state) for ti in tis}
for state, start_date, end_date, duration in [
(ti.state, ti.start_date, ti.end_date, ti.duration) for ti in tis
]:
if state == "success":
assert start_date is not None
assert end_date is not None
assert duration == 0.0
else:
assert start_date is None
assert end_date is None
assert duration is None
self.job_runner._schedule_dag_run(dr, session)
session.expunge_all()
with create_session() as session:
tis = session.query(TaskInstance).all()
assert len(tis) == 6
assert {
("test_task_a", "success"),
("test_task_b", "success"),
("test_task_c", "success"),
("test_task_on_execute", "scheduled"),
("test_task_on_success", "scheduled"),
("test_task_outlets", "scheduled"),
} == {(ti.task_id, ti.state) for ti in tis}
for state, start_date, end_date, duration in [
(ti.state, ti.start_date, ti.end_date, ti.duration) for ti in tis
]:
if state == "success":
assert start_date is not None
assert end_date is not None
assert duration == 0.0
else:
assert start_date is None
assert end_date is None
assert duration is None
@pytest.mark.need_serialized_dag
def test_catchup_works_correctly(self, dag_maker, testing_dag_bundle):
"""Test that catchup works correctly"""
session = settings.Session()
with dag_maker(
dag_id="test_catchup_schedule_dag",
schedule=timedelta(days=1),
start_date=DEFAULT_DATE,
catchup=True,
max_active_runs=1,
session=session,
) as dag:
EmptyOperator(task_id="dummy")
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._create_dag_runs([dag_maker.dag_model], session)
self.job_runner._start_queued_dagruns(session)
# first dagrun logical date is DEFAULT_DATE 2016-01-01T00:00:00+00:00
dr = DagRun.find(logical_date=DEFAULT_DATE, session=session)[0]
ti = dr.get_task_instance(task_id="dummy")
ti.state = State.SUCCESS
session.merge(ti)
session.flush()
self.job_runner._schedule_dag_run(dr, session)
session.flush()
# Run the second time so _update_dag_next_dagrun will run
self.job_runner._schedule_dag_run(dr, session)
session.flush()
dag_maker.dag.catchup = False
dag = sync_dag_to_db(dag_maker.dag, bundle_name="dag_maker", session=session)
assert not dag.catchup
dm = DagModel.get_dagmodel(dag.dag_id)
self.job_runner._create_dag_runs([dm], session)
# Check catchup worked correctly by ensuring logical_date is quite new
# Our dag is a daily dag
assert (
session.query(DagRun.logical_date)
.filter(DagRun.logical_date != DEFAULT_DATE) # exclude the first run
.scalar()
) > (timezone.utcnow() - timedelta(days=2))
def test_update_dagrun_state_for_paused_dag(self, dag_maker, session):
"""Test that _update_dagrun_state_for_paused_dag puts DagRuns in terminal states"""
with dag_maker("testdag") as dag:
EmptyOperator(task_id="task1")
scheduled_run = dag_maker.create_dagrun(
logical_date=datetime.datetime(2022, 1, 1),
run_type=DagRunType.SCHEDULED,
)
scheduled_run.last_scheduling_decision = datetime.datetime.now(timezone.utc) - timedelta(minutes=1)
ti = scheduled_run.get_task_instances(session=session)[0]
ti.set_state(TaskInstanceState.RUNNING)
dm = DagModel.get_dagmodel(dag.dag_id, session)
dm.is_paused = True
session.flush()
assert scheduled_run.state == State.RUNNING
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._update_dag_run_state_for_paused_dags(session=session)
session.flush()
# TI still running, DagRun left in running
(scheduled_run,) = DagRun.find(dag_id=dag.dag_id, run_type=DagRunType.SCHEDULED, session=session)
assert scheduled_run.state == State.RUNNING
prior_last_scheduling_decision = scheduled_run.last_scheduling_decision
# Make sure we don't constantly try dagruns over and over
self.job_runner._update_dag_run_state_for_paused_dags(session=session)
(scheduled_run,) = DagRun.find(dag_id=dag.dag_id, run_type=DagRunType.SCHEDULED, session=session)
assert scheduled_run.state == State.RUNNING
# last_scheduling_decision is bumped by update_state, so check that to determine if we tried again
assert prior_last_scheduling_decision == scheduled_run.last_scheduling_decision
# Once the TI is in a terminal state though, DagRun goes to success
ti.set_state(TaskInstanceState.SUCCESS, session=session)
self.job_runner._update_dag_run_state_for_paused_dags(session=session)
(scheduled_run,) = DagRun.find(dag_id=dag.dag_id, run_type=DagRunType.SCHEDULED, session=session)
assert scheduled_run.state == State.SUCCESS
def test_update_dagrun_state_for_paused_dag_not_for_backfill(self, dag_maker, session):
"""Test that the _update_dagrun_state_for_paused_dag does not affect backfilled dagruns"""
with dag_maker("testdag") as dag:
EmptyOperator(task_id="task1")
# Backfill run
backfill_run = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB)
backfill_run.last_scheduling_decision = datetime.datetime.now(timezone.utc) - timedelta(minutes=1)
ti = backfill_run.get_task_instances(session=session)[0]
ti.set_state(TaskInstanceState.SUCCESS, session=session)
dm = DagModel.get_dagmodel(dag.dag_id, session=session)
dm.is_paused = True
session.flush()
assert backfill_run.state == State.RUNNING
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._update_dag_run_state_for_paused_dags(session=session)
session.flush()
(backfill_run,) = DagRun.find(dag_id=dag.dag_id, run_type=DagRunType.BACKFILL_JOB, session=session)
assert backfill_run.state == State.SUCCESS
@staticmethod
def _find_assets_activation(session) -> tuple[list[AssetModel], list[AssetModel]]:
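        """Return assets split into (inactive, active) lists based on AssetActive rows, ordered by URI."""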
assets = session.execute(
select(AssetModel, AssetActive)
.outerjoin(
AssetActive,
(AssetModel.name == AssetActive.name) & (AssetModel.uri == AssetActive.uri),
)
.order_by(AssetModel.uri)
).all()
return [a for a, v in assets if not v], [a for a, v in assets if v]
def test_asset_orphaning(self, dag_maker, session):
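        """Test that assets no longer referenced by any DAG schedule or task outlet are marked inactive."""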
self.job_runner = SchedulerJobRunner(job=Job())
asset1 = Asset(uri="test://asset_1", name="test_asset_1", group="test_group")
asset2 = Asset(uri="test://asset_2", name="test_asset_2", group="test_group")
asset3 = Asset(uri="test://asset_3", name="test_asset_3", group="test_group")
asset4 = Asset(uri="test://asset_4", name="test_asset_4", group="test_group")
asset5 = Asset(uri="test://asset_5", name="test_asset_5", group="test_group")
with dag_maker(dag_id="assets-1", schedule=[asset1, asset2], session=session):
BashOperator(task_id="task", bash_command="echo 1", outlets=[asset3, asset4])
# asset5 is not registered (since it's not used anywhere).
orphaned, active = self._find_assets_activation(session)
assert active == [asset1, asset2, asset3, asset4]
assert orphaned == []
self.job_runner._update_asset_orphanage(session=session)
session.flush()
# Now remove 2 asset references and add asset5.
with dag_maker(dag_id="assets-1", schedule=[asset1], session=session):
BashOperator(task_id="task", bash_command="echo 1", outlets=[asset3, asset5])
# The DAG parser finds asset5.
orphaned, active = self._find_assets_activation(session)
assert active == [asset1, asset2, asset3, asset4, asset5]
assert orphaned == []
self.job_runner._update_asset_orphanage(session=session)
session.flush()
# Now we get the updated result.
orphaned, active = self._find_assets_activation(session)
assert active == [asset1, asset3, asset5]
assert orphaned == [asset2, asset4]
def test_asset_orphaning_ignore_orphaned_assets(self, dag_maker, session):
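        """Test that already-orphaned assets are ignored on later checks and their updated_at stays unchanged."""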
self.job_runner = SchedulerJobRunner(job=Job())
asset1 = Asset(uri="test://asset_1", name="test_asset_1", group="test_group")
with dag_maker(dag_id="assets-1", schedule=[asset1], session=session):
BashOperator(task_id="task", bash_command="echo 1")
orphaned, active = self._find_assets_activation(session)
assert active == [asset1]
assert orphaned == []
self.job_runner._update_asset_orphanage(session=session)
session.flush()
# now remove asset1 reference
with dag_maker(dag_id="assets-1", schedule=None, session=session):
BashOperator(task_id="task", bash_command="echo 1")
self.job_runner._update_asset_orphanage(session=session)
session.flush()
orphaned, active = self._find_assets_activation(session)
assert active == []
assert orphaned == [asset1]
updated_at_timestamps = [asset.updated_at for asset in orphaned]
# when rerunning we should ignore the already orphaned assets and thus the updated_at timestamp
# should remain the same
self.job_runner._update_asset_orphanage(session=session)
session.flush()
orphaned, active = self._find_assets_activation(session)
assert active == []
assert orphaned == [asset1]
assert [asset.updated_at for asset in orphaned] == updated_at_timestamps
@pytest.mark.parametrize(
("paused", "stale", "expected_classpath"),
[
pytest.param(
False,
False,
"airflow.providers.standard.triggers.temporal.DateTimeTrigger",
id="active",
),
pytest.param(False, True, None, id="stale"),
pytest.param(True, False, None, id="paused"),
pytest.param(True, False, None, id="stale-paused"),
],
)
@pytest.mark.need_serialized_dag(False)
def test_delete_unreferenced_triggers(self, dag_maker, session, paused, stale, expected_classpath):
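        """
        Test that a trigger referenced only through an asset watcher is kept while the DAG is
        active and removed once the DAG is paused or stale.
        """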
self.job_runner = SchedulerJobRunner(job=Job())
classpath, kwargs = DateTimeTrigger(timezone.utcnow()).serialize()
asset1 = Asset(
name="test_asset_1",
watchers=[AssetWatcher(name="test", trigger={"classpath": classpath, "kwargs": kwargs})],
)
with dag_maker(dag_id="dag", schedule=[asset1], session=session) as dag:
EmptyOperator(task_id="task")
dags = {"dag": LazyDeserializedDAG.from_dag(dag)}
def _update_references() -> None:
asset_op = AssetModelOperation.collect(dags)
orm_assets = asset_op.sync_assets(session=session)
session.flush()
asset_op.add_dag_asset_references(orm_dags, orm_assets, session=session)
asset_op.activate_assets_if_possible(orm_assets.values(), session=session)
asset_op.add_asset_trigger_references(orm_assets, session=session)
session.flush()
# Initial setup.
orm_dags = DagModelOperation({"dag": dag}, "testing", None).add_dags(session=session)
_update_references()
assert session.scalars(select(Trigger.classpath)).one() == classpath
# Simulate dag state change.
orm_dags["dag"].is_paused = paused
orm_dags["dag"].is_stale = stale
_update_references()
assert session.scalars(select(Trigger.classpath)).one() == classpath
# Unreferenced trigger should be removed.
self.job_runner._remove_unreferenced_triggers(session=session)
assert session.scalars(select(Trigger.classpath)).one_or_none() == expected_classpath
def test_misconfigured_dags_doesnt_crash_scheduler(self, session, dag_maker, caplog):
"""Test that if dagrun creation throws an exception, the scheduler doesn't crash"""
with dag_maker("testdag1", serialized=True):
BashOperator(task_id="task", bash_command="echo 1")
dm1 = dag_maker.dag_model
# Here, the next_dagrun is set to None, which will cause an exception
dm1.next_dagrun = None
session.add(dm1)
session.flush()
with dag_maker("testdag2", serialized=True):
BashOperator(task_id="task", bash_command="echo 1")
dm2 = dag_maker.dag_model
scheduler_job = Job()
job_runner = SchedulerJobRunner(job=scheduler_job)
# In the dagmodel list, the first dag should fail, but the second one should succeed
job_runner._create_dag_runs([dm1, dm2], session)
assert "Failed creating DagRun for testdag1" in caplog.text
assert not DagRun.find(dag_id="testdag1", session=session)
# Check if the second dagrun was created
assert DagRun.find(dag_id="testdag2", session=session)
def test_activate_referenced_assets_with_no_existing_warning(self, session, testing_dag_bundle):
dag_warnings = session.query(DagWarning).all()
assert dag_warnings == []
dag_id1 = "test_asset_dag1"
asset1_name = "asset1"
asset_extra = {"foo": "bar"}
asset1 = Asset(name=asset1_name, uri="s3://bucket/key/1", extra=asset_extra)
asset1_1 = Asset(name=asset1_name, uri="it's duplicate", extra=asset_extra)
asset1_2 = Asset(name="it's also a duplicate", uri="s3://bucket/key/1", extra=asset_extra)
dag1 = DAG(dag_id=dag_id1, start_date=DEFAULT_DATE, schedule=[asset1, asset1_1, asset1_2])
sync_dag_to_db(dag1, session=session)
asset_models = session.scalars(select(AssetModel)).all()
assert len(asset_models) == 3
SchedulerJobRunner._activate_referenced_assets(asset_models, session=session)
session.flush()
dag_warning = session.scalar(
select(DagWarning).where(
DagWarning.dag_id == dag_id1, DagWarning.warning_type == "asset conflict"
)
)
assert dag_warning.message == (
'Cannot activate asset Asset(name="asset1", uri="it\'s duplica'
'te", group="asset"); name is already associated to \'s3://buck'
"et/key/1'\nCannot activate asset Asset(name=\"it's also a dup"
'licate", uri="s3://bucket/key/1", group="asset"); uri is alrea'
"dy associated to 'asset1'"
)
def test_activate_referenced_assets_with_existing_warnings(self, session, testing_dag_bundle):
dag_ids = [f"test_asset_dag{i}" for i in range(1, 4)]
asset1_name = "asset1"
asset_extra = {"foo": "bar"}
asset1 = Asset(name=asset1_name, uri="s3://bucket/key/1", extra=asset_extra)
asset1_1 = Asset(name=asset1_name, uri="it's duplicate", extra=asset_extra)
asset1_2 = Asset(name=asset1_name, uri="it's duplicate 2", extra=asset_extra)
dag1 = DAG(dag_id=dag_ids[0], start_date=DEFAULT_DATE, schedule=[asset1, asset1_1])
dag2 = DAG(dag_id=dag_ids[1], start_date=DEFAULT_DATE)
dag3 = DAG(dag_id=dag_ids[2], start_date=DEFAULT_DATE, schedule=[asset1_2])
sync_dags_to_db([dag1, dag2, dag3], session=session)
session.add_all(
DagWarning(dag_id=dag_id, warning_type="asset conflict", message="will not exist")
for dag_id in dag_ids
)
session.flush()
asset_models = session.scalars(select(AssetModel)).all()
SchedulerJobRunner._activate_referenced_assets(asset_models, session=session)
session.flush()
dag_warning = session.scalar(
select(DagWarning).where(
DagWarning.dag_id == dag_ids[0], DagWarning.warning_type == "asset conflict"
)
)
assert dag_warning.message == (
'Cannot activate asset Asset(name="asset1", uri="it\'s duplicate", group="asset"); '
"name is already associated to 's3://bucket/key/1'"
)
dag_warning = session.scalar(
select(DagWarning).where(
DagWarning.dag_id == dag_ids[1], DagWarning.warning_type == "asset conflict"
)
)
assert dag_warning is None
dag_warning = session.scalar(
select(DagWarning).where(
DagWarning.dag_id == dag_ids[2], DagWarning.warning_type == "asset conflict"
)
)
assert dag_warning.message == (
'Cannot activate asset Asset(name="asset1", uri="it\'s duplicate 2", group="asset"); '
"name is already associated to 's3://bucket/key/1'"
)
def test_activate_referenced_assets_with_multiple_conflict_asset_in_one_dag(
self, session, testing_dag_bundle
):
dag_id = "test_asset_dag"
asset1_name = "asset1"
asset_extra = {"foo": "bar"}
schedule = [Asset(name=asset1_name, uri="s3://bucket/key/1", extra=asset_extra)]
schedule.extend(
[Asset(name=asset1_name, uri=f"it's duplicate {i}", extra=asset_extra) for i in range(100)]
)
dag1 = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule=schedule)
sync_dag_to_db(dag1, session=session)
session.add(DagWarning(dag_id=dag_id, warning_type="asset conflict", message="will not exist"))
session.flush()
asset_models = session.scalars(select(AssetModel)).all()
SchedulerJobRunner._activate_referenced_assets(asset_models, session=session)
session.flush()
dag_warning = session.scalar(
select(DagWarning).where(DagWarning.dag_id == dag_id, DagWarning.warning_type == "asset conflict")
)
for i in range(100):
assert f"it's duplicate {i}" in dag_warning.message
def test_scheduler_passes_context_from_server_on_heartbeat_timeout(self, dag_maker, session):
"""Test that scheduler passes context_from_server when handling heartbeat timeouts."""
with dag_maker(dag_id="test_dag", session=session):
EmptyOperator(task_id="test_task")
dag_run = dag_maker.create_dagrun(run_id="test_run", state=DagRunState.RUNNING)
mock_executor = MagicMock()
scheduler_job = Job(executor=mock_executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
# Create a task instance that appears to be running but hasn't heartbeat
ti = dag_run.get_task_instance(task_id="test_task")
ti.state = TaskInstanceState.RUNNING
ti.queued_by_job_id = scheduler_job.id
# Set last_heartbeat_at to a time that would trigger timeout
ti.last_heartbeat_at = timezone.utcnow() - timedelta(seconds=600) # 10 minutes ago
session.merge(ti)
session.commit()
# Run the heartbeat timeout check
self.job_runner._find_and_purge_task_instances_without_heartbeats()
# Verify TaskCallbackRequest was created with context_from_server
mock_executor.send_callback.assert_called_once()
callback_request = mock_executor.send_callback.call_args[0][0]
assert isinstance(callback_request, TaskCallbackRequest)
assert callback_request.context_from_server is not None
assert callback_request.context_from_server.dag_run.logical_date == dag_run.logical_date
assert callback_request.context_from_server.max_tries == ti.max_tries
@pytest.mark.parametrize(
("retries", "callback_kind", "expected"),
[
(1, "retry", TaskInstanceState.UP_FOR_RETRY),
(0, "failure", TaskInstanceState.FAILED),
],
)
def test_external_kill_sets_callback_type_param(
self, dag_maker, session, retries, callback_kind, expected
):
"""External kill should mark callback type based on retry eligibility."""
with dag_maker(dag_id=f"ext_kill_{callback_kind}", fileloc="/test_path1/"):
if callback_kind == "retry":
EmptyOperator(task_id="t1", retries=retries, on_retry_callback=lambda ctx: None)
else:
EmptyOperator(task_id="t1", retries=retries, on_failure_callback=lambda ctx: None)
dr = dag_maker.create_dagrun(state=DagRunState.RUNNING)
ti = dr.get_task_instance(task_id="t1")
executor = MockExecutor(do_update=False)
scheduler_job = Job(executor=executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
# Executor reports task finished (FAILED) while TI still QUEUED -> external kill path
executor.event_buffer[ti.key] = State.FAILED, None
self.job_runner._process_executor_events(executor=executor, session=session)
scheduler_job.executor.callback_sink.send.assert_called()
request = scheduler_job.executor.callback_sink.send.call_args[0][0]
assert isinstance(request, TaskCallbackRequest)
assert request.task_callback_type == expected
def test_scheduler_passes_context_from_server_on_task_failure(self, dag_maker, session):
"""Test that scheduler passes context_from_server when handling task failures."""
with dag_maker(dag_id="test_dag", session=session):
EmptyOperator(task_id="test_task", on_failure_callback=lambda: print("failure"))
dag_run = dag_maker.create_dagrun(run_id="test_run", state=DagRunState.RUNNING)
# Create a task instance that's running
ti = dag_run.get_task_instance(task_id="test_task")
ti.state = TaskInstanceState.RUNNING
session.merge(ti)
session.commit()
# Mock the executor to simulate a task failure
mock_executor = MagicMock(spec=BaseExecutor)
mock_executor.has_task = mock.MagicMock(return_value=False)
scheduler_job = Job(executor=mock_executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
# Simulate executor reporting task as failed
executor_event = {ti.key: (TaskInstanceState.FAILED, None)}
mock_executor.get_event_buffer.return_value = executor_event
# Process the executor events
self.job_runner._process_executor_events(mock_executor, session)
# Verify TaskCallbackRequest was created with context_from_server
mock_executor.send_callback.assert_called_once()
callback_request = mock_executor.send_callback.call_args[0][0]
assert isinstance(callback_request, TaskCallbackRequest)
assert callback_request.context_from_server is not None
assert callback_request.context_from_server.dag_run.logical_date == dag_run.logical_date
assert callback_request.context_from_server.max_tries == ti.max_tries
def test_scheduler_passes_context_from_server_on_dag_timeout(self, dag_maker, session):
"""Test that scheduler passes context_from_server when DAG times out."""
from airflow.callbacks.callback_requests import DagCallbackRequest, DagRunContext
def on_failure_callback(context):
print("DAG failed")
with dag_maker(
dag_id="test_dag",
session=session,
on_failure_callback=on_failure_callback,
dagrun_timeout=timedelta(seconds=60), # 1 minute timeout
):
EmptyOperator(task_id="test_task")
dag_run = dag_maker.create_dagrun(run_id="test_run", state=DagRunState.RUNNING)
# Set the start time to make it appear timed out
dag_run.start_date = timezone.utcnow() - timedelta(seconds=120) # 2 minutes ago
session.merge(dag_run)
session.commit()
mock_executor = MagicMock()
scheduler_job = Job(executor=mock_executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
callback_req = self.job_runner._schedule_dag_run(dag_run, session)
assert isinstance(callback_req, DagCallbackRequest)
assert callback_req.is_failure_callback
assert callback_req.msg == "timed_out"
assert callback_req.context_from_server == DagRunContext(
dag_run=dag_run,
last_ti=dag_run.get_task_instance(task_id="test_task"),
)
@mock.patch("airflow.models.dagrun.get_listener_manager")
def test_dag_start_notifies_with_started_msg(self, mock_get_listener_manager, dag_maker, session):
"""Test that notify_dagrun_state_changed is called with msg='started' when DAG starts."""
mock_listener_manager = MagicMock()
mock_get_listener_manager.return_value = mock_listener_manager
with dag_maker(dag_id="test_dag_start_notify", session=session):
EmptyOperator(task_id="test_task")
# Create a QUEUED dag run that will be started
dag_run = dag_maker.create_dagrun(run_id="test_run", state=DagRunState.QUEUED)
session.commit()
mock_executor = MagicMock()
scheduler_job = Job(executor=mock_executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
self.job_runner._start_queued_dagruns(session)
# Verify that the listener hook was called with msg="started"
mock_listener_manager.hook.on_dag_run_running.assert_called_once()
call_args = mock_listener_manager.hook.on_dag_run_running.call_args
assert call_args.kwargs["msg"] == "started"
assert call_args.kwargs["dag_run"].dag_id == dag_run.dag_id
@mock.patch("airflow.models.dagrun.get_listener_manager")
def test_dag_timeout_notifies_with_timed_out_msg(self, mock_get_listener_manager, dag_maker, session):
"""Test that notify_dagrun_state_changed is called with msg='timed_out' when DAG times out."""
mock_listener_manager = MagicMock()
mock_get_listener_manager.return_value = mock_listener_manager
with dag_maker(
dag_id="test_dag_timeout_notify",
session=session,
dagrun_timeout=timedelta(seconds=60),
):
EmptyOperator(task_id="test_task")
dag_run = dag_maker.create_dagrun(run_id="test_run", state=DagRunState.RUNNING)
# Set the start time to make it appear timed out
dag_run.start_date = timezone.utcnow() - timedelta(seconds=120) # 2 minutes ago
session.merge(dag_run)
session.commit()
mock_executor = MagicMock()
scheduler_job = Job(executor=mock_executor)
self.job_runner = SchedulerJobRunner(scheduler_job)
self.job_runner._schedule_dag_run(dag_run, session)
# Verify that the listener hook was called with msg="timed_out"
mock_listener_manager.hook.on_dag_run_failed.assert_called_once()
call_args = mock_listener_manager.hook.on_dag_run_failed.call_args
assert call_args.kwargs["msg"] == "timed_out"
assert call_args.kwargs["dag_run"] == dag_run
@mock.patch("airflow.models.Deadline.handle_miss")
def test_process_expired_deadlines(self, mock_handle_miss, session, dag_maker):
"""Verify all expired and unhandled deadlines (and only those) are processed by the scheduler."""
scheduler_job = Job(executor=MockExecutor())
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
past_date = timezone.utcnow() - timedelta(minutes=5)
future_date = timezone.utcnow() + timedelta(minutes=5)
callback_path = "classpath.notify"
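# Placeholder callback path; Deadline.handle_miss is patched above, so no real callback is resolved or run.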
# Create a test Dag run for Deadline
dag_id = "test_deadline_dag"
with dag_maker(dag_id=dag_id):
EmptyOperator(task_id="empty")
dagrun_id = dag_maker.create_dagrun().id
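# Five deadlines below: two already handled (missed=True), two expired and unhandled, and one in the future.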
handled_deadline_async = Deadline(
deadline_time=past_date,
callback=AsyncCallback(callback_path),
dagrun_id=dagrun_id,
dag_id=dag_id,
)
handled_deadline_async.missed = True
handled_deadline_sync = Deadline(
deadline_time=past_date,
callback=SyncCallback(callback_path),
dagrun_id=dagrun_id,
dag_id=dag_id,
)
handled_deadline_sync.missed = True
expired_deadline1 = Deadline(
deadline_time=past_date, callback=AsyncCallback(callback_path), dagrun_id=dagrun_id, dag_id=dag_id
)
expired_deadline2 = Deadline(
deadline_time=past_date, callback=SyncCallback(callback_path), dagrun_id=dagrun_id, dag_id=dag_id
)
future_deadline = Deadline(
deadline_time=future_date,
callback=AsyncCallback(callback_path),
dagrun_id=dagrun_id,
dag_id=dag_id,
)
session.add_all(
[
expired_deadline1,
expired_deadline2,
future_deadline,
handled_deadline_async,
handled_deadline_sync,
]
)
session.flush()
self.job_runner._execute()
# Assert that all deadlines which are both expired and unhandled get processed.
assert mock_handle_miss.call_count == 2
@mock.patch("airflow.models.Deadline.handle_miss")
def test_process_expired_deadlines_no_deadlines_found(self, mock_handle_miss, session):
"""Test handling when there are no deadlines to process."""
scheduler_job = Job(executor=MockExecutor())
self.job_runner = SchedulerJobRunner(job=scheduler_job, num_runs=1)
self.job_runner._execute()
# The handler should not be called, but no exceptions should be raised either.
mock_handle_miss.assert_not_called()
def test_emit_running_dags_metric(self, dag_maker, monkeypatch):
"""Test that the running_dags metric is emitted correctly."""
with dag_maker("metric_dag") as dag:
_ = dag
dag_maker.create_dagrun(run_id="run_1", state=DagRunState.RUNNING, logical_date=timezone.utcnow())
dag_maker.create_dagrun(
run_id="run_2", state=DagRunState.RUNNING, logical_date=timezone.utcnow() + timedelta(hours=1)
)
recorded: list[tuple[str, int]] = []
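# Capture Stats.gauge calls locally instead of emitting real statsd metrics.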
def _fake_gauge(metric: str, value: int, *_, **__):
recorded.append((metric, value))
monkeypatch.setattr("airflow.jobs.scheduler_job_runner.Stats.gauge", _fake_gauge, raising=True)
with conf_vars({("metrics", "statsd_on"): "True"}):
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(scheduler_job)
self.job_runner._emit_running_dags_metric()
assert recorded == [("scheduler.dagruns.running", 2)]
# Multi-team scheduling tests
def test_multi_team_get_team_names_for_dag_ids_success(self, dag_maker, session):
"""Test successful team name resolution for multiple DAG IDs."""
# Setup test data
clear_db_teams()
clear_db_dag_bundles()
team1 = Team(name="team_a")
team2 = Team(name="team_b")
session.add_all([team1, team2])
session.flush()
bundle1 = DagBundleModel(name="bundle_a")
bundle2 = DagBundleModel(name="bundle_b")
bundle1.teams.append(team1)
bundle2.teams.append(team2)
session.add_all([bundle1, bundle2])
session.flush()
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
EmptyOperator(task_id="task_a")
with dag_maker(dag_id="dag_b", bundle_name="bundle_b", session=session):
EmptyOperator(task_id="task_b")
with dag_maker(dag_id="dag_no_team", session=session):
EmptyOperator(task_id="task_no_team")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._get_team_names_for_dag_ids(["dag_a", "dag_b", "dag_no_team"], session)
expected = {"dag_a": "team_a", "dag_b": "team_b", "dag_no_team": None}
assert result == expected
def test_multi_team_get_team_names_for_dag_ids_empty_input(self, session):
"""Test that empty input returns empty dict."""
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._get_team_names_for_dag_ids([], session)
assert result == {}
@mock.patch("airflow.jobs.scheduler_job_runner.SchedulerJobRunner.log")
def test_multi_team_get_team_names_for_dag_ids_database_error(self, mock_log, dag_maker, session):
"""Test graceful error handling when team resolution fails. This code should _not_ fail the scheduler."""
with dag_maker(dag_id="dag_test", session=session):
EmptyOperator(task_id="task")
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Mock session.execute to raise an exception using context manager
with mock.patch.object(session, "execute", side_effect=Exception("Database error")):
result = self.job_runner._get_team_names_for_dag_ids(["dag_test"], session)
# Should return empty dict and log the error
assert result == {}
mock_log.exception.assert_called_once()
def test_multi_team_get_task_team_name_success(self, dag_maker, session):
"""Test successful team name resolution for a single task."""
clear_db_teams()
clear_db_dag_bundles()
team = Team(name="team_a")
session.add(team)
session.flush()
bundle = DagBundleModel(name="bundle_a")
bundle.teams.append(team)
session.add(bundle)
session.flush()
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
task = EmptyOperator(task_id="task_a")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._get_task_team_name(ti, session)
assert result == "team_a"
def test_multi_team_get_task_team_name_no_team(self, dag_maker, session):
"""Test team resolution when no team is associated with the DAG."""
with dag_maker(dag_id="dag_no_team", session=session):
task = EmptyOperator(task_id="task_no_team")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._get_task_team_name(ti, session)
assert result is None
def test_multi_team_get_task_team_name_database_error(self, dag_maker, session):
"""Test graceful error handling when individual task team resolution fails. This code should _not_ fail the scheduler."""
with dag_maker(dag_id="dag_test", session=session):
task = EmptyOperator(task_id="task_test")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Mock _get_team_names_for_dag_ids to return empty dict (simulates database error handling in that function)
with mock.patch.object(self.job_runner, "_get_team_names_for_dag_ids", return_value={}) as mock_batch:
result = self.job_runner._get_task_team_name(ti, session)
mock_batch.assert_called_once_with([ti.dag_id], session)
# Should return None when batch function returns empty dict
assert result is None
@conf_vars({("core", "multi_team"): "false"})
def test_multi_team_try_to_load_executor_multi_team_disabled(self, dag_maker, mock_executors, session):
"""Test executor selection when multi_team is disabled (legacy behavior)."""
with dag_maker(dag_id="test_dag", session=session):
task = EmptyOperator(task_id="test_task", executor="secondary_exec")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with mock.patch.object(self.job_runner, "_get_task_team_name") as mock_team_resolve:
result = self.job_runner._try_to_load_executor(ti, session)
# Should not call team resolution when multi_team is disabled
mock_team_resolve.assert_not_called()
assert result == mock_executors[1]
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_no_explicit_executor_no_team(
self, dag_maker, mock_executors, session
):
"""Test executor selection when no explicit executor and no team (should use global default)."""
with dag_maker(dag_id="test_dag", session=session):
task = EmptyOperator(task_id="test_task") # No explicit executor
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._try_to_load_executor(ti, session)
# Should return the global default executor (first executor in Job)
assert result == scheduler_job.executor
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_no_explicit_executor_with_team(
self, dag_maker, mock_executors, session
):
"""Test executor selection when no explicit executor but team exists (should find team's default executor)."""
clear_db_teams()
clear_db_dag_bundles()
team = Team(name="team_a")
session.add(team)
session.flush()
bundle = DagBundleModel(name="bundle_a")
bundle.teams.append(team)
session.add(bundle)
session.flush()
# Configure one executor to be team-specific
mock_executors[1].team_name = "team_a"
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
task = EmptyOperator(task_id="test_task") # No explicit executor
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._try_to_load_executor(ti, session)
# Should return the team-specific default executor set above
assert result == mock_executors[1]
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_explicit_executor_matches_team(
self, dag_maker, mock_executors, session
):
"""Test executor selection when explicit executor matches task's team."""
clear_db_teams()
clear_db_dag_bundles()
team = Team(name="team_a")
session.add(team)
session.flush()
bundle = DagBundleModel(name="bundle_a")
bundle.teams.append(team)
session.add(bundle)
session.flush()
# Configure executor for the team
mock_executors[1].team_name = "team_a"
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
task = EmptyOperator(task_id="test_task", executor="secondary_exec")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._try_to_load_executor(ti, session)
# Should return the team-specific executor that matches the explicit executor name
assert result == mock_executors[1]
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_explicit_executor_global_fallback(
self, dag_maker, mock_executors, session
):
"""Test executor selection when explicit executor is global (team_name=None)."""
clear_db_teams()
clear_db_dag_bundles()
team = Team(name="team_a")
session.add(team)
session.flush()
bundle = DagBundleModel(name="bundle_a")
bundle.teams.append(team)
session.add(bundle)
session.flush()
# Configure one executor for the team, but keep default as global
mock_executors[1].team_name = "team_a"
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
task = EmptyOperator(task_id="test_task", executor="default_exec") # Global executor
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
result = self.job_runner._try_to_load_executor(ti, session)
# Should return the global executor (default) even though task has a team
assert result == mock_executors[0]
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_explicit_executor_team_mismatch(
self, dag_maker, mock_executors, session
):
"""Test executor selection when explicit executor doesn't match task's team (should return None)."""
clear_db_teams()
clear_db_dag_bundles()
team1 = Team(name="team_a")
team2 = Team(name="team_b")
session.add_all([team1, team2])
session.flush()
bundle = DagBundleModel(name="bundle_a")
bundle.teams.append(team1)
session.add(bundle)
session.flush()
# Configure executors for different teams
mock_executors[1].team_name = "team_b" # Different team!
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session): # DAG belongs to team_a
task = EmptyOperator(
task_id="test_task", executor="secondary_exec"
) # Executor for different team
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with mock.patch("airflow.jobs.scheduler_job_runner.SchedulerJobRunner.log") as mock_log:
result = self.job_runner._try_to_load_executor(ti, session)
# Should log a warning when no executor is found
mock_log.warning.assert_called_once_with(
"Executor, %s, was not found but a Task was configured to use it", "secondary_exec"
)
# Should return None since we failed to resolve an executor due to the mismatch. In practice, this
# should never happen since we assert this at DagBag validation time.
assert result is None
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_invalid_executor_name(self, dag_maker, mock_executors, session):
"""Test executor selection with invalid executor name (should return None and log warning)."""
with dag_maker(dag_id="test_dag", session=session):
task = EmptyOperator(task_id="test_task", executor="nonexistent_executor")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with mock.patch("airflow.jobs.scheduler_job_runner.SchedulerJobRunner.log") as mock_log:
result = self.job_runner._try_to_load_executor(ti, session)
assert result is None
mock_log.warning.assert_called_once()
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_try_to_load_executor_team_name_pre_resolved(self, dag_maker, mock_executors, session):
"""Test executor selection when team_name is pre-resolved."""
clear_db_teams()
clear_db_dag_bundles()
team = Team(name="team_a")
session.add(team)
session.flush()
bundle = DagBundleModel(name="bundle_a")
bundle.teams.append(team)
session.add(bundle)
session.flush()
mock_executors[1].team_name = "team_a"
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
task = EmptyOperator(task_id="test_task")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# Call with pre-resolved team name (as done in the scheduling loop)
with mock.patch.object(self.job_runner, "_get_task_team_name") as mock_team_resolve:
result = self.job_runner._try_to_load_executor(ti, session, team_name="team_a")
mock_team_resolve.assert_not_called() # We don't query for the team if it is pre-resolved
assert result == mock_executors[1]
@conf_vars({("core", "multi_team"): "true"})
def test_multi_team_scheduling_loop_batch_optimization(self, dag_maker, mock_executors, session):
"""Test that the scheduling loop uses batch team resolution optimization."""
clear_db_teams()
clear_db_dag_bundles()
team1 = Team(name="team_a")
team2 = Team(name="team_b")
session.add_all([team1, team2])
session.flush()
bundle1 = DagBundleModel(name="bundle_a")
bundle2 = DagBundleModel(name="bundle_b")
bundle1.teams.append(team1)
bundle2.teams.append(team2)
session.add_all([bundle1, bundle2])
session.flush()
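# Give each mock executor its own team so task instances route to their team's executor.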
mock_executors[0].team_name = "team_a"
mock_executors[1].team_name = "team_b"
with dag_maker(dag_id="dag_a", bundle_name="bundle_a", session=session):
EmptyOperator(task_id="task_a")
dr1 = dag_maker.create_dagrun()
with dag_maker(dag_id="dag_b", bundle_name="bundle_b", session=session):
EmptyOperator(task_id="task_b")
dr2 = dag_maker.create_dagrun()
ti1 = dr1.get_task_instance("task_a", session)
ti2 = dr2.get_task_instance("task_b", session)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.flush()
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
# The scheduling loop should call batch resolution and pass resolved names
with mock.patch.object(self.job_runner, "_get_team_names_for_dag_ids") as mock_batch:
mock_batch.return_value = {"dag_a": "team_a", "dag_b": "team_b"}
res = self.job_runner._executable_task_instances_to_queued(max_tis=32, session=session)
# Verify batch method was called with unique DAG IDs
mock_batch.assert_called_once_with({"dag_a", "dag_b"}, session)
assert len(res) == 2
@conf_vars({("core", "multi_team"): "false"})
def test_multi_team_config_disabled_uses_legacy_behavior(self, dag_maker, mock_executors, session):
"""Test that when multi_team config is disabled, legacy behavior is preserved."""
with dag_maker(dag_id="test_dag", session=session):
task1 = EmptyOperator(task_id="test_task1") # No explicit executor
task2 = EmptyOperator(task_id="test_task2", executor="secondary_exec")
dr = dag_maker.create_dagrun()
ti1 = dr.get_task_instance(task1.task_id, session)
ti2 = dr.get_task_instance(task2.task_id, session)
scheduler_job = Job()
self.job_runner = SchedulerJobRunner(job=scheduler_job)
with mock.patch.object(self.job_runner, "_get_task_team_name") as mock_team_resolve:
result1 = self.job_runner._try_to_load_executor(ti1, session)
result2 = self.job_runner._try_to_load_executor(ti2, session)
# Should use legacy logic without calling team resolution
mock_team_resolve.assert_not_called()
assert result1 == scheduler_job.executor # Default for no explicit executor
assert result2 == mock_executors[1] # Matched by executor name
@pytest.mark.need_serialized_dag
def test_schedule_dag_run_with_upstream_skip(dag_maker, session):
"""
Test if _schedule_dag_run puts a task instance into SKIPPED state if any of its
upstream tasks are skipped according to TriggerRuleDep.
"""
with dag_maker(
dag_id="test_task_with_upstream_skip_process_task_instances",
start_date=DEFAULT_DATE,
session=session,
):
dummy1 = EmptyOperator(task_id="dummy1")
dummy2 = EmptyOperator(task_id="dummy2")
dummy3 = EmptyOperator(task_id="dummy3")
[dummy1, dummy2] >> dummy3
# dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dr = dag_maker.create_dagrun(state=State.RUNNING)
assert dr is not None
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
# Set dummy1 to skipped and dummy2 to success. dummy3 remains as none.
tis[dummy1.task_id].state = State.SKIPPED
tis[dummy2.task_id].state = State.SUCCESS
assert tis[dummy3.task_id].state == State.NONE
session.flush()
# dag_runs = DagRun.find(dag_id='test_task_with_upstream_skip_dag')
# dag_file_processor._process_task_instances(dag, dag_runs=dag_runs)
scheduler_job = Job()
job_runner = SchedulerJobRunner(job=scheduler_job)
job_runner._schedule_dag_run(dr, session)
session.flush()
tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
assert tis[dummy1.task_id].state == State.SKIPPED
assert tis[dummy2.task_id].state == State.SUCCESS
# dummy3 should be skipped because dummy1 is skipped.
assert tis[dummy3.task_id].state == State.SKIPPED
def test_start_queued_dagruns_uses_latest_max_active_runs_from_dag_model(self, dag_maker, session):
"""
Test that _start_queued_dagruns uses max_active_runs from DagModel (via dag_run)
instead of stale SerializedDAG max_active_runs.
This test verifies the fix where SerializedDAG may have stale max_active_runs,
but DagModel has the latest value updated by version changes (versioned bundles). The scheduler should
use the latest value from DagModel to respect user updates.
"""
# Create a DAG with max_active_runs=1 initially
with dag_maker(
dag_id="test_max_active_runs_stale_serialized",
max_active_runs=1,
session=session,
) as dag:
EmptyOperator(task_id="dummy_task")
dag_model = dag_maker.dag_model
assert dag_model.max_active_runs == 1
# Create a SerializedDAG (which will have max_active_runs=1)
# This simulates the SerializedDAG being created/updated from the DAG file
scheduler_job = Job(executor=self.null_exec)
self.job_runner = SchedulerJobRunner(job=scheduler_job)
self.job_runner._create_dag_runs([dag_model], session)
# Verify SerializedDAG has max_active_runs=1
dag_run_1 = (
session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.logical_date).first()
)
assert dag_run_1 is not None
serialized_dag = self.job_runner.scheduler_dag_bag.get_dag_for_run(dag_run_1, session=session)
assert serialized_dag is not None
assert serialized_dag.max_active_runs == 1
# Now update DagModel.max_active_runs to 2 (simulating a versioned bundle update)
# This is the latest value, but SerializedDAG still has the old value
dag_model.max_active_runs = 2
session.commit()
session.refresh(dag_model)
# Create 1 running dag run
dag_run_1.state = DagRunState.RUNNING
session.commit()
# Create 1 queued dag run
dag_run_2 = dag_maker.create_dagrun(
run_id="test_run_2",
state=DagRunState.QUEUED,
run_type=DagRunType.SCHEDULED,
session=session,
)
# Ensure dag_run_2 has the updated DagModel relationship loaded
# The association proxy dag_run.max_active_runs accesses dag_model.max_active_runs
# so we need to ensure the relationship is loaded
session.refresh(dag_run_2)
# Verify we have 1 running and 1 queued
running_count = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.state == DagRunState.RUNNING)
.count()
)
queued_count = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.state == DagRunState.QUEUED)
.count()
)
assert running_count == 1
assert queued_count == 1
# The SerializedDAG still has max_active_runs=1 (stale)
# But DagModel has max_active_runs=2 (latest)
assert serialized_dag.max_active_runs == 1
assert dag_model.max_active_runs == 2
# Call _start_queued_dagruns
# With the fix: Should start the queued run (using DagModel max_active_runs=2, active_runs=1 < 2)
# Without the fix: Would block the queued run (using SerializedDAG max_active_runs=1, active_runs=1 >= 1)
self.job_runner._start_queued_dagruns(session)
session.flush()
# Verify that the queued dag run started (proves it used DagModel.max_active_runs=2)
dag_run_2 = session.get(DagRun, dag_run_2.id)
assert dag_run_2.state == DagRunState.RUNNING, (
"The queued dag run should have started because DagModel.max_active_runs=2 "
"allows it (active_runs=1 < 2), even though SerializedDAG.max_active_runs=1 for that dagrun serdag version "
"would have blocked it."
)
# Verify we now have 2 running dag runs
running_count = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.state == DagRunState.RUNNING)
.count()
)
assert running_count == 2
|
TestSchedulerJob
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/c_cpp.py
|
{
"start": 8245,
"end": 10523
}
|
class ____(CFamilyLexer):
"""
For C++ source code with preprocessor directives.
"""
name = 'C++'
aliases = ['cpp', 'c++']
filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
'*.cc', '*.hh', '*.cxx', '*.hxx',
'*.C', '*.H', '*.cp', '*.CPP']
mimetypes = ['text/x-c++hdr', 'text/x-c++src']
priority = 0.1
tokens = {
'statements': [
(words((
'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
'export', 'friend', 'mutable', 'namespace', 'new', 'operator',
'private', 'protected', 'public', 'reinterpret_cast',
'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
'try', 'typeid', 'typename', 'using', 'virtual',
'constexpr', 'nullptr', 'decltype', 'thread_local',
'alignas', 'alignof', 'static_assert', 'noexcept', 'override',
'final'), suffix=r'\b'), Keyword),
(r'char(16_t|32_t)\b', Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# C++11 raw strings
(r'(R)(")([^\\()\s]{,16})(\()((?:.|\n)*?)(\)\3)(")',
bygroups(String.Affix, String, String.Delimiter, String.Delimiter,
String, String.Delimiter, String)),
# C++11 UTF-8/16/32 strings
(r'(u8|u|U)(")', bygroups(String.Affix, String), 'string'),
inherit,
],
'root': [
inherit,
# C++ Microsoft-isms
(words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
'multiple_inheritance', 'interface', 'event'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
# Offload C++ extensions, http://offload.codeplay.com/
(r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
def analyse_text(text):
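# Heuristic: an extensionless angle-bracket include (e.g. <vector>) weakly suggests C++ (0.2); "using namespace" is a stronger signal (0.4).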
if re.search('#include <[a-z_]+>', text):
return 0.2
if re.search('using namespace ', text):
return 0.4
|
CppLexer
|
python
|
conda__conda
|
conda/plugins/types.py
|
{
"start": 14559,
"end": 15090
}
|
class ____(CondaPlugin):
"""
Return type to use when defining a post-transaction action hook.
For details on how this is used, see
:meth:`~conda.plugins.hookspec.CondaSpecs.conda_post_transaction_actions`.
:param name: Post transaction name (this is just a label)
:param action: Action class which implements
plugin behavior. See
:class:`~conda.core.path_actions.Action` for
implementation details
"""
name: str
action: type[Action]
@dataclass
|
CondaPostTransactionAction
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
|
{
"start": 93542,
"end": 95208
}
|
class ____(PForTestCase, parameterized.TestCase):
@parameterized.parameters(
(fft_ops.fft,),
(fft_ops.fft2d,),
(fft_ops.fft3d,),
(fft_ops.ifft,),
(fft_ops.ifft2d,),
(fft_ops.ifft3d,),
)
def test_fft(self, op_func):
shape = [2, 3, 4, 3, 4]
x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
def loop_fn(i):
x_i = array_ops.gather(x, i)
return op_func(x_i)
self._test_loop_fn(loop_fn, 2)
@parameterized.parameters(
(fft_ops.rfft,),
(fft_ops.rfft2d,),
(fft_ops.rfft3d,),
)
def test_rfft(self, op_func):
for dtype in (dtypes.float32, dtypes.float64):
x = random_ops.random_uniform([2, 3, 4, 3, 4], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x_i = array_ops.gather(x, i)
return op_func(x_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
@parameterized.parameters(
(fft_ops.irfft,),
(fft_ops.irfft2d,),
(fft_ops.irfft3d,),
)
def test_irfft(self, op_func):
if config.list_physical_devices("GPU"):
# TODO(b/149957923): The test is flaky
self.skipTest("b/149957923: irfft vectorization flaky")
for dtype in (dtypes.complex64, dtypes.complex128):
shape = [2, 3, 4, 3, 4]
x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
x = math_ops.cast(x, dtype=dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x_i = array_ops.gather(x, i)
return op_func(x_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
|
SpectralTest
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/core/test_metric_result_integration.py
|
{
"start": 1550,
"end": 9390
}
|
class ____(Metric):
x: Tensor
def __init__(self):
super().__init__()
self.add_state("x", tensor(0), dist_reduce_fx="sum")
def update(self, x):
self.x += x
def compute(self):
return self.x
def result_reduce_ddp_fn(strategy):
rank = strategy.local_rank
worldsize = strategy.num_processes
tensor([1.0])
metric_a = DummyMetric()
metric_b = DummyMetric()
metric_c = DummyMetric()
metric_a = metric_a.to(f"cuda:{rank}")
metric_b = metric_b.to(f"cuda:{rank}")
metric_c = metric_c.to(f"cuda:{rank}")
result = _ResultCollection(True)
for _ in range(3):
cumulative_sum = 0
for i in range(5):
metric_a(i)
metric_b(i)
metric_c(i)
cumulative_sum += i
result.log("h", "a", metric_a, on_step=True, on_epoch=True)
result.log("h", "b", metric_b, on_step=False, on_epoch=True)
result.log("h", "c", metric_c, on_step=True, on_epoch=False)
batch_log = result.metrics(True)["log"]
assert batch_log == {"a_step": i, "c": i}
epoch_log = result.metrics(False)["log"]
result.reset()
# assert metric state reset to default values
assert metric_a.x == metric_a._defaults["x"], (metric_a.x, metric_a._defaults["x"])
assert metric_b.x == metric_b._defaults["x"]
assert metric_c.x == metric_c._defaults["x"]
assert epoch_log == {"b": cumulative_sum * worldsize, "a_epoch": cumulative_sum * worldsize}
@RunIf(min_cuda_gpus=2, skip_windows=True)
def test_result_reduce_ddp():
"""Make sure result logging works with DDP."""
spawn_launch(result_reduce_ddp_fn, [torch.device("cuda:0"), torch.device("cuda:1")])
def test_result_metric_integration():
metric_a = DummyMetric()
metric_b = DummyMetric()
metric_c = DummyMetric()
result = _ResultCollection(True)
for _ in range(3):
cumulative_sum = 0
for i in range(5):
metric_a(i)
metric_b(i)
metric_c(i)
cumulative_sum += i
result.log("h", "a", metric_a, on_step=True, on_epoch=True)
result.log("h", "b", metric_b, on_step=False, on_epoch=True)
result.log("h", "c", metric_c, on_step=True, on_epoch=False)
batch_log = result.metrics(True)["log"]
assert batch_log == {"a_step": i, "c": i}
epoch_log = result.metrics(False)["log"]
result.reset()
# assert metric state reset to default values
assert metric_a.x == metric_a._defaults["x"]
assert metric_b.x == metric_b._defaults["x"]
assert metric_c.x == metric_c._defaults["x"]
assert epoch_log == {"b": cumulative_sum, "a_epoch": cumulative_sum}
result.minimize = tensor(1.0)
result.extra = {}
assert str(result) == (
"_ResultCollection("
"{"
"'h.a': _ResultMetric('a', value=DummyMetric()), "
"'h.b': _ResultMetric('b', value=DummyMetric()), "
"'h.c': _ResultMetric('c', value=DummyMetric())"
"})"
)
assert repr(result) == (
"{"
"True, "
"{'h.a': _ResultMetric('a', value=DummyMetric()), "
"'h.b': _ResultMetric('b', value=DummyMetric()), "
"'h.c': _ResultMetric('c', value=DummyMetric())"
"}}"
)
def test_result_collection_simple_loop():
result = _ResultCollection(True)
current_fx_name = None
batch_idx = None
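# Simulate logging calls from different hooks: reset non-metric state whenever a new hook (fx) starts logging at batch 0.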
def lightning_log(fx, *args, **kwargs):
nonlocal current_fx_name
if current_fx_name != fx and batch_idx in (None, 0):
result.reset(metrics=False, fx=fx)
result.log(fx, *args, **kwargs)
current_fx_name = fx
lightning_log("a0", "a", tensor(0.0), on_step=True, on_epoch=True)
lightning_log("a1", "a", tensor(0.0), on_step=True, on_epoch=True)
for epoch in range(2):
lightning_log("b0", "a", tensor(1.0) + epoch, on_step=True, on_epoch=True)
lightning_log("b1", "a", tensor(1.0) + epoch, on_step=True, on_epoch=True)
for batch_idx in range(2):
lightning_log("c0", "a", tensor(2.0) + epoch, on_step=True, on_epoch=True)
lightning_log("c1", "a", tensor(2.0) + epoch, on_step=True, on_epoch=True)
lightning_log("c2", "a", tensor(2.0) + epoch, on_step=True, on_epoch=True)
batch_idx = None
lightning_log("d0", "a", tensor(3.0) + epoch, on_step=False, on_epoch=True)
lightning_log("d1", "a", tensor(3.0) + epoch, on_step=False, on_epoch=True)
for k in ("a0.a", "a1.a"):
assert result[k].value == tensor(0.0), k
assert result[k].cumulated_batch_size == tensor(1.0), k
for k in ("b0.a", "b1.a"):
assert result[k].value == tensor(1.0) + epoch, k
assert result[k].cumulated_batch_size == tensor(1.0), k
for k in ("c0.a", "c1.a", "c2.a"):
assert result[k].value == tensor(4.0) + epoch * 2, k
assert result[k].cumulated_batch_size == tensor(2.0), k
for k in ("d0.a", "d1.a"):
assert result[k].value == tensor(3.0) + epoch, k
assert result[k].cumulated_batch_size == tensor(1.0), k
def my_sync_dist(x, *_, **__):
return x
def test_result_collection_restoration(tmp_path):
"""This test make sure metrics are properly reloaded on failure."""
result = _ResultCollection(True)
metric_a = DummyMetric()
metric_b = DummyMetric()
metric_c = DummyMetric()
metric_d = DummyMetric()
current_fx_name = None
batch_idx = None
def lightning_log(fx, *args, **kwargs):
nonlocal current_fx_name
if current_fx_name != fx and batch_idx in (None, 0):
result.reset(metrics=False, fx=fx)
result.log(fx, *args, **kwargs, sync_dist_fn=my_sync_dist)
current_fx_name = fx
for epoch in range(2):
cumulative_sum = 0
for i in range(3):
a = metric_a(i)
b = metric_b(i)
c = metric_c(i)
metric_d(i)
cumulative_sum += i
metric = metric_a if i < 1 else metric_d
lightning_log("training_step", "a", metric, on_step=True, on_epoch=True, metric_attribute="metric")
lightning_log("training_step", "b", metric_b, on_step=False, on_epoch=True, metric_attribute="metric_b")
lightning_log("training_step", "c", metric_c, on_step=True, on_epoch=False, metric_attribute="metric_c")
lightning_log("training_step", "a_1", a, on_step=True, on_epoch=True)
lightning_log("training_step", "b_1", b, on_step=False, on_epoch=True)
lightning_log("training_step", "c_1", c, on_step=True, on_epoch=False)
batch_log = result.metrics(on_step=True)["log"]
assert set(batch_log) == {"a_step", "c", "a_1_step", "c_1"}
assert len(result.result_metrics) == 6 + epoch > 0
lightning_log("train_epoch_end", "a", metric_a, on_step=False, on_epoch=True)
epoch_log = result.metrics(on_step=False)["log"]
assert epoch_log == {
"a_1_epoch": 1,
"a_epoch": cumulative_sum,
"a": cumulative_sum,
"b": cumulative_sum,
"b_1": 1,
}
# make sure can be pickled
pickle.loads(pickle.dumps(result))
# make sure can be torch.loaded
filepath = str(tmp_path / "result")
torch.save(result, filepath)
torch.load(filepath, weights_only=False)
# assert metric state reset to default values
result.reset()
assert metric_a.x == metric_a._defaults["x"]
assert metric_b.x == metric_b._defaults["x"]
assert metric_c.x == metric_c._defaults["x"]
batch_idx = None
|
DummyMetric
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/data_asset/path/spark/csv_asset.py
|
{
"start": 7998,
"end": 8078
}
|
class ____(FileDataAsset, CSVAssetBase):
type: Literal["csv"] = "csv"
|
CSVAsset
|
python
|
PyCQA__pylint
|
pylint/utils/linterstats.py
|
{
"start": 1072,
"end": 1233
}
|
class ____(TypedDict):
"""TypedDict to store counts of different types of nodes."""
function: int
klass: int
method: int
module: int
|
NodeCount
|
python
|
walkccc__LeetCode
|
solutions/3493. Properties Graph/3493.py
|
{
"start": 609,
"end": 972
}
|
class ____:
def numberOfComponents(self, properties: list[list[int]], k: int) -> int:
n = len(properties)
uf = UnionFind(n)
propertySets = [set(property) for property in properties]
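# Union two indices whenever their property sets share at least k values; the answer is the number of resulting components.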
for i, j in itertools.combinations(range(n), 2):
if len(propertySets[i] & propertySets[j]) >= k:
uf.unionByRank(i, j)
return uf.getCount()
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/suite/test_types.py
|
{
"start": 22857,
"end": 23258
}
|
class ____(_DateFixture, fixtures.TablesTest):
__requires__ = ("time_timezone",)
__backend__ = True
datatype = Time(timezone=True)
data = datetime.time(12, 57, 18, tzinfo=datetime.timezone.utc)
@testing.requires.time_implicit_bound
def test_select_direct(self, connection):
result = connection.scalar(select(literal(self.data)))
eq_(result, self.data)
|
TimeTZTest
|
python
|
huggingface__transformers
|
src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py
|
{
"start": 22786,
"end": 34469
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2_5OmniTalkerForConditionalGeneration`]. It is used to instantiate an
Qwen2.5-Omni-Talker model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Qwen2.5-Omni-Thinker.
e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
audio_token_index (`int`, *optional*, defaults to 151646):
The audio token index to encode the audio prompt.
image_token_index (`int`, *optional*, defaults to 151655):
The image token index to encode the image prompt.
video_token_index (`int`, *optional*, defaults to 151656):
The video token index to encode the video prompt.
vocab_size (`int`, *optional*, defaults to 8448):
Vocabulary size of the QwenOmni model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Qwen2VLModel`]
tts_text_start_token_id (`int`, *optional*, defaults to 151860):
The tts text start token index to encode the start of tts text.
tts_text_end_token_id (`int`, *optional*, defaults to 151861):
The tts text end token index to encode the end of tts text.
tts_text_pad_token_id (`int`, *optional*, defaults to 151859):
The tts text pad token index to encode the pad of tts text.
tts_codec_start_token_id (`int`, *optional*, defaults to 8293):
The tts codec start token index to encode the start of tts codec.
tts_codec_end_token_id (`int`, *optional*, defaults to 8294):
The tts codec end token index to encode the end of tts codec.
tts_codec_pad_token_id (`int`, *optional*, defaults to 8292):
The tts codec pad token index to encode the pad of tts codec.
tts_codec_mask_token_id (`int`, *optional*, defaults to 8296):
The tts codec mask token index to encode the mask of tts codec.
vision_start_token_id (`int`, *optional*, defaults to 151652):
The tts vision start token index to encode the start of vision.
vision_end_token_id (`int`, *optional*, defaults to 151653):
The tts vision end token index to encode the end of vision.
embedding_size (`int`, *optional*, defaults to 3584):
Dimension of the embedding representations.
hidden_size (`int`, *optional*, defaults to 3584):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 18944):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 28):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 28):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
head_dim (`int`, *optional*, defaults to 128):
The dimension of each attention head.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*, defaults to 32768):
Sliding window attention (SWA) window size. If not specified, will default to `4096`.
max_window_layers (`int`, *optional*, defaults to 28):
The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
additional layer afterwards will use SWA (Sliding Window Attention).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
position_id_per_seconds (`int`, *optional*, defaults to 25):
The increment of position id per second.
seconds_per_chunk (`int`, *optional*, defaults to 2):
The duration in seconds of the chunk of audio and video data.
audio_start_token_id (`int`, *optional*, defaults to 151647):
The audio start token index to encode the audio prompt.
audio_end_token_id (`int`, *optional*, defaults to 151648):
The audio end token index to encode the audio prompt.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
spatial_merge_size (`int`, *optional*, defaults to 2):
The size used for merging spatial dimensions.
layer_types (`list`, *optional*):
Attention pattern for each layer.
Example:
```python
>>> from transformers import Qwen2_5OmniTalkerForConditionalGeneration, Qwen2_5OmniThinkerConfig, Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniVisionEncoderConfig
>>> # Initializing a Qwen2_5OmniAudioEncoder config
>>> audio_config = Qwen2_5OmniAudioEncoderConfig()
>>> # Initializing a Qwen2 config
>>> text_config = Qwen2Config()
>>> # Initializing a Qwen2_5Omni configuration
>>> configuration = Qwen2_5OmniThinkerConfig(audio_config, text_config)
>>> # Initializing a model from the qwen2-audio style configuration
>>> model = Qwen2_5OmniTalkerForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen2_5_omni_talker"
default_theta = 1000000.0
attribute_map = {
"image_token_id": "image_token_index",
"video_token_id": "video_token_index",
"audio_token_id": "audio_token_index",
}
def __init__(
self,
audio_token_index=151646,
image_token_index=151655,
video_token_index=151656,
vocab_size=8448,
tts_text_start_token_id=151860,
tts_text_end_token_id=151861,
tts_text_pad_token_id=151859,
tts_codec_start_token_id=8293,
tts_codec_end_token_id=8294,
tts_codec_pad_token_id=8292,
tts_codec_mask_token_id=8296,
vision_start_token_id=151652,
vision_end_token_id=151653,
embedding_size=3584,
hidden_size=3584,
intermediate_size=18944,
num_hidden_layers=28,
num_attention_heads=28,
num_key_value_heads=4,
hidden_act="silu",
max_position_embeddings=32768,
rms_norm_eps=1e-06,
head_dim=128,
use_cache=True,
tie_word_embeddings=False,
use_sliding_window=False,
sliding_window=32768,
max_window_layers=28,
attention_dropout=0.0,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
position_id_per_seconds=25,
seconds_per_chunk=2,
audio_start_token_id=151647,
audio_end_token_id=151648,
initializer_range=0.02,
spatial_merge_size=2,
layer_types=None,
**kwargs,
):
self.audio_token_index = audio_token_index
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.tts_text_start_token_id = tts_text_start_token_id
self.tts_text_end_token_id = tts_text_end_token_id
self.tts_text_pad_token_id = tts_text_pad_token_id
self.tts_codec_start_token_id = tts_codec_start_token_id
self.tts_codec_end_token_id = tts_codec_end_token_id
self.tts_codec_pad_token_id = tts_codec_pad_token_id
self.tts_codec_mask_token_id = tts_codec_mask_token_id
self.vision_start_token_id = vision_start_token_id
self.vision_end_token_id = vision_end_token_id
self.vocab_size = vocab_size
self.head_dim = head_dim
self.embedding_size = embedding_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window if self.use_sliding_window else None
self.max_window_layers = max_window_layers
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.position_id_per_seconds = position_id_per_seconds # zf
self.seconds_per_chunk = seconds_per_chunk # zf
self.audio_start_token_id = audio_start_token_id # zf
self.audio_end_token_id = audio_end_token_id # zf
self.initializer_range = initializer_range
self.spatial_merge_size = spatial_merge_size
self.layer_types = layer_types
if self.layer_types is None:
self.layer_types = [
"sliding_attention"
if self.sliding_window is not None and i >= self.max_window_layers
else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
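# Illustrative sketch (not part of the original source): how the layer_types default
# above resolves. With the defaults (use_sliding_window=False, so sliding_window is
# None) every layer is "full_attention"; with a window enabled, layers at or above
# max_window_layers use "sliding_attention". The numbers here are made up.
num_layers, max_window_layers, sliding_window = 4, 2, 32768
assert [
    "sliding_attention" if sliding_window is not None and i >= max_window_layers
    else "full_attention"
    for i in range(num_layers)
] == ["full_attention", "full_attention", "sliding_attention", "sliding_attention"]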
|
Qwen2_5OmniTalkerConfig
|
python
|
ansible__ansible
|
test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
|
{
"start": 1176,
"end": 3121
}
|
class ____(TextIOWrapper):
def write(self, s):
super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
def find_executable(executable, cwd=None, path=None):
"""Finds the full path to the executable specified"""
match = None
real_cwd = os.getcwd()
if not cwd:
cwd = real_cwd
if os.path.dirname(executable):
target = os.path.join(cwd, executable)
if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
match = executable
else:
path = os.environ.get('PATH', os.path.defpath)
path_dirs = path.split(os.path.pathsep)
seen_dirs = set()
for path_dir in path_dirs:
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
if os.path.abspath(path_dir) == real_cwd:
path_dir = cwd
candidate = os.path.join(path_dir, executable)
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
match = candidate
break
return match
def find_globals(g, tree):
"""Uses AST to find globals in an ast tree"""
for child in tree:
if hasattr(child, 'body') and isinstance(child.body, list):
find_globals(g, child.body)
elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
g.add(child.name)
continue
elif isinstance(child, ast.Assign):
try:
g.add(child.targets[0].id)
except (IndexError, AttributeError):
pass
elif isinstance(child, ast.Import):
g.add(child.names[0].name)
elif isinstance(child, ast.ImportFrom):
for name in child.names:
g_name = name.asname or name.name
if g_name == '*':
continue
g.add(g_name)
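# Hedged usage sketch (not part of the original module): exercising the two helpers
# above. It relies on the module's existing `os` and `ast` imports; the executable
# name is an assumption.
if __name__ == '__main__':
    print(find_executable('python3'))  # absolute path, or None if not found
    names = set()
    find_globals(names, ast.parse("import os\nLIMIT = 10\ndef handler():\n    pass").body)
    print(sorted(names))  # module-level names collected by the AST walk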
|
AnsibleTextIOWrapper
|
python
|
aio-libs__aiohttp
|
tests/test_payload.py
|
{
"start": 4989,
"end": 47363
}
|
class ____(AbstractStreamWriter):
"""Mock stream writer for testing payload writes."""
def __init__(self) -> None:
self.written: list[bytes] = []
async def write(
self, chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"]
) -> None:
"""Store the chunk in the written list."""
self.written.append(bytes(chunk))
async def write_eof(self, chunk: bytes | None = None) -> None:
"""write_eof implementation - no-op for tests."""
async def drain(self) -> None:
"""Drain implementation - no-op for tests."""
def enable_compression(
self, encoding: str = "deflate", strategy: int | None = None
) -> None:
"""Enable compression - no-op for tests."""
def enable_chunking(self) -> None:
"""Enable chunking - no-op for tests."""
async def write_headers(self, status_line: str, headers: CIMultiDict[str]) -> None:
"""Write headers - no-op for tests."""
def get_written_bytes(self) -> bytes:
"""Return all written bytes as a single bytes object."""
return b"".join(self.written)
async def test_bytes_payload_write_with_length_no_limit() -> None:
"""Test BytesPayload writing with no content length limit."""
data = b"0123456789"
p = payload.BytesPayload(data)
writer = MockStreamWriter()
await p.write_with_length(writer, None)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == 10
async def test_bytes_payload_write_with_length_exact() -> None:
"""Test BytesPayload writing with exact content length."""
data = b"0123456789"
p = payload.BytesPayload(data)
writer = MockStreamWriter()
await p.write_with_length(writer, 10)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == 10
async def test_bytes_payload_write_with_length_truncated() -> None:
"""Test BytesPayload writing with truncated content length."""
data = b"0123456789"
p = payload.BytesPayload(data)
writer = MockStreamWriter()
await p.write_with_length(writer, 5)
assert writer.get_written_bytes() == b"01234"
assert len(writer.get_written_bytes()) == 5
async def test_iobase_payload_write_with_length_no_limit() -> None:
"""Test IOBasePayload writing with no content length limit."""
data = b"0123456789"
p = payload.IOBasePayload(io.BytesIO(data))
writer = MockStreamWriter()
await p.write_with_length(writer, None)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == 10
async def test_iobase_payload_write_with_length_exact() -> None:
"""Test IOBasePayload writing with exact content length."""
data = b"0123456789"
p = payload.IOBasePayload(io.BytesIO(data))
writer = MockStreamWriter()
await p.write_with_length(writer, 10)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == 10
async def test_iobase_payload_write_with_length_truncated() -> None:
"""Test IOBasePayload writing with truncated content length."""
data = b"0123456789"
p = payload.IOBasePayload(io.BytesIO(data))
writer = MockStreamWriter()
await p.write_with_length(writer, 5)
assert writer.get_written_bytes() == b"01234"
assert len(writer.get_written_bytes()) == 5
async def test_bytesio_payload_write_with_length_no_limit() -> None:
"""Test BytesIOPayload writing with no content length limit."""
data = b"0123456789"
p = payload.BytesIOPayload(io.BytesIO(data))
writer = MockStreamWriter()
await p.write_with_length(writer, None)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == 10
async def test_bytesio_payload_write_with_length_exact() -> None:
"""Test BytesIOPayload writing with exact content length."""
data = b"0123456789"
p = payload.BytesIOPayload(io.BytesIO(data))
writer = MockStreamWriter()
await p.write_with_length(writer, 10)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == 10
async def test_bytesio_payload_write_with_length_truncated() -> None:
"""Test BytesIOPayload writing with truncated content length."""
data = b"0123456789"
payload_bytesio = payload.BytesIOPayload(io.BytesIO(data))
writer = MockStreamWriter()
await payload_bytesio.write_with_length(writer, 5)
assert writer.get_written_bytes() == b"01234"
assert len(writer.get_written_bytes()) == 5
async def test_bytesio_payload_write_with_length_remaining_zero() -> None:
"""Test BytesIOPayload with content_length smaller than first read chunk."""
data = b"0123456789" * 10 # 100 bytes
bio = io.BytesIO(data)
payload_bytesio = payload.BytesIOPayload(bio)
writer = MockStreamWriter()
# Mock the read method to return smaller chunks
original_read = bio.read
read_calls = 0
def mock_read(size: int | None = None) -> bytes:
nonlocal read_calls
read_calls += 1
if read_calls == 1:
# First call: return 3 bytes (less than content_length=5)
return original_read(3)
else:
# Subsequent calls return remaining data normally
return original_read(size)
with unittest.mock.patch.object(bio, "read", mock_read):
await payload_bytesio.write_with_length(writer, 5)
assert len(writer.get_written_bytes()) == 5
assert writer.get_written_bytes() == b"01234"
async def test_bytesio_payload_large_data_multiple_chunks() -> None:
"""Test BytesIOPayload with large data requiring multiple read chunks."""
chunk_size = 2**16 # 64KB (READ_SIZE)
data = b"x" * (chunk_size + 1000) # Slightly larger than READ_SIZE
payload_bytesio = payload.BytesIOPayload(io.BytesIO(data))
writer = MockStreamWriter()
await payload_bytesio.write_with_length(writer, None)
assert writer.get_written_bytes() == data
assert len(writer.get_written_bytes()) == chunk_size + 1000
async def test_bytesio_payload_remaining_bytes_exhausted() -> None:
"""Test BytesIOPayload when remaining_bytes becomes <= 0."""
data = b"0123456789abcdef" * 1000 # 16000 bytes
payload_bytesio = payload.BytesIOPayload(io.BytesIO(data))
writer = MockStreamWriter()
await payload_bytesio.write_with_length(writer, 8000) # Exactly half the data
written = writer.get_written_bytes()
assert len(written) == 8000
assert written == data[:8000]
async def test_iobase_payload_exact_chunk_size_limit() -> None:
"""Test IOBasePayload with content length matching exactly one read chunk."""
chunk_size = 2**16 # 65536 bytes (READ_SIZE)
data = b"x" * chunk_size + b"extra" # Slightly larger than one read chunk
p = payload.IOBasePayload(io.BytesIO(data))
writer = MockStreamWriter()
await p.write_with_length(writer, chunk_size)
written = writer.get_written_bytes()
assert len(written) == chunk_size
assert written == data[:chunk_size]
async def test_iobase_payload_reads_in_chunks() -> None:
"""Test IOBasePayload reads data in chunks of READ_SIZE, not all at once."""
# Create a large file that's multiple times larger than READ_SIZE
large_data = b"x" * (READ_SIZE * 3 + 1000) # ~192KB + 1000 bytes
# Mock the file-like object to track read calls
mock_file = unittest.mock.Mock(spec=io.BytesIO)
mock_file.tell.return_value = 0
mock_file.fileno.side_effect = AttributeError # Make size return None
# Track the sizes of read() calls
read_sizes = []
def mock_read(size: int) -> bytes:
read_sizes.append(size)
# Return data based on how many times read was called
call_count = len(read_sizes)
if call_count == 1:
return large_data[:size]
elif call_count == 2:
return large_data[READ_SIZE : READ_SIZE + size]
elif call_count == 3:
return large_data[READ_SIZE * 2 : READ_SIZE * 2 + size]
else:
return large_data[READ_SIZE * 3 :]
mock_file.read.side_effect = mock_read
payload_obj = payload.IOBasePayload(mock_file)
writer = MockStreamWriter()
# Write with a large content_length
await payload_obj.write_with_length(writer, len(large_data))
# Verify that reads were limited to READ_SIZE
assert len(read_sizes) > 1 # Should have multiple reads
for read_size in read_sizes:
assert (
read_size <= READ_SIZE
), f"Read size {read_size} exceeds READ_SIZE {READ_SIZE}"
async def test_iobase_payload_large_content_length() -> None:
"""Test IOBasePayload with very large content_length doesn't read all at once."""
data = b"x" * (READ_SIZE + 1000)
# Create a custom file-like object that tracks read sizes
class TrackingBytesIO(io.BytesIO):
def __init__(self, data: bytes) -> None:
super().__init__(data)
self.read_sizes: list[int] = []
def read(self, size: int | None = -1) -> bytes:
self.read_sizes.append(size if size is not None else -1)
return super().read(size)
tracking_file = TrackingBytesIO(data)
payload_obj = payload.IOBasePayload(tracking_file)
writer = MockStreamWriter()
# Write with a very large content_length (simulating the bug scenario)
large_content_length = 10 * 1024 * 1024 # 10MB
await payload_obj.write_with_length(writer, large_content_length)
# Verify no single read exceeded READ_SIZE
for read_size in tracking_file.read_sizes:
assert (
read_size <= READ_SIZE
), f"Read size {read_size} exceeds READ_SIZE {READ_SIZE}"
# Verify the correct amount of data was written
assert writer.get_written_bytes() == data
async def test_textio_payload_reads_in_chunks() -> None:
"""Test TextIOPayload reads data in chunks of READ_SIZE, not all at once."""
# Create a large text file that's multiple times larger than READ_SIZE
large_text = "x" * (READ_SIZE * 3 + 1000) # ~192KB + 1000 chars
# Mock the file-like object to track read calls
mock_file = unittest.mock.Mock(spec=io.StringIO)
mock_file.tell.return_value = 0
mock_file.fileno.side_effect = AttributeError # Make size return None
mock_file.encoding = "utf-8"
# Track the sizes of read() calls
read_sizes = []
def mock_read(size: int) -> str:
read_sizes.append(size)
# Return data based on how many times read was called
call_count = len(read_sizes)
if call_count == 1:
return large_text[:size]
elif call_count == 2:
return large_text[READ_SIZE : READ_SIZE + size]
elif call_count == 3:
return large_text[READ_SIZE * 2 : READ_SIZE * 2 + size]
else:
return large_text[READ_SIZE * 3 :]
mock_file.read.side_effect = mock_read
payload_obj = payload.TextIOPayload(mock_file)
writer = MockStreamWriter()
# Write with a large content_length
await payload_obj.write_with_length(writer, len(large_text.encode("utf-8")))
# Verify that reads were limited to READ_SIZE
assert len(read_sizes) > 1 # Should have multiple reads
for read_size in read_sizes:
assert (
read_size <= READ_SIZE
), f"Read size {read_size} exceeds READ_SIZE {READ_SIZE}"
async def test_textio_payload_large_content_length() -> None:
"""Test TextIOPayload with very large content_length doesn't read all at once."""
text_data = "x" * (READ_SIZE + 1000)
# Create a custom file-like object that tracks read sizes
class TrackingStringIO(io.StringIO):
def __init__(self, data: str) -> None:
super().__init__(data)
self.read_sizes: list[int] = []
def read(self, size: int | None = -1) -> str:
self.read_sizes.append(size if size is not None else -1)
return super().read(size)
tracking_file = TrackingStringIO(text_data)
payload_obj = payload.TextIOPayload(tracking_file)
writer = MockStreamWriter()
# Write with a very large content_length (simulating the bug scenario)
large_content_length = 10 * 1024 * 1024 # 10MB
await payload_obj.write_with_length(writer, large_content_length)
# Verify no single read exceeded READ_SIZE
for read_size in tracking_file.read_sizes:
assert (
read_size <= READ_SIZE
), f"Read size {read_size} exceeds READ_SIZE {READ_SIZE}"
# Verify the correct amount of data was written
assert writer.get_written_bytes() == text_data.encode("utf-8")
async def test_async_iterable_payload_write_with_length_no_limit() -> None:
"""Test AsyncIterablePayload writing with no content length limit."""
async def gen() -> AsyncIterator[bytes]:
yield b"0123"
yield b"4567"
yield b"89"
p = payload.AsyncIterablePayload(gen())
writer = MockStreamWriter()
await p.write_with_length(writer, None)
assert writer.get_written_bytes() == b"0123456789"
assert len(writer.get_written_bytes()) == 10
async def test_async_iterable_payload_write_with_length_exact() -> None:
"""Test AsyncIterablePayload writing with exact content length."""
async def gen() -> AsyncIterator[bytes]:
yield b"0123"
yield b"4567"
yield b"89"
p = payload.AsyncIterablePayload(gen())
writer = MockStreamWriter()
await p.write_with_length(writer, 10)
assert writer.get_written_bytes() == b"0123456789"
assert len(writer.get_written_bytes()) == 10
async def test_async_iterable_payload_write_with_length_truncated_mid_chunk() -> None:
"""Test AsyncIterablePayload writing with content length truncating mid-chunk."""
async def gen() -> AsyncIterator[bytes]:
yield b"0123"
yield b"4567"
yield b"89" # pragma: no cover
p = payload.AsyncIterablePayload(gen())
writer = MockStreamWriter()
await p.write_with_length(writer, 6)
assert writer.get_written_bytes() == b"012345"
assert len(writer.get_written_bytes()) == 6
async def test_async_iterable_payload_write_with_length_truncated_at_chunk() -> None:
"""Test AsyncIterablePayload writing with content length truncating at chunk boundary."""
async def gen() -> AsyncIterator[bytes]:
yield b"0123"
yield b"4567" # pragma: no cover
yield b"89" # pragma: no cover
p = payload.AsyncIterablePayload(gen())
writer = MockStreamWriter()
await p.write_with_length(writer, 4)
assert writer.get_written_bytes() == b"0123"
assert len(writer.get_written_bytes()) == 4
async def test_bytes_payload_backwards_compatibility() -> None:
"""Test BytesPayload.write() backwards compatibility delegates to write_with_length()."""
p = payload.BytesPayload(b"1234567890")
writer = MockStreamWriter()
await p.write(writer)
assert writer.get_written_bytes() == b"1234567890"
async def test_textio_payload_with_encoding() -> None:
"""Test TextIOPayload reading with encoding and size constraints."""
data = io.StringIO("hello world")
p = payload.TextIOPayload(data, encoding="utf-8")
writer = MockStreamWriter()
await p.write_with_length(writer, 8)
# Should write exactly 8 bytes: "hello wo"
assert writer.get_written_bytes() == b"hello wo"
async def test_textio_payload_as_bytes() -> None:
"""Test TextIOPayload.as_bytes method with different encodings."""
# Test with UTF-8 encoding
data = io.StringIO("Hello 世界")
p = payload.TextIOPayload(data, encoding="utf-8")
# Test as_bytes() method
result = await p.as_bytes()
assert result == "Hello 世界".encode()
# Test that position is restored for multiple reads
result2 = await p.as_bytes()
assert result2 == "Hello 世界".encode()
# Test with different encoding parameter (should use instance encoding)
result3 = await p.as_bytes(encoding="latin-1")
assert result3 == "Hello 世界".encode() # Should still use utf-8
# Test with different encoding in payload
data2 = io.StringIO("Hello World")
p2 = payload.TextIOPayload(data2, encoding="latin-1")
result4 = await p2.as_bytes()
assert result4 == b"Hello World" # latin-1 encoding
# Test with no explicit encoding (defaults to utf-8)
data3 = io.StringIO("Test データ")
p3 = payload.TextIOPayload(data3)
result5 = await p3.as_bytes()
assert result5 == "Test データ".encode()
# Test with encoding errors parameter
data4 = io.StringIO("Test")
p4 = payload.TextIOPayload(data4, encoding="ascii")
result6 = await p4.as_bytes(errors="strict")
assert result6 == b"Test"
async def test_bytesio_payload_backwards_compatibility() -> None:
"""Test BytesIOPayload.write() backwards compatibility delegates to write_with_length()."""
data = io.BytesIO(b"test data")
p = payload.BytesIOPayload(data)
writer = MockStreamWriter()
await p.write(writer)
assert writer.get_written_bytes() == b"test data"
async def test_async_iterable_payload_backwards_compatibility() -> None:
"""Test AsyncIterablePayload.write() backwards compatibility delegates to write_with_length()."""
async def gen() -> AsyncIterator[bytes]:
yield b"chunk1"
yield b"chunk2" # pragma: no cover
p = payload.AsyncIterablePayload(gen())
writer = MockStreamWriter()
await p.write(writer)
assert writer.get_written_bytes() == b"chunk1chunk2"
async def test_async_iterable_payload_with_none_iterator() -> None:
"""Test AsyncIterablePayload with None iterator returns early without writing."""
async def gen() -> AsyncIterator[bytes]:
yield b"test" # pragma: no cover
p = payload.AsyncIterablePayload(gen())
# Manually set _iter to None to test the guard clause
p._iter = None
writer = MockStreamWriter()
# Should return early without writing anything
await p.write_with_length(writer, 10)
assert writer.get_written_bytes() == b""
async def test_async_iterable_payload_caching() -> None:
"""Test AsyncIterablePayload caching behavior."""
async def gen() -> AsyncIterator[bytes]:
yield b"Hello"
yield b" "
yield b"World"
p = payload.AsyncIterablePayload(gen())
# First call to as_bytes should consume iterator and cache
result1 = await p.as_bytes()
assert result1 == b"Hello World"
assert p._iter is None # Iterator exhausted
assert p._cached_chunks == [b"Hello", b" ", b"World"] # Chunks cached
assert p._consumed is False # Not marked as consumed to allow reuse
# Second call should use cache
result2 = await p.as_bytes()
assert result2 == b"Hello World"
assert p._cached_chunks == [b"Hello", b" ", b"World"] # Still cached
# decode should work with cached chunks
decoded = p.decode()
assert decoded == "Hello World"
# write_with_length should use cached chunks
writer = MockStreamWriter()
await p.write_with_length(writer, None)
assert writer.get_written_bytes() == b"Hello World"
# write_with_length with limit should respect it
writer2 = MockStreamWriter()
await p.write_with_length(writer2, 5)
assert writer2.get_written_bytes() == b"Hello"
async def test_async_iterable_payload_decode_without_cache() -> None:
"""Test AsyncIterablePayload decode raises error without cache."""
async def gen() -> AsyncIterator[bytes]:
yield b"test"
p = payload.AsyncIterablePayload(gen())
# decode should raise without cache
with pytest.raises(TypeError) as excinfo:
p.decode()
assert "Unable to decode - content not cached" in str(excinfo.value)
# After as_bytes, decode should work
await p.as_bytes()
assert p.decode() == "test"
async def test_async_iterable_payload_write_then_cache() -> None:
"""Test AsyncIterablePayload behavior when written before caching."""
async def gen() -> AsyncIterator[bytes]:
yield b"Hello"
yield b"World"
p = payload.AsyncIterablePayload(gen())
# First write without caching (streaming)
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == b"HelloWorld"
assert p._iter is None # Iterator exhausted
assert p._cached_chunks is None # No cache created
assert p._consumed is True # Marked as consumed
# Subsequent operations should handle exhausted iterator
result = await p.as_bytes()
assert result == b"" # Empty since iterator exhausted without cache
# Write should also be empty
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == b""
async def test_bytes_payload_reusability() -> None:
"""Test that BytesPayload can be written and read multiple times."""
data = b"test payload data"
p = payload.BytesPayload(data)
# First write_with_length
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == data
# Second write_with_length (simulating redirect)
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == data
# Write with partial length
writer3 = MockStreamWriter()
await p.write_with_length(writer3, 5)
assert writer3.get_written_bytes() == b"test "
# Test as_bytes multiple times
bytes1 = await p.as_bytes()
bytes2 = await p.as_bytes()
bytes3 = await p.as_bytes()
assert bytes1 == bytes2 == bytes3 == data
async def test_string_payload_reusability() -> None:
"""Test that StringPayload can be written and read multiple times."""
text = "test string data"
expected_bytes = text.encode("utf-8")
p = payload.StringPayload(text)
# First write_with_length
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == expected_bytes
# Second write_with_length (simulating redirect)
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == expected_bytes
# Write with partial length
writer3 = MockStreamWriter()
await p.write_with_length(writer3, 5)
assert writer3.get_written_bytes() == b"test "
# Test as_bytes multiple times
bytes1 = await p.as_bytes()
bytes2 = await p.as_bytes()
bytes3 = await p.as_bytes()
assert bytes1 == bytes2 == bytes3 == expected_bytes
async def test_bytes_io_payload_reusability() -> None:
"""Test that BytesIOPayload can be written and read multiple times."""
data = b"test bytesio payload"
bytes_io = io.BytesIO(data)
p = payload.BytesIOPayload(bytes_io)
# First write_with_length
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == data
# Second write_with_length (simulating redirect)
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == data
# Write with partial length
writer3 = MockStreamWriter()
await p.write_with_length(writer3, 5)
assert writer3.get_written_bytes() == b"test "
# Test as_bytes multiple times
bytes1 = await p.as_bytes()
bytes2 = await p.as_bytes()
bytes3 = await p.as_bytes()
assert bytes1 == bytes2 == bytes3 == data
async def test_string_io_payload_reusability() -> None:
"""Test that StringIOPayload can be written and read multiple times."""
text = "test stringio payload"
expected_bytes = text.encode("utf-8")
string_io = io.StringIO(text)
p = payload.StringIOPayload(string_io)
# Note: StringIOPayload reads all content in __init__ and becomes a StringPayload
# So it should be fully reusable
# First write_with_length
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == expected_bytes
# Second write_with_length (simulating redirect)
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == expected_bytes
# Write with partial length
writer3 = MockStreamWriter()
await p.write_with_length(writer3, 5)
assert writer3.get_written_bytes() == b"test "
# Test as_bytes multiple times
bytes1 = await p.as_bytes()
bytes2 = await p.as_bytes()
bytes3 = await p.as_bytes()
assert bytes1 == bytes2 == bytes3 == expected_bytes
async def test_buffered_reader_payload_reusability() -> None:
"""Test that BufferedReaderPayload can be written and read multiple times."""
data = b"test buffered reader payload"
buffer = io.BufferedReader(io.BytesIO(data))
p = payload.BufferedReaderPayload(buffer)
# First write_with_length
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == data
# Second write_with_length (simulating redirect)
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == data
# Write with partial length
writer3 = MockStreamWriter()
await p.write_with_length(writer3, 5)
assert writer3.get_written_bytes() == b"test "
# Test as_bytes multiple times
bytes1 = await p.as_bytes()
bytes2 = await p.as_bytes()
bytes3 = await p.as_bytes()
assert bytes1 == bytes2 == bytes3 == data
async def test_async_iterable_payload_reusability_with_cache() -> None:
"""Test that AsyncIterablePayload can be reused when cached via as_bytes."""
async def gen() -> AsyncIterator[bytes]:
yield b"async "
yield b"iterable "
yield b"payload"
expected_data = b"async iterable payload"
p = payload.AsyncIterablePayload(gen())
# First call to as_bytes should cache the data
bytes1 = await p.as_bytes()
assert bytes1 == expected_data
assert p._cached_chunks is not None
assert p._iter is None # Iterator exhausted
# Subsequent as_bytes calls should use cache
bytes2 = await p.as_bytes()
bytes3 = await p.as_bytes()
assert bytes1 == bytes2 == bytes3 == expected_data
# Now writes should also use the cached data
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == expected_data
# Second write should also work
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == expected_data
# Write with partial length
writer3 = MockStreamWriter()
await p.write_with_length(writer3, 5)
assert writer3.get_written_bytes() == b"async"
async def test_async_iterable_payload_no_reuse_without_cache() -> None:
"""Test that AsyncIterablePayload cannot be reused without caching."""
async def gen() -> AsyncIterator[bytes]:
yield b"test "
yield b"data"
p = payload.AsyncIterablePayload(gen())
# First write exhausts the iterator
writer1 = MockStreamWriter()
await p.write_with_length(writer1, None)
assert writer1.get_written_bytes() == b"test data"
assert p._iter is None # Iterator exhausted
assert p._consumed is True
# Second write should produce empty result
writer2 = MockStreamWriter()
await p.write_with_length(writer2, None)
assert writer2.get_written_bytes() == b""
async def test_bytes_io_payload_close_does_not_close_io() -> None:
"""Test that BytesIOPayload close() does not close the underlying BytesIO."""
bytes_io = io.BytesIO(b"data")
bytes_io_payload = payload.BytesIOPayload(bytes_io)
# Close the payload
await bytes_io_payload.close()
# BytesIO should NOT be closed
assert not bytes_io.closed
# Can still write after close
writer = MockStreamWriter()
await bytes_io_payload.write_with_length(writer, None)
assert writer.get_written_bytes() == b"data"
async def test_custom_payload_backwards_compat_as_bytes() -> None:
"""Test backwards compatibility for custom Payload that only implements decode()."""
class LegacyPayload(payload.Payload):
"""A custom payload that only implements decode() like old code might do."""
def __init__(self, data: str) -> None:
super().__init__(data, headers=CIMultiDict())
self._data = data
def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str:
"""Custom decode implementation."""
return self._data
async def write(self, writer: AbstractStreamWriter) -> None:
"""Write implementation which is a no-op for this test."""
# Create instance with test data
p = LegacyPayload("Hello, World!")
# Test that as_bytes() works even though it's not explicitly implemented
# The base class should call decode() and encode the result
result = await p.as_bytes()
assert result == b"Hello, World!"
# Test with different text
p2 = LegacyPayload("Test with special chars: café")
result_utf8 = await p2.as_bytes(encoding="utf-8")
assert result_utf8 == "Test with special chars: café".encode()
# Test that decode() still works as expected
assert p.decode() == "Hello, World!"
assert p2.decode() == "Test with special chars: café"
async def test_custom_payload_with_encoding_backwards_compat() -> None:
"""Test custom Payload with encoding set uses instance encoding for as_bytes()."""
class EncodedPayload(payload.Payload):
"""A custom payload with specific encoding."""
def __init__(self, data: str, encoding: str) -> None:
super().__init__(data, headers=CIMultiDict(), encoding=encoding)
self._data = data
def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str:
"""Custom decode implementation."""
return self._data
async def write(self, writer: AbstractStreamWriter) -> None:
"""Write implementation is a no-op."""
# Create instance with specific encoding
p = EncodedPayload("Test data", encoding="latin-1")
# as_bytes() should use the instance encoding (latin-1) not the default utf-8
result = await p.as_bytes()
assert result == b"Test data" # ASCII chars are same in latin-1
# Test with non-ASCII that differs between encodings
p2 = EncodedPayload("café", encoding="latin-1")
result_latin1 = await p2.as_bytes()
assert result_latin1 == "café".encode("latin-1")
assert result_latin1 != "café".encode() # Should be different bytes
async def test_iobase_payload_close_idempotent() -> None:
"""Test that IOBasePayload.close() is idempotent and covers the _consumed check."""
file_like = io.BytesIO(b"test data")
p = payload.IOBasePayload(file_like)
# First close should set _consumed to True
await p.close()
assert p._consumed is True
# Second close should be a no-op due to _consumed check (line 621)
await p.close()
assert p._consumed is True
def test_iobase_payload_decode() -> None:
"""Test IOBasePayload.decode() returns correct string."""
# Test with UTF-8 encoded text
text = "Hello, 世界! 🌍"
file_like = io.BytesIO(text.encode("utf-8"))
p = payload.IOBasePayload(file_like)
# decode() should return the original string
assert p.decode() == text
# Test with different encoding
latin1_text = "café"
file_like2 = io.BytesIO(latin1_text.encode("latin-1"))
p2 = payload.IOBasePayload(file_like2)
assert p2.decode("latin-1") == latin1_text
# Test that file position is restored
file_like3 = io.BytesIO(b"test data")
file_like3.read(4) # Move position forward
p3 = payload.IOBasePayload(file_like3)
# decode() should read from the stored start position (4)
assert p3.decode() == " data"
def test_bytes_payload_size() -> None:
"""Test BytesPayload.size property returns correct byte length."""
# Test with bytes
bp = payload.BytesPayload(b"Hello World")
assert bp.size == 11
# Test with empty bytes
bp_empty = payload.BytesPayload(b"")
assert bp_empty.size == 0
# Test with bytearray
ba = bytearray(b"Hello World")
bp_array = payload.BytesPayload(ba)
assert bp_array.size == 11
def test_string_payload_size() -> None:
"""Test StringPayload.size property with different encodings."""
# Test ASCII string with default UTF-8 encoding
sp = payload.StringPayload("Hello World")
assert sp.size == 11
# Test Unicode string with default UTF-8 encoding
unicode_str = "Hello 世界"
sp_unicode = payload.StringPayload(unicode_str)
assert sp_unicode.size == len(unicode_str.encode("utf-8"))
# Test with UTF-16 encoding
sp_utf16 = payload.StringPayload("Hello World", encoding="utf-16")
assert sp_utf16.size == len("Hello World".encode("utf-16"))
# Test with latin-1 encoding
sp_latin1 = payload.StringPayload("café", encoding="latin-1")
assert sp_latin1.size == len("café".encode("latin-1"))
def test_string_io_payload_size() -> None:
"""Test StringIOPayload.size property."""
# Test normal string
sio = StringIO("Hello World")
siop = payload.StringIOPayload(sio)
assert siop.size == 11
# Test Unicode string
sio_unicode = StringIO("Hello 世界")
siop_unicode = payload.StringIOPayload(sio_unicode)
assert siop_unicode.size == len("Hello 世界".encode())
# Test with custom encoding
sio_custom = StringIO("Hello")
siop_custom = payload.StringIOPayload(sio_custom, encoding="utf-16")
assert siop_custom.size == len("Hello".encode("utf-16"))
# Test with emoji to ensure correct byte count
sio_emoji = StringIO("Hello 👋🌍")
siop_emoji = payload.StringIOPayload(sio_emoji)
assert siop_emoji.size == len("Hello 👋🌍".encode())
# Verify it's not the string length
assert siop_emoji.size != len("Hello 👋🌍")
def test_all_string_payloads_size_is_bytes() -> None:
"""Test that all string-like payload classes report size in bytes, not string length."""
# Test string with multibyte characters
test_str = "Hello 👋 世界 🌍" # Contains emoji and Chinese characters
# StringPayload
sp = payload.StringPayload(test_str)
assert sp.size == len(test_str.encode("utf-8"))
assert sp.size != len(test_str) # Ensure it's not string length
# StringIOPayload
sio = StringIO(test_str)
siop = payload.StringIOPayload(sio)
assert siop.size == len(test_str.encode("utf-8"))
assert siop.size != len(test_str)
# Test with different encoding
sp_utf16 = payload.StringPayload(test_str, encoding="utf-16")
assert sp_utf16.size == len(test_str.encode("utf-16"))
assert sp_utf16.size != sp.size # Different encoding = different size
# JsonPayload (which extends BytesPayload)
json_data = {"message": test_str}
jp = payload.JsonPayload(json_data)
# JSON escapes Unicode, so we need to check the actual encoded size
json_str = json.dumps(json_data)
assert jp.size == len(json_str.encode("utf-8"))
# Test JsonPayload with ensure_ascii=False to get actual UTF-8 encoding
jp_utf8 = payload.JsonPayload(
json_data, dumps=lambda x: json.dumps(x, ensure_ascii=False)
)
json_str_utf8 = json.dumps(json_data, ensure_ascii=False)
assert jp_utf8.size == len(json_str_utf8.encode("utf-8"))
assert jp_utf8.size != len(
json_str_utf8
) # Now it's different due to multibyte chars
def test_bytes_io_payload_size() -> None:
"""Test BytesIOPayload.size property."""
# Test normal bytes
bio = io.BytesIO(b"Hello World")
biop = payload.BytesIOPayload(bio)
assert biop.size == 11
# Test empty BytesIO
bio_empty = io.BytesIO(b"")
biop_empty = payload.BytesIOPayload(bio_empty)
assert biop_empty.size == 0
# Test with position not at start
bio_pos = io.BytesIO(b"Hello World")
bio_pos.seek(5)
biop_pos = payload.BytesIOPayload(bio_pos)
assert biop_pos.size == 6 # Size should be from position to end
def test_json_payload_size() -> None:
"""Test JsonPayload.size property."""
# Test simple dict
data = {"hello": "world"}
jp = payload.JsonPayload(data)
expected_json = json.dumps(data) # Use actual json.dumps output
assert jp.size == len(expected_json.encode("utf-8"))
# Test with Unicode
data_unicode = {"message": "Hello 世界"}
jp_unicode = payload.JsonPayload(data_unicode)
expected_unicode = json.dumps(data_unicode)
assert jp_unicode.size == len(expected_unicode.encode("utf-8"))
# Test with custom encoding
data_custom = {"test": "data"}
jp_custom = payload.JsonPayload(data_custom, encoding="utf-16")
expected_custom = json.dumps(data_custom)
assert jp_custom.size == len(expected_custom.encode("utf-16"))
async def test_text_io_payload_size_matches_file_encoding(tmp_path: Path) -> None:
"""Test TextIOPayload.size when file encoding matches payload encoding."""
# Create UTF-8 file
utf8_file = tmp_path / "test_utf8.txt"
content = "Hello 世界"
# Write file in executor
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, utf8_file.write_text, content, "utf-8")
# Open file in executor
def open_file() -> TextIO:
return open(utf8_file, encoding="utf-8")
f = await loop.run_in_executor(None, open_file)
try:
tiop = payload.TextIOPayload(f)
# Size should match the actual UTF-8 encoded size
assert tiop.size == len(content.encode("utf-8"))
finally:
await loop.run_in_executor(None, f.close)
async def test_text_io_payload_size_utf16(tmp_path: Path) -> None:
"""Test TextIOPayload.size reports correct size with utf-16."""
# Create UTF-16 file
utf16_file = tmp_path / "test_utf16.txt"
content = "Hello World"
loop = asyncio.get_running_loop()
# Write file in executor
await loop.run_in_executor(None, utf16_file.write_text, content, "utf-16")
# Get file size in executor
utf16_file_size = await loop.run_in_executor(
None, lambda: utf16_file.stat().st_size
)
# Open file in executor
def open_file() -> TextIO:
return open(utf16_file, encoding="utf-16")
f = await loop.run_in_executor(None, open_file)
try:
tiop = payload.TextIOPayload(f, encoding="utf-16")
# Payload reports file size on disk (UTF-16)
assert tiop.size == utf16_file_size
# Write to a buffer to see what actually gets sent
writer = BufferWriter()
await tiop.write(writer)
# Check that the actual written bytes match file size
assert len(writer.buffer) == utf16_file_size
finally:
await loop.run_in_executor(None, f.close)
async def test_iobase_payload_size_after_reading(tmp_path: Path) -> None:
"""Test that IOBasePayload.size returns correct size after file has been read.
This verifies that size calculation properly accounts for the initial
file position, which is critical for 307/308 redirects where the same
payload instance is reused.
"""
# Create a test file with known content
test_file = tmp_path / "test.txt"
content = b"Hello, World! This is test content."
await asyncio.to_thread(test_file.write_bytes, content)
expected_size = len(content)
# Open the file and create payload
f = await asyncio.to_thread(open, test_file, "rb")
try:
p = payload.BufferedReaderPayload(f)
# First size check - should return full file size
assert p.size == expected_size
# Read the file (simulating first request)
writer = BufferWriter()
await p.write(writer)
assert len(writer.buffer) == expected_size
# Second size check - should still return full file size
assert p.size == expected_size
# Attempting to write again should write the full content
writer2 = BufferWriter()
await p.write(writer2)
assert len(writer2.buffer) == expected_size
finally:
await asyncio.to_thread(f.close)
async def test_iobase_payload_size_unseekable() -> None:
"""Test that IOBasePayload.size returns None for unseekable files."""
class UnseekableFile:
"""Mock file object that doesn't support seeking."""
def __init__(self, content: bytes) -> None:
self.content = content
self.pos = 0
def read(self, size: int) -> bytes:
result = self.content[self.pos : self.pos + size]
self.pos += len(result)
return result
def tell(self) -> int:
raise OSError("Unseekable file")
content = b"Unseekable content"
f = UnseekableFile(content)
p = payload.IOBasePayload(f) # type: ignore[arg-type]
# Size should return None for unseekable files
assert p.size is None
# Payload should not be consumed before writing
assert p.consumed is False
# Writing should still work
writer = BufferWriter()
await p.write(writer)
assert writer.buffer == content
# For unseekable files that can't tell() or seek(),
# they are marked as consumed after the first write
assert p.consumed is True
async def test_empty_bytes_payload_is_reusable() -> None:
"""Test that empty BytesPayload can be safely reused across requests."""
empty_payload = payload.PAYLOAD_REGISTRY.get(b"", disposition=None)
assert isinstance(empty_payload, payload.BytesPayload)
assert empty_payload.size == 0
assert empty_payload.consumed is False
assert empty_payload.autoclose is True
initial_headers = dict(empty_payload.headers)
for i in range(3):
writer = BufferWriter()
await empty_payload.write_with_length(writer, None)
assert writer.buffer == b""
assert empty_payload.consumed is False, f"consumed flag changed on write {i+1}"
assert (
dict(empty_payload.headers) == initial_headers
), f"headers mutated on write {i+1}"
assert empty_payload.size == 0, f"size changed on write {i+1}"
assert empty_payload.headers == CIMultiDict(initial_headers)
|
MockStreamWriter
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/nocover/test_sampled_from.py
|
{
"start": 2640,
"end": 4156
}
|
class ____(enum.Flag):
# Would fail under EnumCheck.NAMED_FLAGS
a = 0
b = 7
def test_flag_enum_repr_uses_class_not_a_list():
lazy_repr = repr(st.sampled_from(AFlag))
assert lazy_repr == "sampled_from(tests.nocover.test_sampled_from.AFlag)"
def test_exhaustive_flags():
# Generate powerset of flag combinations. There are only 2^3 of them, so
    # we can reasonably expect that they are all found.
unseen_flags = {
functools.reduce(operator.or_, flaglist, AFlag(0))
for r in range(len(AFlag) + 1)
for flaglist in itertools.combinations(AFlag, r)
}
@given(st.sampled_from(AFlag))
def accept(flag):
unseen_flags.discard(flag)
accept()
assert not unseen_flags
def test_flags_minimize_to_first_named_flag():
assert minimal(st.sampled_from(LargeFlag)) == LargeFlag.bit0
def test_flags_minimizes_bit_count():
assert (
minimal(st.sampled_from(LargeFlag), lambda f: bit_count(f.value) > 1)
== LargeFlag.bit0 | LargeFlag.bit1
)
@pytest.mark.skipif(
settings.get_current_profile_name() == "crosshair",
reason="takes ~10 mins; path tree is too large",
)
def test_flags_finds_all_bits_set():
assert find_any(st.sampled_from(LargeFlag), lambda f: f == ~LargeFlag(0))
def test_sample_unnamed_alias():
assert find_any(st.sampled_from(UnnamedFlag), lambda f: f == UnnamedFlag.b)
def test_shrink_to_named_empty():
assert minimal(st.sampled_from(UnnamedFlag)) == UnnamedFlag(0)
|
UnnamedFlag
|
python
|
numba__numba
|
numba/core/types/npytypes.py
|
{
"start": 7905,
"end": 8517
}
|
class ____(DTypeSpec, Opaque):
"""
Type class associated with the `np.dtype`.
i.e. :code:`assert type(np.dtype('int32')) == np.dtype`
np.dtype('int32')
"""
def __init__(self, dtype):
assert isinstance(dtype, Type)
self._dtype = dtype
name = "dtype(%s)" % (dtype,)
super(DTypeSpec, self).__init__(name)
@property
def key(self):
return self.dtype
@property
def dtype(self):
return self._dtype
def __getitem__(self, arg):
res = super(DType, self).__getitem__(arg)
return res.copy(dtype=self.dtype)
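# Hedged usage sketch (not part of the original module): constructing the dtype type
# from a concrete Numba type. Assumes this class is the one re-exported from
# numba.core.types as `DType`.
if __name__ == "__main__":
    from numba.core import types
    d = types.DType(types.int32)
    print(d, d.dtype)  # e.g. dtype(int32) int32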
|
DType
|
python
|
getsentry__sentry
|
fixtures/safe_migrations_apps/good_flow_delete_field_pending_with_not_null_m2m_app/models.py
|
{
"start": 123,
"end": 319
}
|
class ____(models.Model):
alert_rule = FlexibleForeignKey(OtherTable)
test_table = FlexibleForeignKey(
"good_flow_delete_field_pending_with_not_null_m2m_app.TestTable"
)
|
M2MTable
|
python
|
jazzband__django-simple-history
|
simple_history/tests/view.py
|
{
"start": 2459,
"end": 2592
}
|
class ____(CreateView):
model = PollWithHistoricalIPAddress
fields = ["question", "pub_date"]
|
PollWithHistoricalIPAddressCreate
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tensor_tracer_report.py
|
{
"start": 4429,
"end": 7205
}
|
class ____(object):
"""Class that is responsible from storing the trace-id of the tensors."""
def __init__(self, graph_order, traced_tensors):
self.graph_order = graph_order
self.traced_tensors = traced_tensors
self._create_tensor_maps()
def _create_tensor_maps(self):
"""Creates tensor to cache id maps."""
self.tensorname_to_cache_idx = {}
self.cache_idx_to_tensor_idx = []
for out_tensor in self.traced_tensors:
tensor_name = out_tensor.name
if tensor_name in self.tensorname_to_cache_idx:
raise ValueError('Tensor name {} should not be already in '
'tensorname_to_cache_idx'.format(tensor_name))
if tensor_name not in self.graph_order.tensor_to_idx:
raise ValueError(
'Tensor name {} is not in the tensor_to_idx, tensor_to_idx={} '
.format(tensor_name, self.graph_order.tensor_to_idx))
tensor_idx = self.graph_order.tensor_to_idx[tensor_name]
cache_idx = len(self.tensorname_to_cache_idx)
self.tensorname_to_cache_idx[tensor_name] = cache_idx
self.cache_idx_to_tensor_idx.append(tensor_idx)
if len(self.tensorname_to_cache_idx) != len(
self.cache_idx_to_tensor_idx):
raise RuntimeError(
'len(self.tensorname_to_cache_idx) must equal'
'len(self.cache_idx_to_tensor_idx), got '
'len(self.tensorname_to_cache_idx)={}, '
'len(self.cache_idx_to_tensor_idx)={}'
.format(
len(self.tensorname_to_cache_idx),
len(self.cache_idx_to_tensor_idx)))
def sort_tensors_and_ops(graph):
"""Returns a wrapper that has consistent tensor and op orders."""
graph_wrapper = collections.namedtuple('GraphWrapper',
['graph', 'operations', 'op_to_idx',
'tensors', 'tensor_to_idx',
'contains_cycle',
'topological_order_or_cycle'])
contains_cycle, topological_order_or_cycle = topological_sort(graph)
if not contains_cycle:
operations = topological_order_or_cycle
else:
operations = graph.get_operations()
op_to_idx = {op.name: index for index, op
in enumerate(operations)}
tensors = []
for op in operations:
tensors.extend(op.outputs)
tensor_to_idx = {tensor.name: index for index, tensor in
enumerate(tensors)}
return graph_wrapper(graph=graph, operations=operations, op_to_idx=op_to_idx,
tensors=tensors, tensor_to_idx=tensor_to_idx,
contains_cycle=contains_cycle,
topological_order_or_cycle=topological_order_or_cycle)
|
TensorTraceOrder
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/c/_symbol.py
|
{
"start": 838,
"end": 1313
}
|
class ____:
__slots__ = 'symbols', 'parent_symbol', 'ident'
symbols: Iterable[Symbol]
parent_symbol: Symbol
    ident: ASTIdentifier
def __init__(
self, symbols: Iterable[Symbol], parent_symbol: Symbol, ident: ASTIdentifier
) -> None:
self.symbols = symbols
self.parent_symbol = parent_symbol
self.ident = ident
@property
def parentSymbol(self) -> Symbol:
return self.parent_symbol
|
SymbolLookupResult
|
python
|
django-extensions__django-extensions
|
django_extensions/management/commands/validate_templates.py
|
{
"start": 438,
"end": 4040
}
|
class ____(BaseCommand):
args = ""
help = "Validate templates on syntax and compile errors"
ignores = set(
[
".DS_Store",
"*.swp",
"*~",
]
)
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--no-apps",
action="store_true",
dest="no_apps",
default=False,
help="Do not automatically include apps.",
)
parser.add_argument(
"--break",
"-b",
action="store_true",
dest="break",
default=False,
help="Break on first error.",
)
parser.add_argument(
"--include",
"-i",
action="append",
dest="includes",
default=[],
help="Append these paths to TEMPLATE DIRS",
)
parser.add_argument(
"--ignore-app",
action="append",
dest="ignore_apps",
default=[],
help="Ignore these apps",
)
def ignore_filename(self, filename):
filename = os.path.basename(filename)
for ignore_pattern in self.ignores:
if fnmatch.fnmatch(filename, ignore_pattern):
return True
return False
@signalcommand
def handle(self, *args, **options):
if hasattr(settings, "VALIDATE_TEMPLATES_IGNORES"):
self.ignores = getattr(settings, "VALIDATE_TEMPLATES_IGNORES")
style = color_style()
template_dirs = set(get_template_setting("DIRS", []))
template_dirs |= set(options["includes"])
template_dirs |= set(
getattr(settings, "VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS", [])
)
if not options["no_apps"]:
ignore_apps = options["ignore_apps"]
if not ignore_apps and hasattr(settings, "VALIDATE_TEMPLATES_IGNORE_APPS"):
ignore_apps = getattr(settings, "VALIDATE_TEMPLATES_IGNORE_APPS")
for app in apps.get_app_configs():
if app.name in ignore_apps:
continue
app_template_dir = os.path.join(app.path, "templates")
if os.path.isdir(app_template_dir):
template_dirs.add(app_template_dir)
settings.TEMPLATES[0]["DIRS"] = list(template_dirs)
settings.TEMPLATE_DEBUG = True
verbosity = options["verbosity"]
errors = 0
for template_dir in template_dirs:
for root, dirs, filenames in os.walk(template_dir):
for filename in filenames:
if self.ignore_filename(filename):
continue
filepath = os.path.join(root, filename)
if verbosity > 1:
self.stdout.write(filepath)
try:
get_template(filepath)
except Exception as e:
errors += 1
self.stdout.write(
"%s: %s"
% (
filepath,
style.ERROR("%s %s" % (e.__class__.__name__, str(e))),
)
)
if errors and options["break"]:
raise CommandError("Errors found")
if errors:
raise CommandError("%s errors found" % errors)
self.stdout.write("%s errors found" % errors)
|
Command
|
python
|
ray-project__ray
|
python/ray/serve/_private/request_router/request_router.py
|
{
"start": 1427,
"end": 6753
}
|
class ____:
"""Mixin for locality routing.
This mixin is used to route requests to replicas that are colocated
    with the handle. It adds the attributes and methods needed to keep track of
    locality scopes and offers helpers to apply locality routing and to
    rank replicas based on locality.
"""
def __init__(
self,
self_node_id: Optional[str] = None,
prefer_local_node_routing: bool = False,
prefer_local_az_routing: bool = False,
self_availability_zone: Optional[str] = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self._self_node_id = self_node_id
self._prefer_local_node_routing = prefer_local_node_routing
self._prefer_local_az_routing = prefer_local_az_routing
self._self_availability_zone = self_availability_zone
# Colocated replicas (e.g. wrt node, AZ)
self._colocated_replica_ids: DefaultDict[
LocalityScope, Set[ReplicaID]
] = defaultdict(set)
self._replica_id_set: Set[ReplicaID] = set()
def _discard_colocated_replica_ids_on_replica_actor_died(
self, replica_id: ReplicaID
):
"""Remove the replica ID from the colocated replica IDs.
This is called when a replica actor dies.
"""
for id_set in self._colocated_replica_ids.values():
id_set.discard(replica_id)
def _update_colocated_replica_ids_with_replicas(
self, replicas: List[RunningReplica]
):
"""Update the colocated replica IDs based on the replicas.
This is called when the replicas are updated.
"""
new_colocated_replica_ids = defaultdict(set)
for r in replicas:
if self._self_node_id is not None and r.node_id == self._self_node_id:
new_colocated_replica_ids[LocalityScope.NODE].add(r.replica_id)
if (
self._self_availability_zone is not None
and r.availability_zone == self._self_availability_zone
):
new_colocated_replica_ids[LocalityScope.AVAILABILITY_ZONE].add(
r.replica_id
)
self._colocated_replica_ids = new_colocated_replica_ids
def apply_locality_routing(
self,
pending_request: Optional[PendingRequest] = None,
) -> Set[ReplicaID]:
"""Apply locality routing to the pending request.
        When the request is None, return all replicas. Each call tries to
        route the request to replicas in priority order: first those on the
        same node, then those in the same availability zone, and finally all
        replicas.
Args:
pending_request: The pending request to be routed.
Returns:
A set of replica IDs that are candidates based on
the locality policy.
"""
if not pending_request:
return self._replica_id_set
if (
self._prefer_local_node_routing
and not pending_request.routing_context.tried_same_node
and len(self._colocated_replica_ids[LocalityScope.NODE]) > 0
):
# Attempt to route requests to replicas on the
# same node at most once
candidate_replica_ids = self._colocated_replica_ids[LocalityScope.NODE]
pending_request.routing_context.tried_same_node = True
pending_request.routing_context.should_backoff = False
elif (
self._prefer_local_az_routing
and not pending_request.routing_context.tried_same_az
and len(self._colocated_replica_ids[LocalityScope.AVAILABILITY_ZONE]) > 0
):
# Attempt to route requests to replicas in the same
# AZ at most once
candidate_replica_ids = self._colocated_replica_ids[
LocalityScope.AVAILABILITY_ZONE
]
pending_request.routing_context.tried_same_az = True
pending_request.routing_context.should_backoff = False
else:
# On subsequent iterations or when there are no replicas on the same
# node or AZ, consider all available replicas.
candidate_replica_ids = self._replica_id_set
pending_request.routing_context.should_backoff = True
return candidate_replica_ids
def rank_replicas_via_locality(
self,
replicas: List[RunningReplica],
) -> List[List[RunningReplica]]:
"""Rank the replicas based on the locality preference.
Rank 0 is the list of replicas that are on the same node.
Rank 1 is the list of replicas that are on the same availability zone.
Rank 2 is the list of all other replicas.
"""
ranked_replicas = [[] for _ in range(3)]
for replica in replicas:
if replica.replica_id in self._colocated_replica_ids[LocalityScope.NODE]:
ranked_replicas[0].append(replica)
elif (
replica.replica_id
in self._colocated_replica_ids[LocalityScope.AVAILABILITY_ZONE]
):
ranked_replicas[1].append(replica)
else:
ranked_replicas[2].append(replica)
return ranked_replicas
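# Standalone sketch (not Ray Serve's API): the same three-tier preference as
# rank_replicas_via_locality above, using plain tuples of
# (replica_id, node_id, availability_zone). All values are made up.
def _rank_by_locality_demo(replicas, self_node_id, self_az):
    ranked = [[], [], []]
    for replica_id, node_id, az in replicas:
        if node_id == self_node_id:
            ranked[0].append(replica_id)  # rank 0: same node
        elif az == self_az:
            ranked[1].append(replica_id)  # rank 1: same availability zone
        else:
            ranked[2].append(replica_id)  # rank 2: everything else
    return ranked
assert _rank_by_locality_demo(
    [("r1", "node-a", "az-1"), ("r2", "node-b", "az-1"), ("r3", "node-c", "az-2")],
    self_node_id="node-a",
    self_az="az-1",
) == [["r1"], ["r2"], ["r3"]]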
@PublicAPI(stability="alpha")
|
LocalityMixin
|
python
|
aio-libs__aiohttp
|
aiohttp/web_protocol.py
|
{
"start": 1877,
"end": 1950
}
|
class ____(Exception):
"""Payload parsing error."""
|
RequestPayloadError
|
python
|
pytorch__pytorch
|
benchmarks/gpt_fast/common.py
|
{
"start": 153,
"end": 544
}
|
class ____:
name: str
metric: str
target: float
actual: float
dtype: str
device: str
arch: str # GPU name for CUDA or CPU arch for CPU
is_model: bool = False
def register_experiment(name: Optional[str] = None):
def decorator(func):
key = name or func.__name__
all_experiments[key] = func
return func
return decorator
|
Experiment
|
python
|
viewflow__viewflow
|
viewflow/this_object.py
|
{
"start": 2555,
"end": 3645
}
|
class ____:
"""
Helper for building forward references to class attributes and methods.
The rationale is the ability to specify references to the class attributes and
methods before they are declared. `this` acts similarly to `self`, but for
class-level forward references.
"""
def resolve(self, instance: object, this_ref: Union[ThisObject, ThisMethod, Any]):
"""
Resolve a forward reference on the given instance.
Args:
instance (object): The instance on which to resolve the reference.
this_ref (Union[ThisObject, ThisMethod, Any]): The reference to resolve.
Returns:
Any: The resolved reference.
Raises:
AttributeError: If the reference cannot be resolved.
"""
if isinstance(this_ref, (ThisObject, ThisMethod)):
return this_ref.resolve(instance)
else:
return this_ref
def __getattr__(self, name):
return ThisObject(name)
# Instantiate a global `this` object for use in class definitions.
this = This()
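# Standalone illustration (not viewflow's API): the forward-reference idea in
# miniature. A name looked up on `this` is recorded at class-definition time and is
# only resolved against a concrete instance later, once the attribute exists.
class _DemoRef:
    def __init__(self, name):
        self.name = name
    def resolve(self, instance):
        return getattr(instance, self.name)
class _DemoThis:
    def __getattr__(self, name):
        return _DemoRef(name)
_demo_this = _DemoThis()
class _DemoFlow:
    start = _demo_this.approve  # referenced before `approve` is defined below
    def approve(self):
        return "approved"
assert _DemoFlow.start.resolve(_DemoFlow())() == "approved"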
|
This
|
python
|
django__django
|
tests/model_fields/test_uuid.py
|
{
"start": 7966,
"end": 8963
}
|
class ____(SimpleTestCase):
test_data = (
'[{"fields": {"field": "550e8400-e29b-41d4-a716-446655440000"}, '
'"model": "model_fields.uuidmodel", "pk": null}]'
)
nullable_test_data = (
'[{"fields": {"field": null}, '
'"model": "model_fields.nullableuuidmodel", "pk": null}]'
)
def test_dumping(self):
instance = UUIDModel(field=uuid.UUID("550e8400e29b41d4a716446655440000"))
data = serializers.serialize("json", [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize("json", self.test_data))[0].object
self.assertEqual(
instance.field, uuid.UUID("550e8400-e29b-41d4-a716-446655440000")
)
def test_nullable_loading(self):
instance = list(serializers.deserialize("json", self.nullable_test_data))[
0
].object
self.assertIsNone(instance.field)
|
TestSerialization
|
python
|
miyuchina__mistletoe
|
test/base_test.py
|
{
"start": 108,
"end": 1719
}
|
class ____(TestCase):
"""
Base class for tests of renderers.
"""
def setUp(self):
self.maxDiff = None
def markdownResultTest(self, markdown, expected):
output = self.renderer.render(Document(markdown))
self.assertEqual(output, expected)
def filesBasedTest(func):
"""
Note: Use this as a decorator on a test function with an empty body.
This is a realization of the "convention over configuration"
practice. You only need to define a unique ``sampleOutputExtension`` within your
test case setup, in addition to the ``renderer`` under the test of course.
Runs the current renderer against input parsed from a file and
asserts that the renderer output is equal to content stored in another file.
Both the "input" and "expected output" files need to have the same ``filename``
that is extracted from the decorated test function name.
"""
def wrapper(self):
            # take the string after the first '__' in the function name
filename = func.__name__
filename = filename.split('__', 1)[1]
# parse input markdown, call render on it and check the output
with open('test/samples/{}.md'.format(filename), 'r') as fin:
output = self.renderer.render(Document(fin))
with open('test/samples/{}.{}'.format(filename, self.sampleOutputExtension), 'r') as expectedFin:
expected = ''.join(expectedFin)
self.assertEqual(output, expected)
return wrapper
|
BaseRendererTest
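The naming convention the decorator relies on is compact enough to state on its own. The sketch below only demonstrates how the sample-file basename is derived from the test method name; basic_blocks is an invented example name.

# Everything after the first '__' in the test method name selects the files
# under test/samples/ (e.g. basic_blocks.md and basic_blocks.<extension>).
def sample_name(test_method_name: str) -> str:
    return test_method_name.split('__', 1)[1]

assert sample_name('test_render__basic_blocks') == 'basic_blocks'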
|
python
|
PrefectHQ__prefect
|
tests/server/models/test_variables.py
|
{
"start": 6038,
"end": 6667
}
|
class ____:
async def test_count_zero_variables(
self,
session,
):
res = await count_variables(session)
assert res == 0
async def test_count_one_variables(
self,
session,
variable,
):
res = await count_variables(session)
assert res == 1
async def test_count_variables_with_filter(
self,
session,
variables,
):
res = await count_variables(
session,
variable_filter=VariableFilter(name=VariableFilterName(like_="variable1%")),
)
assert res == 2
|
TestCountVariables
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-convex/destination_convex/writer.py
|
{
"start": 193,
"end": 1628
}
|
class ____:
"""
Buffers messages before sending them to Convex.
"""
write_buffer: List[Mapping[str, Any]] = []
flush_interval = 1000
def __init__(self, client: ConvexClient):
self.client = client
def delete_tables(self, table_names: List[str]) -> None:
"""Deletes all the records belonging to the input stream"""
if len(table_names) > 0:
self.client.delete(table_names)
def add_indexes(self, indexes: Mapping[str, List[List[str]]]) -> None:
self.client.add_primary_key_indexes(indexes)
self.__poll_for_indexes(indexes)
def __poll_for_indexes(self, indexes: Mapping[str, List[List[str]]]) -> None:
"""Polls until the indexes specified are ready"""
tables = list(indexes.keys())
while True:
resp = self.client.primary_key_indexes_ready(tables)
if resp.json()["indexesReady"]:
break
else:
time.sleep(1)
return
def queue_write_operation(self, message: Mapping[str, Any]) -> None:
"""Adds messages to the write queue and flushes if the buffer is full"""
self.write_buffer.append(message)
if len(self.write_buffer) == self.flush_interval:
self.flush()
def flush(self) -> None:
"""Writes to Convex"""
self.client.batch_write(self.write_buffer)
self.write_buffer.clear()
|
ConvexWriter
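One detail worth noting is that write_buffer is declared at class level, so it is shared by every ConvexWriter instance. The standalone sketch below shows the same buffer-and-flush pattern with an instance-level buffer; BufferedWriter and its sink callable are invented names for the example, not part of the connector.

from typing import Any, Callable, Dict, List

class BufferedWriter:
    def __init__(self, sink: Callable[[List[Dict[str, Any]]], None], flush_every: int = 1000):
        self._sink = sink                        # called with a batch of records
        self._flush_every = flush_every
        self._buffer: List[Dict[str, Any]] = []  # per-instance, unlike the class above

    def queue(self, record: Dict[str, Any]) -> None:
        self._buffer.append(record)
        if len(self._buffer) >= self._flush_every:
            self.flush()

    def flush(self) -> None:
        if self._buffer:
            self._sink(list(self._buffer))
            self._buffer.clear()

# Usage: collect flushed batches into a list instead of sending them anywhere.
batches: List[List[Dict[str, Any]]] = []
writer = BufferedWriter(batches.append, flush_every=2)
for i in range(5):
    writer.queue({"i": i})
writer.flush()
assert [len(b) for b in batches] == [2, 2, 1]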
|
python
|
pypa__warehouse
|
tests/unit/accounts/test_services.py
|
{
"start": 73650,
"end": 79045
}
|
class ____:
def test_device_is_known(self, user_service):
user = UserFactory.create()
UserUniqueLoginFactory.create(
user=user, ip_address=REMOTE_ADDR, status="confirmed"
)
request = pretend.stub(
db=user_service.db,
remote_addr=REMOTE_ADDR,
find_service=lambda *a, **kw: pretend.stub(),
)
assert user_service.device_is_known(user.id, request)
def test_device_is_not_known(self, user_service, monkeypatch):
user = UserFactory.create(with_verified_primary_email=True)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(services, "send_unrecognized_login_email", send_email)
token_service = pretend.stub(dumps=lambda d: "fake_token", max_age=60)
user_service.request = pretend.stub(
db=user_service.db,
remote_addr=REMOTE_ADDR,
headers={
"User-Agent": (
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) "
"Gecko/20100101 Firefox/15.0.1"
)
},
find_service=lambda *a, **kw: token_service,
)
assert not user_service.device_is_known(user.id, user_service.request)
unique_login = (
user_service.db.query(services.UserUniqueLogin)
.filter(
services.UserUniqueLogin.user_id == user.id,
services.UserUniqueLogin.ip_address == REMOTE_ADDR,
)
.one()
)
assert unique_login.expires is not None
assert send_email.calls == [
pretend.call(
user_service.request,
user,
ip_address=REMOTE_ADDR,
user_agent="Firefox (Ubuntu)",
token="fake_token",
)
]
def test_device_is_pending_not_expired(self, user_service, monkeypatch):
user = UserFactory.create(with_verified_primary_email=True)
UserUniqueLoginFactory.create(
user=user, ip_address=REMOTE_ADDR, status="pending"
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(services, "send_unrecognized_login_email", send_email)
token_service = pretend.stub(dumps=lambda d: "fake_token", max_age=60)
user_service.request = pretend.stub(
db=user_service.db,
remote_addr=REMOTE_ADDR,
headers={
"User-Agent": (
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) "
"Gecko/20100101 Firefox/15.0.1"
)
},
find_service=lambda *a, **kw: token_service,
)
assert not user_service.device_is_known(user.id, user_service.request)
assert send_email.calls == []
def test_device_is_pending_and_expired(self, user_service, monkeypatch):
user = UserFactory.create(with_verified_primary_email=True)
UserUniqueLoginFactory.create(
user=user,
ip_address=REMOTE_ADDR,
status="pending",
created=datetime.datetime(1970, 1, 1),
expires=datetime.datetime(1970, 1, 1),
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(services, "send_unrecognized_login_email", send_email)
token_service = pretend.stub(dumps=lambda d: "fake_token", max_age=60)
user_service.request = pretend.stub(
db=user_service.db,
remote_addr=REMOTE_ADDR,
headers={
"User-Agent": (
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) "
"Gecko/20100101 Firefox/15.0.1"
)
},
find_service=lambda *a, **kw: token_service,
)
assert not user_service.device_is_known(user.id, user_service.request)
assert send_email.calls == [
pretend.call(
user_service.request,
user,
ip_address=REMOTE_ADDR,
user_agent="Firefox (Ubuntu)",
token="fake_token",
)
]
@pytest.mark.parametrize("ua_string", [None, "no bueno", "Python-urllib/3.7"])
def test_device_is_not_known_bad_user_agent(
self, user_service, monkeypatch, ua_string
):
user = UserFactory.create(with_verified_primary_email=True)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(services, "send_unrecognized_login_email", send_email)
token_service = pretend.stub(dumps=lambda d: "fake_token", max_age=60)
headers = {}
if ua_string:
headers["User-Agent"] = ua_string
user_service.request = pretend.stub(
db=user_service.db,
remote_addr=REMOTE_ADDR,
headers=headers,
find_service=lambda *a, **kw: token_service,
)
assert not user_service.device_is_known(user.id, user_service.request)
assert send_email.calls == [
pretend.call(
user_service.request,
user,
ip_address=REMOTE_ADDR,
user_agent=ua_string or "Unknown User-Agent",
token="fake_token",
)
]
|
TestDeviceIsKnown
|
python
|
pytorch__pytorch
|
test/profiler/test_memory_profiler.py
|
{
"start": 11851,
"end": 34208
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.maxDiff = None
@staticmethod
def formatSchemas(
prof: torch.profiler.profile, indent: int = 12
) -> tuple[tuple[str, tuple[bool, ...]], ...]:
tree = prof.profiler.kineto_results.experimental_event_tree()
out: list[tuple[str, tuple[bool, ...]]] = []
for node in _utils.traverse_dfs(tree):
if node.tag == _EventType.TorchOp:
e = node.extra_fields
schemas = _memory_profiler.SchemaMatcher.match_schemas(e)
name = node.name
if len(schemas) == 1:
name = f"{name}.{schemas[0].overload_name}"
elif len(schemas) > 1:
name = f"{name}.{{{', '.join(s.overload_name for s in schemas)}}}"
out.append((name, _memory_profiler.SchemaMatcher.inputs_are_mutable(e)))
return tuple(out)
@staticmethod
def _run_and_format_data_flow(
inputs: dict[str, torch.Tensor],
f: Callable[..., Optional[dict[str, torch.Tensor]]],
indent: int = 12,
) -> str:
with profile() as prof:
outputs = f(**inputs) or {}
gc.collect()
memory_profile = prof._memory_profile()
graph = memory_profile._data_flow_graph
storage_to_id = {key.storage.ptr: key.id for key in graph._active_version}
lines: list[str] = []
for name, t in it.chain(inputs.items(), outputs.items()):
lines.append(f"{name + ':':<8} T{storage_to_id[t.storage().data_ptr()]}")
if t.grad is not None:
grad_id = storage_to_id[t.grad.storage().data_ptr()]
lines.append(f"{name + '.grad:':<9} T{grad_id}")
if lines:
lines.append("")
for node in graph.flow_nodes:
destroyed = {k for k, v in node._edges.items() if v.is_deletion}
inputs: list[str] = []
for key, (_, v) in node.inputs.items():
inputs.append(f"T{key.id}(v{v}{'*' if key in destroyed else ''})")
outputs = [f"T{key.id}(v{v})" for key, v in node.outputs.items()]
if inputs or outputs:
event_name = node._event.name.replace("torch::autograd::", "")
lines.append(
f"{event_name:<25} {', '.join(inputs):<15} -> {', '.join(outputs)}"
)
return textwrap.indent("\n".join([l.rstrip() for l in lines]), " " * indent)
def test_match_schemas(self) -> None:
with profile() as prof:
x = torch.ones((1,)).mul(2).add_(2)
_ = torch.sin(x, out=torch.empty_like(x))
self.assertEqual(
self.formatSchemas(prof),
(
("aten::ones.", (False,) * 5),
("aten::empty.memory_format", (False,) * 6),
#
# fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
("aten::fill_.Scalar", (True, False)),
("aten::mul.Tensor", (False, False)),
("aten::to.dtype", (False,) * 5),
("aten::_to_copy.", (False,) * 7),
("aten::empty_strided.", (False,) * 6),
#
# copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
("aten::copy_.", (True, False, False)),
#
# add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
("aten::add_.Tensor", (True, False, False)),
("aten::to.dtype", (False,) * 5),
("aten::_to_copy.", (False,) * 7),
("aten::empty_strided.", (False,) * 6),
#
# copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
("aten::copy_.", (True, False, False)),
("aten::empty_like.", (False,) * 6),
("aten::empty_strided.", (False,) * 6),
#
# sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
("aten::sin.out", (False, True)),
),
)
def test_match_schemas_backward(self) -> None:
x = torch.ones((1,))
w = torch.ones((1,), requires_grad=True)
with profile() as prof:
torch.mul(x, w).backward()
self.assertEqual(
self.formatSchemas(prof),
(
("aten::mul.Tensor", (False, False)),
("aten::ones_like.", (False,) * 6),
("aten::empty_like.", (False,) * 6),
("aten::empty_strided.", (False,) * 6),
#
# fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
("aten::fill_.Scalar", (True, False)),
("autograd::engine::evaluate_function: MulBackward0", ()),
("MulBackward0", (None,)),
("aten::mul.Tensor", (False, False)),
(
"autograd::engine::evaluate_function: torch::autograd::AccumulateGrad",
(),
),
("torch::autograd::AccumulateGrad", (None,)),
("aten::detach.", (False,)),
("detach", (None,)),
),
)
def test_match_schemas_tensorlist(self) -> None:
x = torch.ones((1,))
y = torch.ones((1,))
with profile() as prof:
torch.cat([x, y], axis=0)
self.assertEqual(
self.formatSchemas(prof),
(("aten::cat.", (False, False)),),
)
def test_data_flow_graph_with_annotations(self) -> None:
def f(x, y):
# torch._C._jit_get_schemas_for_operator will reject any name that
# is missing a namespace. (denoted by the presence of "::") We want
# to check that we skip both annotations which have no schema
# (return empty tuple from SchemaMatcher.lookup_schemas) and
# annotations which cannot have schema (return None from
# SchemaMatcher.lookup_schemas).
with torch.profiler.record_function("Namespaced::Annotation"):
with torch.profiler.record_function("My Annotation"):
x.zero_()
y.zero_()
return {"x0": torch.ones_like(x), "y0": torch.zeros_like(y)}
inputs = {"x": torch.ones((1,)), "y": torch.ones((1,))}
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f),
"""\
x: T0
y: T1
x0: T2
y0: T3
aten::zero_ T0(v0) -> T0(v1)
aten::zero_ T1(v0) -> T1(v1)
aten::ones_like T0(v1) -> T2(v0)
aten::zeros_like T1(v1) -> T3(v0)""",
)
def test_data_flow_graph_non_op_allocations(self) -> None:
def f(x):
x.mul(2)
# The python arg parser will convert the python scalar `2` to a Tensor
# to pass to `aten::mul`. As a result there is no op that "owns" the
# allocation. The Tensor deletions also do not happen in an op; they
# are collected as a result of the Python objects going out of scope.
self.assertExpectedInline(
self._run_and_format_data_flow({"x": torch.ones((1,))}, f),
"""\
x: T1
[memory] -> T0(v0)
aten::mul T0(v0), T1(v0) ->
[memory] T0(v0*) ->""",
)
def test_data_flow_graph_simple(self) -> None:
inputs = {"x": torch.ones((25,)), "y": torch.ones((25,), requires_grad=True)}
def f0(x, y):
z = x.mul(y)
return {"z": z.view_as(z)}
def f1(x, y): # noqa: F841
with torch.no_grad():
return f0(x, y)
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f0),
"""\
x: T0
y: T1
z: T2
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::view_as T2(v0) ->""",
)
# Out of place is identical regardless of Autograd.
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f0),
"""\
x: T0
y: T1
z: T2
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::view_as T2(v0) ->""",
)
def test_data_flow_graph_simple_inplace(self) -> None:
inputs = {"x": torch.ones((25,)), "y": torch.ones((25,), requires_grad=True)}
def f0(x, y):
x.mul_(y)
def f1(x, y):
with torch.no_grad():
return f0(x, y)
# When Autograd is enabled a second Tensor `T2` is created to store
# the values of T0(v0) which are needed for backwards.
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f0),
"""\
x: T0
y: T1
aten::mul_ T0(v0), T1(v0) -> T0(v1), T2(v0)""",
)
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f1),
"""\
x: T0
y: T1
aten::mul_ T0(v0), T1(v0) -> T0(v1)""",
)
def test_data_flow_graph_simple_backward(self) -> None:
inputs = {
"x": torch.ones((1,)),
"w": torch.ones((1,), requires_grad=True),
}
self.assertExpectedInline(
self._run_and_format_data_flow(
inputs, lambda x, w: (x * w).sin().backward()
),
"""\
x: T0
w: T1
w.grad: T7
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::sin T2(v0) -> T3(v0)
aten::ones_like T3(v0) -> T4(v0)
SinBackward0 T2(v0), T4(v0) -> T6(v0)
[memory] T2(v0*) ->
MulBackward0 T0(v0), T6(v0) -> T7(v0)
[memory] T6(v0*) ->
AccumulateGrad T7(v0) ->
[memory] T4(v0*) ->
[memory] T3(v0*) ->""",
)
def test_data_flow_graph_complicated(self) -> None:
def f():
x = torch.ones((25,))
y = x.mul(2).add_(2)
z = torch.sin(y, out=torch.empty_like(y))
return {"x": x, "y": y, "z": z}
# T1 is the `2` in `.mul(2)`. The Python arg parser automatically
# converts Scalar arguments to Tensors. The same is true for `T4`
# and `.add_(2)`.
self.assertExpectedInline(
self._run_and_format_data_flow({}, f),
"""\
x: T0
y: T3
z: T6
aten::ones -> T0(v0)
[memory] -> T1(v0)
aten::mul T0(v0), T1(v0) -> T3(v0)
[memory] T1(v0*) ->
[memory] -> T4(v0)
aten::add_ T3(v0), T4(v0) -> T3(v1)
[memory] T4(v0*) ->
aten::empty_like T3(v1) -> T6(v0)
aten::sin T3(v1), T6(v0) -> T6(v1)""",
)
with profile() as prof:
f()
# `aten::mul` creates a temporary Tensor (T2), which is why the output
# has ID three rather than two.
mul_node = prof._memory_profile()._data_flow_graph.flow_nodes[2]
self.assertEqual(mul_node._event.name, "aten::mul")
self.assertEqual(len(mul_node.intermediates), 1)
self.assertEqual(mul_node.intermediates[0].id, 2)
def test_data_flow_graph_stacked(self) -> None:
inputs = {
"x": torch.ones((25,)),
"w0": torch.ones((1,), requires_grad=True),
"w1": torch.ones((1,), requires_grad=True),
}
def f(x, w0, w1):
return x.mul(w0).relu().mul(w1).relu().sum()
def f_fwd(**kwargs):
with torch.no_grad():
return {"loss": f(**kwargs)}
def f_fwd_bwd(**kwargs):
loss = f(**kwargs)
loss.backward()
return {"loss": loss}
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f_fwd),
"""\
x: T0
w0: T1
w1: T4
loss: T7
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::relu T2(v0) -> T3(v0)
[memory] T2(v0*) ->
aten::mul T3(v0), T4(v0) -> T5(v0)
[memory] T3(v0*) ->
aten::relu T5(v0) -> T6(v0)
[memory] T5(v0*) ->
aten::sum T6(v0) -> T7(v0)
[memory] T6(v0*) ->""",
)
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f_fwd_bwd),
"""\
x: T0
w0: T1
w0.grad: T15
w1: T4
w1.grad: T12
loss: T7
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::relu T2(v0) -> T3(v0)
[memory] T2(v0*) ->
aten::mul T3(v0), T4(v0) -> T5(v0)
aten::relu T5(v0) -> T6(v0)
[memory] T5(v0*) ->
aten::sum T6(v0) -> T7(v0)
aten::ones_like T7(v0) -> T8(v0)
SumBackward0 T8(v0) ->
ReluBackward0 T6(v0), T8(v0) -> T9(v0)
[memory] T6(v0*) ->
MulBackward0 T3(v0), T4(v0), T9(v0) -> T10(v0), T11(v0)
aten::sum T10(v0) -> T12(v0)
[memory] T10(v0*) ->
[memory] T9(v0*) ->
AccumulateGrad T12(v0) ->
ReluBackward0 T3(v0), T11(v0) -> T13(v0)
[memory] T11(v0*) ->
[memory] T3(v0*) ->
MulBackward0 T0(v0), T13(v0) -> T14(v0)
aten::sum T14(v0) -> T15(v0)
[memory] T14(v0*) ->
[memory] T13(v0*) ->
AccumulateGrad T15(v0) ->
[memory] T8(v0*) ->""",
)
# Second time grads are already initialized.
self.assertExpectedInline(
self._run_and_format_data_flow(inputs, f_fwd_bwd),
"""\
x: T0
w0: T1
w0.grad: T17
w1: T4
w1.grad: T13
loss: T7
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::relu T2(v0) -> T3(v0)
[memory] T2(v0*) ->
aten::mul T3(v0), T4(v0) -> T5(v0)
aten::relu T5(v0) -> T6(v0)
[memory] T5(v0*) ->
aten::sum T6(v0) -> T7(v0)
aten::ones_like T7(v0) -> T8(v0)
SumBackward0 T8(v0) ->
ReluBackward0 T6(v0), T8(v0) -> T9(v0)
[memory] T6(v0*) ->
MulBackward0 T3(v0), T4(v0), T9(v0) -> T10(v0), T11(v0)
aten::sum T10(v0) -> T12(v0)
[memory] T10(v0*) ->
[memory] T9(v0*) ->
AccumulateGrad T12(v0*), T13(v0) -> T13(v1)
ReluBackward0 T3(v0), T11(v0) -> T14(v0)
[memory] T11(v0*) ->
[memory] T3(v0*) ->
MulBackward0 T0(v0), T14(v0) -> T15(v0)
aten::sum T15(v0) -> T16(v0)
[memory] T15(v0*) ->
[memory] T14(v0*) ->
AccumulateGrad T16(v0*), T17(v0) -> T17(v1)
[memory] T8(v0*) ->""",
)
return
x = torch.ones((25,))
w0 = torch.ones((1,), requires_grad=True)
w1 = torch.ones((1,), requires_grad=True)
with profile() as prof_no_grad:
with torch.no_grad():
x.mul(w0).relu().mul(w1).relu().sum()
# TODO: one with `.logsumexp(dim=0)`
self.assertExpectedInline(
self._format_graph(prof_no_grad),
"""\
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::relu T2(v0) -> T3(v0)
[memory] T2(v0*) ->
aten::mul T3(v0), T4(v0) -> T5(v0)
[memory] T3(v0*) ->
aten::relu T5(v0) -> T6(v0)
[memory] T5(v0*) ->
aten::sum T6(v0) -> T7(v0)
[memory] T6(v0*) ->
[memory] T7(v0*) ->""",
)
with profile() as prof_grad:
loss = x.mul(w0).relu().mul(w1).relu().sum()
loss.backward()
self.assertExpectedInline(
self._format_graph(prof_grad),
"""\
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::relu T2(v0) -> T3(v0)
[memory] T2(v0*) ->
aten::mul T3(v0), T4(v0) -> T5(v0)
aten::relu T5(v0) -> T6(v0)
[memory] T5(v0*) ->
aten::sum T6(v0) -> T7(v0)
aten::ones_like T7(v0) -> T8(v0)
SumBackward0 T8(v0) -> T8(v1)
ReluBackward0 T6(v0), T8(v1) -> T8(v2), T9(v0)
[memory] T6(v0*) ->
MulBackward0 T3(v0), T4(v0), T9(v0) -> T9(v1), T10(v0), T11(v0)
aten::sum T10(v0) -> T12(v0)
[memory] T10(v0*) ->
[memory] T9(v1*) ->
AccumulateGrad T12(v0) -> T12(v1)
ReluBackward0 T3(v0), T11(v0) -> T11(v1), T13(v0)
[memory] T11(v1*) ->
[memory] T3(v0*) ->
MulBackward0 T0(v0), T13(v0) -> T13(v1), T14(v0)
aten::sum T14(v0) -> T15(v0)
[memory] T14(v0*) ->
[memory] T13(v1*) ->
AccumulateGrad T15(v0) -> T15(v1)
[memory] T8(v2*) ->""",
)
# Second time grads are already initialized.
with profile() as prof_grad:
loss = x.mul(w0).relu().mul(w1).relu().sum()
loss.backward()
self.assertExpectedInline(
self._format_graph(prof_grad),
"""\
aten::mul T0(v0), T1(v0) -> T2(v0)
aten::relu T2(v0) -> T3(v0)
[memory] T2(v0*) ->
aten::mul T3(v0), T4(v0) -> T5(v0)
aten::relu T5(v0) -> T6(v0)
[memory] T5(v0*) ->
aten::sum T6(v0) -> T7(v0)
aten::ones_like T7(v0) -> T8(v0)
SumBackward0 T8(v0) -> T8(v1)
ReluBackward0 T6(v0), T8(v1) -> T8(v2), T9(v0)
[memory] T6(v0*) ->
MulBackward0 T3(v0), T4(v0), T9(v0) -> T9(v1), T10(v0), T11(v0)
aten::sum T10(v0) -> T12(v0)
[memory] T10(v0*) ->
[memory] T9(v1*) ->
AccumulateGrad T12(v0*), T13(v0) -> T13(v1)
ReluBackward0 T3(v0), T11(v0) -> T11(v1), T14(v0)
[memory] T11(v1*) ->
[memory] T3(v0*) ->
MulBackward0 T0(v0), T14(v0) -> T14(v1), T15(v0)
aten::sum T15(v0) -> T16(v0)
[memory] T15(v0*) ->
[memory] T14(v1*) ->
AccumulateGrad T16(v0*), T17(v0) -> T17(v1)
[memory] T8(v2*) ->""",
)
@skipIfTorchDynamo("TorchDynamo changes Python calls that memory profiling relies on.")
|
TestDataFlow
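These tests drive private memory-profiler internals, but they all sit on top of the public torch.profiler entry point. A minimal sketch of that entry point, assuming a CPU-only PyTorch install, looks like this.

import torch
from torch.profiler import profile

# Record the ops issued by a tiny computation; prof._memory_profile() is the
# private hook the tests use, while key_averages() is the public summary.
x = torch.ones((4,))
with profile() as prof:
    y = x.mul(2).add_(1)

print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=5))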
|
python
|
walkccc__LeetCode
|
solutions/2186. Minimum Number of Steps to Make Two Strings Anagram II/2186.py
|
{
"start": 0,
"end": 188
}
|
class ____:
def minSteps(self, s: str, t: str) -> int:
count = collections.Counter(s)
count.subtract(collections.Counter(t))
return sum([abs(c) for c in count.values()])
|
Solution
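A quick standalone check of the counting logic, using LeetCode's sample input for this problem (s = "leetcode", t = "coats", expected 7):

import collections

# Steps needed equals the total absolute difference of letter counts.
s, t = "leetcode", "coats"
diff = collections.Counter(s)
diff.subtract(collections.Counter(t))
assert sum(abs(c) for c in diff.values()) == 7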
|
python
|
coleifer__peewee
|
tests/regressions.py
|
{
"start": 8090,
"end": 8823
}
|
class ____(ModelTestCase):
def setUp(self):
super(TestInsertFromSQL, self).setUp()
self.database.execute_sql('create table if not exists user_src '
'(name TEXT);')
tbl = Table('user_src').bind(self.database)
tbl.insert(name='foo').execute()
def tearDown(self):
super(TestInsertFromSQL, self).tearDown()
self.database.execute_sql('drop table if exists user_src')
@requires_models(User)
def test_insert_from_sql(self):
query_src = SQL('SELECT name FROM user_src')
User.insert_from(query=query_src, fields=[User.username]).execute()
self.assertEqual([u.username for u in User.select()], ['foo'])
|
TestInsertFromSQL
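The INSERT ... SELECT pattern this test exercises can be shown without peewee. The stdlib-only sketch below uses sqlite3 directly; the users/user_src table names simply mirror the test and are not tied to peewee's actual table naming.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE user_src (name TEXT)")
conn.execute("CREATE TABLE users (username TEXT)")
conn.execute("INSERT INTO user_src (name) VALUES ('foo')")
# Copy rows from one table into another in a single statement.
conn.execute("INSERT INTO users (username) SELECT name FROM user_src")
assert [row[0] for row in conn.execute("SELECT username FROM users")] == ["foo"]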
|
python
|
doocs__leetcode
|
solution/0100-0199/0152.Maximum Product Subarray/Solution.py
|
{
"start": 0,
"end": 276
}
|
class ____:
def maxProduct(self, nums: List[int]) -> int:
ans = f = g = nums[0]
for x in nums[1:]:
ff, gg = f, g
f = max(x, ff * x, gg * x)
g = min(x, ff * x, gg * x)
ans = max(ans, f)
return ans
|
Solution
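A quick standalone check of the rolling max/min product recurrence, on the classic example [2, 3, -2, 4] whose answer is 6:

nums = [2, 3, -2, 4]
ans = f = g = nums[0]
for x in nums[1:]:
    ff, gg = f, g
    f = max(x, ff * x, gg * x)  # best product of a subarray ending at x
    g = min(x, ff * x, gg * x)  # worst (most negative) product ending at x
    ans = max(ans, f)
assert ans == 6  # the subarray [2, 3]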
|
python
|
huggingface__transformers
|
src/transformers/models/cohere2/modular_cohere2.py
|
{
"start": 18706,
"end": 18854
}
|
class ____(CohereForCausalLM):
pass
__all__ = ["Cohere2Config", "Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
|
Cohere2ForCausalLM
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/import7.py
|
{
"start": 115,
"end": 267
}
|
class ____:
# This should generate an error.
from .import5 import *
def func1():
# This should generate an error.
from .import5 import *
|
A
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/hooks/test_kms_system.py
|
{
"start": 1599,
"end": 4302
}
|
class ____(GoogleSystemTest):
@provide_gcp_context(GCP_KMS_KEY)
def test_encrypt(self):
with TemporaryDirectory() as tmp_dir:
kms_hook = CloudKMSHook()
content = kms_hook.encrypt(
key_name=(
f"projects/{kms_hook.project_id}/locations/global/keyRings/"
f"{GCP_KMS_KEYRING_NAME}/cryptoKeys/{GCP_KMS_KEY_NAME}"
),
plaintext=b"TEST-SECRET",
)
with open(f"{tmp_dir}/mysecret.txt.encrypted", "wb") as encrypted_file:
encrypted_file.write(base64.b64decode(content))
self.execute_cmd(
[
"gcloud",
"kms",
"decrypt",
"--location",
"global",
"--keyring",
GCP_KMS_KEYRING_NAME,
"--key",
GCP_KMS_KEY_NAME,
"--ciphertext-file",
f"{tmp_dir}/mysecret.txt.encrypted",
"--plaintext-file",
f"{tmp_dir}/mysecret.txt",
]
)
with open(f"{tmp_dir}/mysecret.txt", "rb") as secret_file:
secret = secret_file.read()
assert secret == b"TEST-SECRET"
@provide_gcp_context(GCP_KMS_KEY)
def test_decrypt(self):
with TemporaryDirectory() as tmp_dir:
with open(f"{tmp_dir}/mysecret.txt", "w") as secret_file:
secret_file.write("TEST-SECRET")
self.execute_cmd(
[
"gcloud",
"kms",
"encrypt",
"--location",
"global",
"--keyring",
GCP_KMS_KEYRING_NAME,
"--key",
GCP_KMS_KEY_NAME,
"--plaintext-file",
f"{tmp_dir}/mysecret.txt",
"--ciphertext-file",
f"{tmp_dir}/mysecret.txt.encrypted",
]
)
with open(f"{tmp_dir}/mysecret.txt.encrypted", "rb") as encrypted_file:
encrypted_secret = base64.b64encode(encrypted_file.read()).decode()
kms_hook = CloudKMSHook()
content = kms_hook.decrypt(
key_name=(
f"projects/{kms_hook.project_id}/locations/global/keyRings/"
f"{GCP_KMS_KEYRING_NAME}/cryptoKeys/{GCP_KMS_KEY_NAME}"
),
ciphertext=encrypted_secret,
)
assert content == b"TEST-SECRET"
|
TestKmsHookSystem
|
python
|
RaRe-Technologies__gensim
|
gensim/models/phrases.py
|
{
"start": 31338,
"end": 34068
}
|
class ____(_PhrasesTransformation):
"""Minimal state & functionality exported from a trained :class:`~gensim.models.phrases.Phrases` model.
The goal of this class is to cut down memory consumption of `Phrases`, by discarding model state
not strictly needed for the phrase detection task.
Use this instead of `Phrases` if you do not need to update the bigram statistics with new documents any more.
"""
def __init__(self, phrases_model):
"""
Parameters
----------
phrases_model : :class:`~gensim.models.phrases.Phrases`
Trained phrases instance, to extract all phrases from.
Notes
-----
After the one-time initialization, a :class:`~gensim.models.phrases.FrozenPhrases` will be much
smaller and faster than using the full :class:`~gensim.models.phrases.Phrases` model.
Examples
----------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Load corpus and train a model.
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, min_count=1, threshold=1, connector_words=ENGLISH_CONNECTOR_WORDS)
>>>
>>> # Export a FrozenPhrases object that is more efficient but doesn't allow further training.
>>> frozen_phrases = phrases.freeze()
>>> print(frozen_phrases[sent])
[u'trees_graph', u'minors']
"""
self.threshold = phrases_model.threshold
self.min_count = phrases_model.min_count
self.delimiter = phrases_model.delimiter
self.scoring = phrases_model.scoring
self.connector_words = phrases_model.connector_words
logger.info('exporting phrases from %s', phrases_model)
start = time.time()
self.phrasegrams = phrases_model.export_phrases()
self.add_lifecycle_event("created", msg=f"exported {self} from {phrases_model} in {time.time() - start:.2f}s")
def __str__(self):
return "%s<%i phrases, min_count=%s, threshold=%s>" % (
self.__class__.__name__, len(self.phrasegrams), self.min_count, self.threshold,
)
def score_candidate(self, word_a, word_b, in_between):
phrase = self.delimiter.join([word_a] + in_between + [word_b])
score = self.phrasegrams.get(phrase, NEGATIVE_INFINITY)
if score > self.threshold:
return phrase, score
return None, None
Phraser = FrozenPhrases # alias for backward compatibility
|
FrozenPhrases
|
python
|
walkccc__LeetCode
|
solutions/2203. Minimum Weighted Subgraph With the Required Paths/2203.py
|
{
"start": 0,
"end": 1046
}
|
class ____:
def minimumWeight(
self,
n: int,
edges: list[list[int]],
src1: int,
src2: int,
dest: int,
) -> int:
graph = [[] for _ in range(n)]
reversedGraph = [[] for _ in range(n)]
for u, v, w in edges:
graph[u].append((v, w))
reversedGraph[v].append((u, w))
fromSrc1 = self._dijkstra(graph, src1)
fromSrc2 = self._dijkstra(graph, src2)
fromDest = self._dijkstra(reversedGraph, dest)
minWeight = min(a + b + c for a, b, c in zip(fromSrc1, fromSrc2, fromDest))
return -1 if minWeight == math.inf else minWeight
def _dijkstra(
self,
graph: list[list[tuple[int, int]]],
src: int,
) -> list[int]:
dist = [math.inf] * len(graph)
dist[src] = 0
minHeap = [(dist[src], src)] # (d, u)
while minHeap:
d, u = heapq.heappop(minHeap)
if d > dist[u]:
continue
for v, w in graph[u]:
if d + w < dist[v]:
dist[v] = d + w
heapq.heappush(minHeap, (dist[v], v))
return dist
|
Solution
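A quick usage check, assuming the Solution class above is in scope together with the math and heapq imports the LeetCode harness normally provides. The graph is LeetCode's first sample for this problem, with expected answer 9.

edges = [[0, 2, 2], [0, 5, 6], [1, 0, 3], [1, 4, 5], [2, 1, 1],
         [2, 3, 3], [2, 3, 4], [3, 4, 2], [4, 5, 1]]
assert Solution().minimumWeight(6, edges, src1=0, src2=1, dest=5) == 9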
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/legacy_tf_layers/convolutional.py
|
{
"start": 29027,
"end": 34195
}
|
class ____(keras_layers.SeparableConv1D, base.Layer):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv1D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
|
SeparableConv1D
|
python
|
pytorch__pytorch
|
torch/_export/serde/schema.py
|
{
"start": 10543,
"end": 11011
}
|
class ____:
inputs: Annotated[list[Argument], 10]
outputs: Annotated[list[Argument], 20]
# These are serialized by calling pytree.treespec_loads
# And deserialized by calling pytree.treespec_dumps
in_spec: Annotated[str, 30]
out_spec: Annotated[str, 40]
# This field is used to prettify the graph placeholders
# after we Ser/Der and retrace
forward_arg_names: Annotated[Optional[list[str]], 50] = None
@dataclass
|
ModuleCallSignature
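The integer tags on these Annotated fields read like stable field ids for the serializer (that interpretation is an assumption here); either way, they can be recovered at runtime with typing.get_type_hints, as this standalone sketch shows.

from dataclasses import dataclass
from typing import Annotated, get_type_hints

@dataclass
class Example:  # invented stand-in with the same Annotated-field pattern
    inputs: Annotated[list[str], 10]
    in_spec: Annotated[str, 30]

hints = get_type_hints(Example, include_extras=True)
assert hints["inputs"].__metadata__ == (10,)
assert hints["in_spec"].__metadata__ == (30,)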
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py
|
{
"start": 6788,
"end": 8373
}
|
class ____(IssueRemoteLinks, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issue-remote-links/#api-rest-api-3-issue-issueidorkey-remotelink-post
"""
def generate(self):
issues_stream = Issues(authenticator=self._session.auth, domain=self._domain)
for issue in issues_stream.read_records(sync_mode=SyncMode.full_refresh):
payload = json.dumps(
{
"application": {"name": "My Acme Tracker", "type": "com.acme.tracker"},
"globalId": "system=https://www.mycompany.com/support&id=1",
"relationship": "causes",
"object": {
"summary": "Customer support issue",
"icon": {"url16x16": "https://www.mycompany.com/support/ticket.png", "title": "Support Ticket"},
"title": "TSTSUP-111",
"url": "https://www.mycompany.com/support?id=1",
"status": {
"icon": {
"url16x16": "https://www.mycompany.com/support/resolved.png",
"link": "https://www.mycompany.com/support?id=1&details=closed",
"title": "Case Closed",
},
"resolved": True,
},
},
}
)
self.generate_record(payload, stream_slice={"key": issue["key"]})
|
IssueRemoteLinksGenerator
|
python
|
keon__algorithms
|
algorithms/tree/bst/bst.py
|
{
"start": 3095,
"end": 3735
}
|
class ____(unittest.TestCase):
def setUp(self):
self.tree = BST()
self.tree.insert(10)
self.tree.insert(15)
self.tree.insert(6)
self.tree.insert(4)
self.tree.insert(9)
self.tree.insert(12)
self.tree.insert(24)
self.tree.insert(7)
self.tree.insert(20)
self.tree.insert(30)
self.tree.insert(18)
def test_search(self):
self.assertTrue(self.tree.search(24))
self.assertFalse(self.tree.search(50))
def test_size(self):
self.assertEqual(11, self.tree.size())
if __name__ == '__main__':
unittest.main()
|
TestSuite
|
python
|
numba__numba
|
numba/tests/test_flow_control.py
|
{
"start": 3463,
"end": 9023
}
|
class ____(TestCase):
def run_test(self, pyfunc, x_operands, y_operands,
flags=enable_pyobj_flags):
cfunc = jit((types.intp, types.intp), **flags)(pyfunc)
for x, y in itertools.product(x_operands, y_operands):
pyerr = None
cerr = None
try:
pyres = pyfunc(x, y)
except Exception as e:
pyerr = e
try:
cres = cfunc(x, y)
except Exception as e:
if pyerr is None:
raise
cerr = e
self.assertEqual(type(pyerr), type(cerr))
else:
if pyerr is not None:
self.fail("Invalid for pure-python but numba works\n" +
          str(pyerr))
self.assertEqual(pyres, cres)
def test_for_loop1(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase1, [-10, 0, 10], [0], flags=flags)
def test_for_loop1_npm(self):
self.test_for_loop1(flags=no_pyobj_flags)
def test_for_loop2(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase2, [-10, 0, 10], [-10, 0, 10],
flags=flags)
def test_for_loop2_npm(self):
self.test_for_loop2(flags=no_pyobj_flags)
def test_for_loop3(self, flags=enable_pyobj_flags):
"""
List requires pyobject
"""
self.run_test(for_loop_usecase3, [1], [2],
flags=flags)
def test_for_loop3_npm(self):
self.test_for_loop3(flags=no_pyobj_flags)
def test_for_loop4(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase4, [10], [10], flags=flags)
def test_for_loop4_npm(self):
self.test_for_loop4(flags=no_pyobj_flags)
def test_for_loop5(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase5, [100], [50], flags=flags)
def test_for_loop5_npm(self):
self.test_for_loop5(flags=no_pyobj_flags)
def test_for_loop6(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase6, [100], [50], flags=flags)
def test_for_loop6_npm(self):
self.test_for_loop6(flags=no_pyobj_flags)
def test_for_loop7(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase7, [5], [0], flags=flags)
def test_for_loop7_npm(self):
self.test_for_loop7(flags=no_pyobj_flags)
def test_for_loop8(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase8, [0, 1], [0, 2, 10], flags=flags)
def test_for_loop8_npm(self):
self.test_for_loop8(flags=no_pyobj_flags)
def test_for_loop9(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase9, [0, 1], [0, 2, 10], flags=flags)
def test_for_loop9_npm(self):
self.test_for_loop9(flags=no_pyobj_flags)
def test_for_loop10(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase10, [5], [2, 7], flags=flags)
def test_for_loop10_npm(self):
self.test_for_loop10(flags=no_pyobj_flags)
def test_while_loop1(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase1, [10], [0], flags=flags)
def test_while_loop1_npm(self):
self.test_while_loop1(flags=no_pyobj_flags)
def test_while_loop2(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase2, [10], [0], flags=flags)
def test_while_loop2_npm(self):
self.test_while_loop2(flags=no_pyobj_flags)
def test_while_loop3(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase3, [10], [10], flags=flags)
def test_while_loop3_npm(self):
self.test_while_loop3(flags=no_pyobj_flags)
def test_while_loop4(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase4, [10], [0], flags=flags)
def test_while_loop4_npm(self):
self.test_while_loop4(flags=no_pyobj_flags)
def test_while_loop5(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase5, [0, 5, 10], [0, 5, 10], flags=flags)
def test_while_loop5_npm(self):
self.test_while_loop5(flags=no_pyobj_flags)
def test_ifelse1(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase1, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse1_npm(self):
self.test_ifelse1(flags=no_pyobj_flags)
def test_ifelse2(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase2, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse2_npm(self):
self.test_ifelse2(flags=no_pyobj_flags)
def test_ifelse3(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase3, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse3_npm(self):
self.test_ifelse3(flags=no_pyobj_flags)
def test_ifelse4(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase4, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse4_npm(self):
self.test_ifelse4(flags=no_pyobj_flags)
def test_ternary_ifelse1(self, flags=enable_pyobj_flags):
self.run_test(ternary_ifelse_usecase1, [-1, 0, 1], [-1, 0, 1],
flags=flags)
def test_ternary_ifelse1_npm(self):
self.test_ternary_ifelse1(flags=no_pyobj_flags)
def test_double_infinite_loop(self, flags=enable_pyobj_flags):
self.run_test(double_infinite_loop, [10], [0],
flags=flags)
def test_double_infinite_loop_npm(self):
self.test_double_infinite_loop(flags=no_pyobj_flags)
|
TestFlowControl
|
python
|
apache__thrift
|
test/py/TestClient.py
|
{
"start": 16872,
"end": 17379
}
|
class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
wrapped_proto = make_pedantic(TJSONProtocol.TJSONProtocolFactory().getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "ThriftTest")
def get_protocol2(self, transport):
wrapped_proto = make_pedantic(TJSONProtocol.TJSONProtocolFactory().getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "SecondService")
|
MultiplexedJSONTest
|
python
|
pytorch__pytorch
|
test/test_dataloader.py
|
{
"start": 117363,
"end": 119267
}
|
class ____(IterableDataset):
def __init__(self, len, size):
super(RandomDataset).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1, persistent_workers=True):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
""",
]
)
def test_dataset_not_reset(self):
dataset = DummyDataset()
pin_memory_configs = [False]
if TEST_CUDA:
pin_memory_configs.append(True)
for pin_memory in pin_memory_configs:
dataloader = self._get_data_loader(
dataset, num_workers=2, pin_memory=pin_memory
)
dataset.start = 0
for i in range(10):
for _ in dataloader:
pass
# Changing the start value here doesn't have any effect in the dataset
# cached by the workers, since they are not recreated between epochs
# and can cache values safely
dataset.start = i
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_early_exit(self):
import subprocess
proc = subprocess.check_output(
[
sys.executable,
"-c",
"""\
import torch
from torch.utils.data import DataLoader, IterableDataset
|
RandomDataset
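A minimal standalone variant of this iterable dataset is shown below, assuming a working PyTorch install. Note that the snippet above calls super(RandomDataset).__init__(), which skips the parent initializer; super().__init__() is the usual spelling and is used here.

import torch
from torch.utils.data import DataLoader, IterableDataset

class RandomTensorDataset(IterableDataset):
    def __init__(self, length: int, size: tuple):
        super().__init__()
        self.length = length
        self.size = size

    def __iter__(self):
        for _ in range(self.length):
            yield torch.randn(self.size)

# Four samples with batch_size=2 give two stacked batches of shape (2, 2, 2).
batches = list(DataLoader(RandomTensorDataset(4, (2, 2)), batch_size=2))
assert len(batches) == 2 and batches[0].shape == (2, 2, 2)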
|
python
|
PyCQA__pylint
|
tests/functional/u/unsupported/unsupported_assignment_operation.py
|
{
"start": 1763,
"end": 1894
}
|
class ____(LibSubscriptable):
pass
MaybeSubscriptable()[0] = 42
# subscriptable classes (through metaclasses)
|
MaybeSubscriptable
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/callbacks/callback.py
|
{
"start": 804,
"end": 11010
}
|
class ____:
r"""Abstract base class used to build new callbacks.
Subclass this class and override any of the relevant hooks
"""
@property
def state_key(self) -> str:
"""Identifier for the state of the callback.
Used to store and retrieve a callback's state from the checkpoint dictionary by
``checkpoint["callbacks"][state_key]``. Implementations of a callback need to provide a unique state key if 1)
the callback has state and 2) it is desired to maintain the state of multiple instances of that callback.
"""
return self.__class__.__qualname__
@property
def _legacy_state_key(self) -> type["Callback"]:
"""State key for checkpoints saved prior to version 1.5.0."""
return type(self)
def _generate_state_key(self, **kwargs: Any) -> str:
"""Formats a set of key-value pairs into a state key string with the callback class name prefixed. Useful for
defining a :attr:`state_key`.
Args:
**kwargs: A set of key-value pairs. Must be serializable to :class:`str`.
"""
return f"{self.__class__.__qualname__}{repr(kwargs)}"
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None:
"""Called when fit, validate, test, predict, or tune begins."""
def teardown(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None:
"""Called when fit, validate, test, predict, or tune ends."""
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when fit begins."""
def on_fit_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when fit ends."""
def on_sanity_check_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the validation sanity check starts."""
def on_sanity_check_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the validation sanity check ends."""
def on_train_batch_start(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int
) -> None:
"""Called when the train batch begins."""
def on_train_batch_end(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int
) -> None:
"""Called when the train batch ends.
Note:
The value ``outputs["loss"]`` here will be the normalized value w.r.t ``accumulate_grad_batches`` of the
loss returned from ``training_step``.
"""
def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the train epoch begins."""
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the train epoch ends.
To access all batch outputs at the end of the epoch, you can cache step outputs as an attribute of the
:class:`lightning.pytorch.core.LightningModule` and access them in this hook:
.. code-block:: python
class MyLightningModule(L.LightningModule):
def __init__(self):
super().__init__()
self.training_step_outputs = []
def training_step(self):
loss = ...
self.training_step_outputs.append(loss)
return loss
class MyCallback(L.Callback):
def on_train_epoch_end(self, trainer, pl_module):
# do something with all training_step outputs, for example:
epoch_mean = torch.stack(pl_module.training_step_outputs).mean()
pl_module.log("training_epoch_mean", epoch_mean)
# free up the memory
pl_module.training_step_outputs.clear()
"""
def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the val epoch begins."""
def on_validation_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the val epoch ends."""
def on_test_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the test epoch begins."""
def on_test_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the test epoch ends."""
def on_predict_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the predict epoch begins."""
def on_predict_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the predict epoch ends."""
def on_validation_batch_start(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
"""Called when the validation batch begins."""
def on_validation_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
"""Called when the validation batch ends."""
def on_test_batch_start(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
"""Called when the test batch begins."""
def on_test_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
"""Called when the test batch ends."""
def on_predict_batch_start(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
"""Called when the predict batch begins."""
def on_predict_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: Any,
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
"""Called when the predict batch ends."""
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the train begins."""
def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the train ends."""
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the validation loop begins."""
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the validation loop ends."""
def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the test begins."""
def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the test ends."""
def on_predict_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when the predict begins."""
def on_predict_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called when predict ends."""
def on_exception(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", exception: BaseException) -> None:
"""Called when any trainer execution is interrupted by an exception."""
def state_dict(self) -> dict[str, Any]:
"""Called when saving a checkpoint, implement to generate callback's ``state_dict``.
Returns:
A dictionary containing callback state.
"""
return {}
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""Called when loading a checkpoint, implement to reload callback state given callback's ``state_dict``.
Args:
state_dict: the callback state returned by ``state_dict``.
"""
pass
def on_save_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: dict[str, Any]
) -> None:
r"""Called when saving a checkpoint to give you a chance to store anything else you might want to save.
Args:
trainer: the current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
pl_module: the current :class:`~lightning.pytorch.core.LightningModule` instance.
checkpoint: the checkpoint dictionary that will be saved.
"""
def on_load_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: dict[str, Any]
) -> None:
r"""Called when loading a model checkpoint, use to reload state.
Args:
trainer: the current :class:`~lightning.pytorch.trainer.trainer.Trainer` instance.
pl_module: the current :class:`~lightning.pytorch.core.LightningModule` instance.
checkpoint: the full checkpoint dictionary that got loaded by the Trainer.
"""
def on_before_backward(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", loss: Tensor) -> None:
"""Called before ``loss.backward()``."""
def on_after_backward(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Called after ``loss.backward()`` and before optimizers are stepped."""
def on_before_optimizer_step(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", optimizer: Optimizer
) -> None:
"""Called before ``optimizer.step()``."""
def on_before_zero_grad(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", optimizer: Optimizer) -> None:
"""Called before ``optimizer.zero_grad()``."""
|
Callback
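A minimal custom callback, sketched here under the assumption that the lightning package is installed, shows how a few of the hooks above compose with the state_dict/load_state_dict pair.

import lightning as L

class BatchCounter(L.Callback):
    """Counts completed training batches and persists the count in checkpoints."""

    def __init__(self):
        self.batches_seen = 0

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        self.batches_seen += 1

    def state_dict(self):
        # Stored under checkpoint["callbacks"][self.state_key].
        return {"batches_seen": self.batches_seen}

    def load_state_dict(self, state_dict):
        self.batches_seen = state_dict["batches_seen"]

# Passed to the trainer like any other callback:
# trainer = L.Trainer(callbacks=[BatchCounter()])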
|
python
|
pyparsing__pyparsing
|
examples/bf.py
|
{
"start": 2742,
"end": 2843
}
|
class ____(Instruction):
def execute(self, bf_engine: BFEngine):
bf_engine.ptr -= 1
|
DecrPtr
|
python
|
davidhalter__jedi
|
jedi/inference/lazy_value.py
|
{
"start": 512,
"end": 632
}
|
class ____(AbstractLazyValue):
"""data is a ValueSet."""
def infer(self):
return self.data
|
LazyKnownValues
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_subject_access_review_status.py
|
{
"start": 383,
"end": 7409
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allowed': 'bool',
'denied': 'bool',
'evaluation_error': 'str',
'reason': 'str'
}
attribute_map = {
'allowed': 'allowed',
'denied': 'denied',
'evaluation_error': 'evaluationError',
'reason': 'reason'
}
def __init__(self, allowed=None, denied=None, evaluation_error=None, reason=None, local_vars_configuration=None): # noqa: E501
"""V1SubjectAccessReviewStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allowed = None
self._denied = None
self._evaluation_error = None
self._reason = None
self.discriminator = None
self.allowed = allowed
if denied is not None:
self.denied = denied
if evaluation_error is not None:
self.evaluation_error = evaluation_error
if reason is not None:
self.reason = reason
@property
def allowed(self):
"""Gets the allowed of this V1SubjectAccessReviewStatus. # noqa: E501
Allowed is required. True if the action would be allowed, false otherwise. # noqa: E501
:return: The allowed of this V1SubjectAccessReviewStatus. # noqa: E501
:rtype: bool
"""
return self._allowed
@allowed.setter
def allowed(self, allowed):
"""Sets the allowed of this V1SubjectAccessReviewStatus.
Allowed is required. True if the action would be allowed, false otherwise. # noqa: E501
:param allowed: The allowed of this V1SubjectAccessReviewStatus. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and allowed is None: # noqa: E501
raise ValueError("Invalid value for `allowed`, must not be `None`") # noqa: E501
self._allowed = allowed
@property
def denied(self):
"""Gets the denied of this V1SubjectAccessReviewStatus. # noqa: E501
Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true. # noqa: E501
:return: The denied of this V1SubjectAccessReviewStatus. # noqa: E501
:rtype: bool
"""
return self._denied
@denied.setter
def denied(self, denied):
"""Sets the denied of this V1SubjectAccessReviewStatus.
Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true. # noqa: E501
:param denied: The denied of this V1SubjectAccessReviewStatus. # noqa: E501
:type: bool
"""
self._denied = denied
@property
def evaluation_error(self):
"""Gets the evaluation_error of this V1SubjectAccessReviewStatus. # noqa: E501
EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. # noqa: E501
:return: The evaluation_error of this V1SubjectAccessReviewStatus. # noqa: E501
:rtype: str
"""
return self._evaluation_error
@evaluation_error.setter
def evaluation_error(self, evaluation_error):
"""Sets the evaluation_error of this V1SubjectAccessReviewStatus.
EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. # noqa: E501
:param evaluation_error: The evaluation_error of this V1SubjectAccessReviewStatus. # noqa: E501
:type: str
"""
self._evaluation_error = evaluation_error
@property
def reason(self):
"""Gets the reason of this V1SubjectAccessReviewStatus. # noqa: E501
Reason is optional. It indicates why a request was allowed or denied. # noqa: E501
:return: The reason of this V1SubjectAccessReviewStatus. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1SubjectAccessReviewStatus.
Reason is optional. It indicates why a request was allowed or denied. # noqa: E501
:param reason: The reason of this V1SubjectAccessReviewStatus. # noqa: E501
:type: str
"""
self._reason = reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SubjectAccessReviewStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SubjectAccessReviewStatus):
return True
return self.to_dict() != other.to_dict()
|
V1SubjectAccessReviewStatus
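A short usage sketch, assuming the kubernetes Python client is installed: the generated model validates its required field and round-trips to a dict keyed by the Python attribute names.

from kubernetes.client import V1SubjectAccessReviewStatus

status = V1SubjectAccessReviewStatus(allowed=False, denied=True, reason="RBAC: access denied")
assert status.to_dict() == {
    "allowed": False,
    "denied": True,
    "evaluation_error": None,
    "reason": "RBAC: access denied",
}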
|
python
|
pytorch__pytorch
|
test/test_decomp.py
|
{
"start": 42535,
"end": 50699
}
|
class ____(TestCase):
@onlyNativeDeviceTypes
@skipIfCrossRef
def test_contiguous_softmax(self, device):
size = (2, 4, 3, 3)
stride = (9, 18, 3, 1)
dtype = torch.float32
x = torch.randn(size, dtype=dtype, device=device)
x = torch.as_strided(x, size, stride)
ref = torch.ops.aten._softmax(x, -1, False)
res = torch._decomp.decompositions._softmax(x, -1, False)
self.assertEqual(ref.stride(), res.stride())
@onlyNativeDeviceTypes
@skipIfCrossRef
def test_contiguous_log_softmax(self, device):
size = (2, 4, 3, 3)
stride = (9, 18, 3, 1)
dtype = torch.float32
x = torch.randn(size, dtype=dtype, device=device)
x = torch.as_strided(x, size, stride)
ref = torch.ops.aten._log_softmax(x, -1, False)
res = torch._decomp.decompositions._log_softmax(x, -1, False)
self.assertEqual(ref.stride(), res.stride())
@onlyCUDA
def test_exponential_non_inf(self, device):
inp = torch.empty((4, 400, 256), device=device)
with torch._dynamo.utils.preserve_rng_state():
exp_ref = inp.exponential_()
exp = torch._refs.exponential(inp)
self.assertEqual(exp, exp_ref)
self.assertFalse(exp.isinf().any())
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@skipIfCrossRef
@onlyCUDA
def test_amp_batch_norm_backward(self):
device = "cuda"
grad_out = torch.randn((1, 2, 16, 16), dtype=torch.float16, device=device)
x = torch.randn((1, 2, 16, 16), dtype=torch.float16, device=device)
weight = torch.randn((2,), dtype=torch.float32, device=device)
rmean = torch.randn((2,), dtype=torch.float32, device=device)
rvar = torch.randn((2,), dtype=torch.float32, device=device)
mean = torch.randn((0,), dtype=torch.float32, device=device)
ref = torch.ops.aten.native_batch_norm_backward(
grad_out,
x,
weight,
rmean,
rvar,
mean,
mean,
False,
1e-05,
[True, True, True],
)
res = torch._decomp.decompositions.native_batch_norm_backward(
grad_out,
x,
weight,
rmean,
rvar,
mean,
mean,
False,
1e-05,
[True, True, True],
)
for a, b in zip(ref, res):
self.assertEqual(a.stride(), b.stride())
self.assertEqual(a.dtype, b.dtype)
@onlyNativeDeviceTypes
@skipIfCrossRef
def test_elu_backward(self, device):
size = (2, 4, 3, 3)
dtype = torch.float32
grad_out = torch.randn(size, dtype=dtype, device=device)
out = torch.randn(size, dtype=dtype, device=device)
ref = torch.ops.aten.elu_backward(grad_out, 1.0, 1, 1, True, out)
res = torch._decomp.decompositions.elu_backward(grad_out, 1.0, 1, 1, True, out)
self.assertEqual(ref, res)
@onlyNativeDeviceTypes
@skipIfCrossRef
def test_threshold_backward_dtype(self, device):
grad = torch.randint(10, (4,), device=device)
input_tensor = torch.randint(10, (4,), device=device)
ref = torch.ops.aten.threshold_backward(grad, input_tensor, 1)
res = torch._decomp.decompositions.threshold_backward(grad, input_tensor, 1)
self.assertEqual(ref.dtype, res.dtype)
@onlyNativeDeviceTypes
@skipIfCrossRef
def test_weight_norm_interface(self, device):
g = torch.randn((3, 10, 10), device=device)
v = torch.randn((1, 1, 10), device=device)
ref = torch.ops.aten._weight_norm_interface(g, v, 2)
res = torch._decomp.decompositions._weight_norm_interface(g, v, 2)
self.assertTrue(torch.allclose(ref[0], res[0]))
self.assertTrue(torch.allclose(ref[1], res[1]))
inp = torch.rand([30, 10], device=device)
inp2 = torch.rand([30, 1], device=device)
self.assertEqual(
torch.ops.aten._weight_norm_interface(inp, inp2),
torch._decomp.decompositions._weight_norm_interface(inp, inp2),
)
@onlyCPU
@skipIfCrossRef
@skipOps(
"DecompOneOffTests",
"test_sdpa",
[
xfail(
"nn.functional.scaled_dot_product_attention",
dtypes=[torch.half],
),
],
)
@ops(_sdpa_op_info)
def test_sdpa(self, device, dtype, op):
# SDPA doesn't support float16, this is aligned with aten/src/ATen/native/transformers/attention.cpp. If we
# add support for float16 over there we should update this test as well.
query_layer = torch.randn(1, 128, 100, 64, device=device, dtype=dtype)
key_layer = torch.randn(1, 128, 100, 64, device=device, dtype=dtype)
value_layer = torch.randn(1, 128, 100, 64, device=device, dtype=dtype)
masks = [None, torch.ones((1, 1, 100, 100), device=device, dtype=torch.bool)]
atol, rtol = dtype_precisions[dtype]
for mask in masks:
is_causal = mask is None
decomposed_res = (
torch._decomp.decompositions.scaled_dot_product_flash_attention_for_cpu(
query_layer, key_layer, value_layer, 0.0, is_causal, attn_mask=mask
)
)
actual_res = decomposed_res[0]
# Output has form (N, H, L, E), but should be continuous on (L, N, H, E)
# in order for subsequent view(L * N, H * E) to be valid.
# So permute(2, 0, 1, 3) before checking that tensor is contiguous
self.assertTrue(actual_res.permute(2, 0, 1, 3).is_contiguous())
eager_res = op(
query_layer,
key_layer,
value_layer,
attn_mask=mask,
dropout_p=0.0,
is_causal=is_causal,
)
self.assertTrue(torch.allclose(actual_res, eager_res, atol=atol, rtol=rtol))
@onlyCPU
def test_native_layer_norm_cpu_decomp(self, device):
def f(x, w, b):
return torch.ops.aten.native_layer_norm.default(x, [1, 2, 3], w, b, eps=0.5)
x = torch.randn(1, 2, 3, dtype=torch.bfloat16, device="cpu")
w = torch.randn(1, 2, 3, dtype=torch.bfloat16, requires_grad=True, device="cpu")
b = torch.randn(1, 2, 3, dtype=torch.bfloat16, requires_grad=True, device="cpu")
out_ref = f(x, w, b)
from torch._subclasses.fake_tensor import FakeTensorMode
with enable_python_dispatcher(), FakeTensorMode():
x = torch.randn(1, 2, 3, dtype=torch.bfloat16, device="cpu")
w = torch.randn(
1, 2, 3, dtype=torch.bfloat16, requires_grad=True, device="cpu"
)
b = torch.randn(
1, 2, 3, dtype=torch.bfloat16, requires_grad=True, device="cpu"
)
out = f(x, w, b)
for o_ref, o in zip(out_ref, out):
self.assertEqual(o_ref.dtype, o.dtype)
@onlyCUDA
@unittest.skipIf(not SM70OrLater, "triton")
def test_rms_norm_decomp_cuda(self, device):
@torch.compile
def rms_norm_sinh(a, b, c):
output = torch.nn.functional.rms_norm(a, b, c)
return torch.sinh(output)
normalized_shape_arg = (3, 3, 3)
input_tensor = torch.randn(3, 3, 3, device=device, requires_grad=True)
weight_tensor = torch.randn(3, 3, 3, device=device, requires_grad=True)
def forward_pass_fn():
return rms_norm_sinh(input_tensor, normalized_shape_arg, weight_tensor)
model_output, generated_codes = torch._inductor.utils.run_fw_bw_and_get_code(
forward_pass_fn
)
# check RMSNorm was fused with sinh
self.assertTrue("triton_per_fused__fused_rms_norm_sinh" in generated_codes[0])
self.assertTrue(
"triton_per_fused__fused_rms_norm__fused_rms_norm_backward_cosh_mul"
in generated_codes[1]
)
instantiate_device_type_tests(DecompOneOffTests, globals())
|
DecompOneOffTests
|
python
|
pypa__pipenv
|
pipenv/vendor/zipp/compat/overlay.py
|
{
"start": 500,
"end": 805
}
|
class ____(types.SimpleNamespace):
def __hash__(self):
return hash(tuple(vars(self)))
zipfile = HashableNamespace(**vars(importlib.import_module('zipfile')))
zipfile.Path = zipp.Path
zipfile._path = zipp
sys.modules[__name__ + '.zipfile'] = zipfile # type: ignore[assignment]
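A hedged usage sketch, assuming a recent zipp release that ships this overlay module; the overlaid namespace swaps in zipp's backported Path and, thanks to the class above, is hashable:

```python
import zipp
from zipp.compat.overlay import zipfile  # assumes zipp provides this overlay

assert zipfile.Path is zipp.Path         # backported Path, not the stdlib one
assert isinstance(hash(zipfile), int)    # HashableNamespace makes this legal
```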
|
HashableNamespace
|
python
|
realpython__materials
|
python-protocol/animals_v2.py
|
{
"start": 0,
"end": 263
}
|
class ____:
def __init__(self, name):
self.name = name
def eat(self):
print(f"{self.name} is eating.")
def drink(self):
print(f"{self.name} is drinking.")
def make_sound(self):
print(f"{self.name} is barking.")
|
Dog
|
python
|
PyCQA__pylint
|
tests/functional/u/undefined/undefined_variable.py
|
{
"start": 5096,
"end": 5245
}
|
class ____:
myattr = 1
# Different base_scope scope but still applies
mylambda2 = lambda: [LambdaClass2.myattr for _ in [1, 2]]
|
LambdaClass2
|
python
|
wandb__wandb
|
wandb/sdk/internal/job_builder.py
|
{
"start": 2449,
"end": 2726
}
|
class ____(TypedDict, total=False):
_version: str
source_type: str
source: Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict]
input_types: Dict[str, Any]
output_types: Dict[str, Any]
runtime: Optional[str]
services: Dict[str, str]
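A hedged sketch of a payload conforming to the TypedDict above (with `total=False`, every key is optional); the values are illustrative only:

```python
job_source: "JobSourceDict" = {
    "_version": "v0",
    "source_type": "artifact",
    "input_types": {"dataset": "str"},
    "output_types": {"accuracy": "float"},
    "runtime": "3.11",
}
```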
|
JobSourceDict
|
python
|
apache__airflow
|
providers/google/tests/unit/google/marketing_platform/operators/test_campaign_manager.py
|
{
"start": 2958,
"end": 7424
}
|
class ____:
def setup_method(self):
with create_session() as session:
session.query(TI).delete()
def teardown_method(self):
with create_session() as session:
session.query(TI).delete()
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.http")
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.tempfile")
@mock.patch(
"airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
)
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.GCSHook")
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
def test_execute(
self,
mock_base_op,
gcs_hook_mock,
hook_mock,
tempfile_mock,
http_mock,
):
http_mock.MediaIoBaseDownload.return_value.next_chunk.return_value = (
None,
True,
)
tempfile_mock.NamedTemporaryFile.return_value.__enter__.return_value.name = TEMP_FILE_NAME
mock_context = {"task_instance": mock.Mock()}
op = GoogleCampaignManagerDownloadReportOperator(
profile_id=PROFILE_ID,
report_id=REPORT_ID,
file_id=FILE_ID,
bucket_name=BUCKET_NAME,
report_name=REPORT_NAME,
api_version=API_VERSION,
task_id="test_task",
)
op.execute(context=mock_context)
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=None,
)
hook_mock.return_value.get_report_file.assert_called_once_with(
profile_id=PROFILE_ID, report_id=REPORT_ID, file_id=FILE_ID
)
gcs_hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
gcs_hook_mock.return_value.upload.assert_called_once_with(
bucket_name=BUCKET_NAME,
object_name=REPORT_NAME + ".gz",
gzip=True,
filename=TEMP_FILE_NAME,
mime_type="text/csv",
)
mock_context["task_instance"].xcom_push.assert_called_once_with(
key="report_name", value=REPORT_NAME + ".gz"
)
@pytest.mark.parametrize(
"test_bucket_name",
[BUCKET_NAME, f"gs://{BUCKET_NAME}", "XComArg", "{{ ti.xcom_pull(task_ids='taskflow_op') }}"],
)
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.http")
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.tempfile")
@mock.patch(
"airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
)
@mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.GCSHook")
def test_set_bucket_name(
self,
gcs_hook_mock,
hook_mock,
tempfile_mock,
http_mock,
test_bucket_name,
dag_maker,
):
http_mock.MediaIoBaseDownload.return_value.next_chunk.return_value = (
None,
True,
)
tempfile_mock.NamedTemporaryFile.return_value.__enter__.return_value.name = TEMP_FILE_NAME
with dag_maker(dag_id="test_set_bucket_name", start_date=DEFAULT_DATE) as dag:
if BUCKET_NAME not in test_bucket_name:
@dag.task(task_id="taskflow_op")
def f():
return BUCKET_NAME
taskflow_op = f()
GoogleCampaignManagerDownloadReportOperator(
profile_id=PROFILE_ID,
report_id=REPORT_ID,
file_id=FILE_ID,
bucket_name=test_bucket_name if test_bucket_name != "XComArg" else taskflow_op,
report_name=REPORT_NAME,
api_version=API_VERSION,
task_id="test_task",
)
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
gcs_hook_mock.return_value.upload.assert_called_once_with(
bucket_name=BUCKET_NAME,
object_name=REPORT_NAME + ".gz",
gzip=True,
filename=TEMP_FILE_NAME,
mime_type="text/csv",
)
|
TestGoogleCampaignManagerDownloadReportOperator
|
python
|
pytorch__pytorch
|
torch/distributed/_shard/sharding_spec/api.py
|
{
"start": 1022,
"end": 1469
}
|
class ____(PlacementSpec):
"""
Associates placement of an entity with a single device.
Args:
device(:class:`torch.distributed._remote_device`): The device to place the entity on.
"""
device: torch.distributed._remote_device
def __post_init__(self):
if not isinstance(self.device, torch.distributed._remote_device):
self.device = torch.distributed._remote_device(self.device)
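A minimal sketch of the coercion performed in `__post_init__`, assuming the masked class is DevicePlacementSpec and a torch build with distributed support:

```python
import torch.distributed as dist
from torch.distributed._shard.sharding_spec import DevicePlacementSpec

spec = DevicePlacementSpec(device="rank:0/cuda:0")   # plain string is accepted
assert isinstance(spec.device, dist._remote_device)  # coerced in __post_init__
```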
|
DevicePlacementSpec
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/prompts/few_shot_with_templates.py
|
{
"start": 378,
"end": 7804
}
|
class ____(StringPromptTemplate):
"""Prompt template that contains few shot examples."""
examples: list[dict] | None = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Any = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: StringPromptTemplate
"""A PromptTemplate to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: StringPromptTemplate | None = None
"""A PromptTemplate to put before the examples."""
template_format: PromptTemplateFormat = "f-string"
"""The format of the prompt template.
Options are: 'f-string', 'jinja2', 'mustache'."""
validate_template: bool = False
"""Whether or not to try validating the template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "prompts", "few_shot_with_templates"]`
"""
return ["langchain", "prompts", "few_shot_with_templates"]
@model_validator(mode="before")
@classmethod
def check_examples_and_selector(cls, values: dict) -> Any:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples")
example_selector = values.get("example_selector")
if examples and example_selector:
msg = "Only one of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
if examples is None and example_selector is None:
msg = "One of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
return values
@model_validator(mode="after")
def template_is_valid(self) -> Self:
"""Check that prefix, suffix, and input variables are consistent."""
if self.validate_template:
input_variables = self.input_variables
expected_input_variables = set(self.suffix.input_variables)
expected_input_variables |= set(self.partial_variables)
if self.prefix is not None:
expected_input_variables |= set(self.prefix.input_variables)
missing_vars = expected_input_variables.difference(input_variables)
if missing_vars:
msg = (
f"Got input_variables={input_variables}, but based on "
f"prefix/suffix expected {expected_input_variables}"
)
raise ValueError(msg)
else:
self.input_variables = sorted(
set(self.suffix.input_variables)
| set(self.prefix.input_variables if self.prefix else [])
- set(self.partial_variables)
)
return self
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def _get_examples(self, **kwargs: Any) -> list[dict]:
if self.examples is not None:
return self.examples
if self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
raise ValueError
async def _aget_examples(self, **kwargs: Any) -> list[dict]:
if self.examples is not None:
return self.examples
if self.example_selector is not None:
return await self.example_selector.aselect_examples(kwargs)
raise ValueError
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
```python
prompt.format(variable1="foo")
```
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall prefix.
if self.prefix is None:
prefix = ""
else:
prefix_kwargs = {
k: v for k, v in kwargs.items() if k in self.prefix.input_variables
}
for k in prefix_kwargs:
kwargs.pop(k)
prefix = self.prefix.format(**prefix_kwargs)
# Create the overall suffix
suffix_kwargs = {
k: v for k, v in kwargs.items() if k in self.suffix.input_variables
}
for k in suffix_kwargs:
kwargs.pop(k)
suffix = self.suffix.format(
**suffix_kwargs,
)
pieces = [prefix, *example_strings, suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = await self._aget_examples(**kwargs)
# Format the examples.
example_strings = [
# We can use the sync method here as PromptTemplate doesn't block
self.example_prompt.format(**example)
for example in examples
]
# Create the overall prefix.
if self.prefix is None:
prefix = ""
else:
prefix_kwargs = {
k: v for k, v in kwargs.items() if k in self.prefix.input_variables
}
for k in prefix_kwargs:
kwargs.pop(k)
prefix = await self.prefix.aformat(**prefix_kwargs)
# Create the overall suffix
suffix_kwargs = {
k: v for k, v in kwargs.items() if k in self.suffix.input_variables
}
for k in suffix_kwargs:
kwargs.pop(k)
suffix = await self.suffix.aformat(
**suffix_kwargs,
)
pieces = [prefix, *example_strings, suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot_with_templates"
def save(self, file_path: Path | str) -> None:
"""Save the prompt to a file.
Args:
file_path: The path to save the prompt to.
Raises:
ValueError: If example_selector is provided.
"""
if self.example_selector:
msg = "Saving an example selector is not currently supported"
raise ValueError(msg)
return super().save(file_path)
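A hedged usage sketch, assuming the masked class is FewShotPromptWithTemplates and langchain_core is installed; the prompts and examples below are illustrative:

```python
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.few_shot_with_templates import FewShotPromptWithTemplates

example_prompt = PromptTemplate.from_template("Q: {question}\nA: {answer}")
prefix = PromptTemplate.from_template("Answer like a {persona}.")
suffix = PromptTemplate.from_template("Q: {question}\nA:")

prompt = FewShotPromptWithTemplates(
    examples=[{"question": "What is 2 + 2?", "answer": "4"}],
    example_prompt=example_prompt,
    prefix=prefix,
    suffix=suffix,
    input_variables=["persona", "question"],
)

# Prefix, formatted examples, and suffix are joined with example_separator.
print(prompt.format(persona="teacher", question="What is 3 + 3?"))
```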
|
FewShotPromptWithTemplates
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring.py
|
{
"start": 1859,
"end": 1942
}
|
class ____:
b""" has leading whitespace"""
first_statement = 1
|
ByteDocstring
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1252429,
"end": 1252677
}
|
class ____(sgqlc.types.Type, Node, AuditEntry, EnterpriseAuditEntryData, OrganizationAuditEntryData):
"""Audit log entry for a org.invite_to_business event."""
__schema__ = github_schema
__field_names__ = ()
|
OrgInviteToBusinessAuditEntry
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1089353,
"end": 1094899
}
|
class ____(sgqlc.types.Type, Node):
"""A check suite."""
__schema__ = github_schema
__field_names__ = (
"app",
"branch",
"check_runs",
"commit",
"conclusion",
"created_at",
"creator",
"database_id",
"matching_pull_requests",
"push",
"repository",
"resource_path",
"status",
"updated_at",
"url",
"workflow_run",
)
app = sgqlc.types.Field(App, graphql_name="app")
"""The GitHub App which created this check suite."""
branch = sgqlc.types.Field("Ref", graphql_name="branch")
"""The name of the branch for this check suite."""
check_runs = sgqlc.types.Field(
CheckRunConnection,
graphql_name="checkRuns",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("filter_by", sgqlc.types.Arg(CheckRunFilter, graphql_name="filterBy", default=None)),
)
),
)
"""The check runs associated with a check suite.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `filter_by` (`CheckRunFilter`): Filters the check runs by this
type.
"""
commit = sgqlc.types.Field(sgqlc.types.non_null("Commit"), graphql_name="commit")
"""The commit for this check suite"""
conclusion = sgqlc.types.Field(CheckConclusionState, graphql_name="conclusion")
"""The conclusion of this check suite."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
creator = sgqlc.types.Field("User", graphql_name="creator")
"""The user who triggered the check suite."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
matching_pull_requests = sgqlc.types.Field(
PullRequestConnection,
graphql_name="matchingPullRequests",
args=sgqlc.types.ArgDict(
(
(
"states",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None),
),
("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)),
("head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None)),
("base_ref_name", sgqlc.types.Arg(String, graphql_name="baseRefName", default=None)),
("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of open pull requests matching the check suite.
Arguments:
* `states` (`[PullRequestState!]`): A list of states to filter the
pull requests by.
* `labels` (`[String!]`): A list of label names to filter the pull
requests by.
* `head_ref_name` (`String`): The head ref name to filter the pull
requests by.
* `base_ref_name` (`String`): The base ref name to filter the pull
requests by.
* `order_by` (`IssueOrder`): Ordering options for pull requests
returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
push = sgqlc.types.Field("Push", graphql_name="push")
"""The push that triggered this check suite."""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""The repository associated with this check suite."""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for this check suite"""
status = sgqlc.types.Field(sgqlc.types.non_null(CheckStatusState), graphql_name="status")
"""The status of this check suite."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for this check suite"""
workflow_run = sgqlc.types.Field("WorkflowRun", graphql_name="workflowRun")
"""The workflow run associated with this check suite."""
|
CheckSuite
|
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/forms.py
|
{
"start": 4246,
"end": 5664
}
|
class ____(OrganizationForm):
"""
Simple organization creation form.
This trims down the number of inputs required to create a new organization.
This is used on the initial organization signup, to keep signup terse.
:param user: User instance, responsible for ownership of Organization
:type user: django.contrib.auth.models.User
"""
class Meta:
model = Organization
fields = ["name", "slug", "email"]
labels = {
"name": _("Organization Name"),
"email": _("Billing Email"),
}
help_texts = {
"slug": "Used in URLs for your projects when not using a custom domain. It cannot be changed later.",
}
url = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def _create_default_teams(organization):
organization.teams.create(name="Admins", access=ADMIN_ACCESS)
organization.teams.create(name="Read Only", access=READ_ONLY_ACCESS)
def save(self, commit=True):
org = super().save(commit)
# If not committing, we can't save M2M fields
if not commit:
return org
# Add default teams
OrganizationOwner.objects.create(
owner=self.user,
organization=org,
)
self._create_default_teams(org)
return org
|
OrganizationSignupFormBase
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/auth.py
|
{
"start": 6683,
"end": 8293
}
|
class ____(Request):
"""
    Edit a user's auth data properties
:param user: User ID
:type user: str
:param role: The new user's role within the company
:type role: str
"""
_service = "auth"
_action = "edit_user"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"role": {
"description": "The new user's role within the company",
"enum": ["admin", "superuser", "user", "annotator"],
"type": ["string", "null"],
},
"user": {"description": "User ID", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, user: Optional[str] = None, role: Optional[str] = None, **kwargs: Any) -> None:
super(EditUserRequest, self).__init__(**kwargs)
self.user = user
self.role = role
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("role")
def role(self) -> Optional[str]:
return self._property_role
@role.setter
def role(self, value: Optional[str]) -> None:
if value is None:
self._property_role = None
return
self.assert_isinstance(value, "role", six.string_types)
self._property_role = value
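A minimal sketch of building the request, assuming the masked class is EditUserRequest from clearml's generated API bindings; the user ID and role are illustrative:

```python
from clearml.backend_api.services.v2_13.auth import EditUserRequest

req = EditUserRequest(user="abc123", role="admin")
assert req.user == "abc123" and req.role == "admin"
req.role = None  # the setter accepts None and clears the property
```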
|
EditUserRequest
|
python
|
django__django
|
tests/apps/apps.py
|
{
"start": 412,
"end": 476
}
|
class ____(AppConfig):
name = "there is no such app"
|
NoSuchApp
|
python
|
dask__dask
|
dask/tests/test_expr.py
|
{
"start": 3859,
"end": 4524
}
|
class ____(Expr):
def _layer(self) -> dict:
return {"foo": DataNode("foo", 42)}
def test_prohibit_reuse():
once = FooExpr()
ProhibitReuse._ALLOWED_TYPES.append(FooExpr)
try:
dsk = _ExprSequence(once, ProhibitReuse(once)).optimize().__dask_graph__()
assert len(dsk) == 2
first = dsk.pop("foo")()
key, val = dsk.popitem()
assert key.startswith("foo") and key != "foo"
# We don't want to chain anything but actually _hide_ the task
assert not val.dependencies
# Task is wrapped
assert val() is first
finally:
ProhibitReuse._ALLOWED_TYPES.remove(FooExpr)
|
FooExpr
|
python
|
huggingface__transformers
|
src/transformers/models/ministral/modeling_ministral.py
|
{
"start": 11622,
"end": 12167
}
|
class ____(PreTrainedModel):
config: MinistralConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["MinistralDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": MinistralDecoderLayer,
"attentions": MinistralAttention,
}
|
MinistralPreTrainedModel
|
python
|
huggingface__transformers
|
src/transformers/convert_slow_tokenizer.py
|
{
"start": 14066,
"end": 15178
}
|
class ____(Converter):
def converted(self) -> Tokenizer:
tokenizer_info_str = "#version:"
token_suffix = "</w>"
vocab = self.original_tokenizer.encoder
merges = list(self.original_tokenizer.bpe_ranks.keys())
if tokenizer_info_str in merges[0][0]:
merges = merges[1:]
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=None,
unk_token=self.original_tokenizer.unk_token,
end_of_word_suffix=token_suffix,
)
)
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix)
tokenizer.post_processor = processors.BertProcessing(
sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id),
cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id),
)
return tokenizer
|
HerbertConverter
|
python
|
dagster-io__dagster
|
examples/docs_projects/project_mini/src/project_mini/defs/resource_caching/expensive_resource_cache.py
|
{
"start": 121,
"end": 1045
}
|
class ____(dg.ConfigurableResource):
@lru_cache(maxsize=128)
def addition(self, num1: int, num2: int) -> int:
time.sleep(5)
return num1 + num2
# highlight-end
@dg.asset
def expensive_asset_cache(
expensive_resource_cache: ExpensiveResourceCache,
) -> dg.MaterializeResult:
value = expensive_resource_cache.addition(1, 2)
value = expensive_resource_cache.addition(1, 2)
value = expensive_resource_cache.addition(1, 2)
return dg.MaterializeResult(metadata={"addition": value})
@dg.asset(
deps=[expensive_asset_cache],
)
def another_expensive_asset_cache(
expensive_resource_cache: ExpensiveResourceCache,
) -> dg.MaterializeResult:
value = expensive_resource_cache.addition(1, 2)
value = expensive_resource_cache.addition(1, 2)
value = expensive_resource_cache.addition(1, 2)
return dg.MaterializeResult(metadata={"addition": value})
|
ExpensiveResourceCache
|
python
|
pennersr__django-allauth
|
allauth/account/adapter.py
|
{
"start": 1703,
"end": 36568
}
|
class ____(BaseAdapter):
"""The adapter class allows you to override various functionality of the
``allauth.account`` app. To do so, point ``settings.ACCOUNT_ADAPTER`` to
your own class that derives from ``DefaultAccountAdapter`` and override the
behavior by altering the implementation of the methods according to your own
needs.
"""
error_messages = {
"account_inactive": _("This account is currently inactive."),
"cannot_remove_primary_email": _(
"You cannot remove your primary email address."
),
"duplicate_email": _(
"This email address is already associated with this account."
),
"email_password_mismatch": _(
"The email address and/or password you specified are not correct."
),
"phone_password_mismatch": _(
"The phone number and/or password you specified are not correct."
),
"email_taken": _("A user is already registered with this email address."),
"enter_current_password": _("Please type your current password."),
"incorrect_code": _("Incorrect code."),
"incorrect_password": _("Incorrect password."),
"invalid_or_expired_key": _("Invalid or expired key."),
"invalid_login": _("Invalid login."),
"invalid_password_reset": _("The password reset token was invalid."),
"max_email_addresses": _("You cannot add more than %d email addresses."),
"phone_taken": _("A user is already registered with this phone number."),
"too_many_login_attempts": _(
"Too many failed login attempts. Try again later."
),
"unknown_email": _("The email address is not assigned to any user account."),
"unknown_phone": _("The phone number is not assigned to any user account."),
"unverified_primary_email": _("Your primary email address must be verified."),
"username_blacklisted": _(
"Username can not be used. Please use other username."
),
"username_password_mismatch": _(
"The username and/or password you specified are not correct."
),
"username_taken": AbstractUser._meta.get_field("username").error_messages[
"unique"
],
"select_only_one": _("Please select only one."),
"same_as_current": _("The new value must be different from the current one."),
"rate_limited": _("Be patient, you are sending too many requests."),
}
def stash_verified_email(self, request, email):
request.session["account_verified_email"] = email
def unstash_verified_email(self, request):
ret = request.session.get("account_verified_email")
request.session["account_verified_email"] = None
return ret
def is_email_verified(self, request, email):
"""
Checks whether or not the email address is already verified
beyond allauth scope, for example, by having accepted an
invitation before signing up.
"""
ret = False
verified_email = request.session.get("account_verified_email")
if verified_email:
ret = verified_email.lower() == email.lower()
return ret
def can_delete_email(self, email_address) -> bool:
"""
Returns whether or not the given email address can be deleted.
"""
from allauth.account.models import EmailAddress
if not email_address.pk:
return True
has_other = (
EmailAddress.objects.filter(user_id=email_address.user_id)
.exclude(pk=email_address.pk)
.exists()
)
login_by_email = app_settings.LOGIN_METHODS == {app_settings.LoginMethod.EMAIL}
if email_address.primary:
if has_other:
# Don't allow, let the user mark one of the others as primary
# first.
return False
elif login_by_email:
# Last email & login is by email, prevent dangling account.
return False
return True
elif has_other:
# Account won't be dangling.
return True
elif login_by_email:
# This is the last email.
return False
else:
return True
def format_email_subject(self, subject) -> str:
"""
Formats the given email subject.
"""
prefix = app_settings.EMAIL_SUBJECT_PREFIX
if prefix is None:
site = get_current_site(context.request)
prefix = "[{name}] ".format(name=site.name)
return prefix + force_str(subject)
def get_from_email(self):
"""
This is a hook that can be overridden to programmatically
set the 'from' email address for sending emails
"""
return settings.DEFAULT_FROM_EMAIL
def render_mail(self, template_prefix, email, context, headers=None):
"""
Renders an email to `email`. `template_prefix` identifies the
email that is to be sent, e.g. "account/email/email_confirmation"
"""
to = [email] if isinstance(email, str) else email
subject = render_to_string("{0}_subject.txt".format(template_prefix), context)
# remove superfluous line breaks
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
from_email = self.get_from_email()
bodies = {}
html_ext = app_settings.TEMPLATE_EXTENSION
for ext in [html_ext, "txt"]:
try:
template_name = "{0}_message.{1}".format(template_prefix, ext)
bodies[ext] = render_to_string(
template_name,
context,
globals()["context"].request,
).strip()
except TemplateDoesNotExist:
if ext == "txt" and not bodies:
# We need at least one body
raise
if "txt" in bodies:
msg = EmailMultiAlternatives(
subject, bodies["txt"], from_email, to, headers=headers
)
if html_ext in bodies:
msg.attach_alternative(bodies[html_ext], "text/html")
else:
msg = EmailMessage(
subject, bodies[html_ext], from_email, to, headers=headers
)
msg.content_subtype = "html" # Main content is now text/html
return msg
def send_mail(self, template_prefix: str, email: str, context: dict) -> None:
request = globals()["context"].request
ctx = {
"request": request,
"email": email,
"current_site": get_current_site(request),
}
ctx.update(context)
msg = self.render_mail(template_prefix, email, ctx)
msg.send()
def get_signup_redirect_url(self, request):
"""
Returns the default URL to redirect to directly after signing up.
"""
return resolve_url(app_settings.SIGNUP_REDIRECT_URL)
def get_login_redirect_url(self, request):
"""
Returns the default URL to redirect to after logging in. Note
that URLs passed explicitly (e.g. by passing along a `next`
GET parameter) take precedence over the value returned here.
"""
assert request.user.is_authenticated # nosec
url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None)
if url:
warnings.warn(
"LOGIN_REDIRECT_URLNAME is deprecated, simply"
" use LOGIN_REDIRECT_URL with a URL name",
DeprecationWarning,
)
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
def get_logout_redirect_url(self, request):
"""
Returns the URL to redirect to after the user logs out. Note that
this method is also invoked if you attempt to log out while no users
is logged in. Therefore, request.user is not guaranteed to be an
authenticated user.
"""
return resolve_url(app_settings.LOGOUT_REDIRECT_URL)
def get_email_verification_redirect_url(self, email_address):
"""
The URL to return to after email verification.
"""
get_url = getattr(self, "get_email_confirmation_redirect_url", None)
if get_url:
# Deprecated.
return get_url(self.request)
if self.request.user.is_authenticated:
if app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
return app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
else:
return self.get_login_redirect_url(self.request)
else:
return app_settings.EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
def get_password_change_redirect_url(self, request):
"""
The URL to redirect to after a successful password change/set.
NOTE: Not called during the password reset flow.
"""
return reverse("account_change_password")
def is_open_for_signup(self, request):
"""
Checks whether or not the site is open for signups.
        Next to simply returning True/False you can also intervene in the
        regular flow by raising an ImmediateHttpResponse
"""
return True
def new_user(self, request):
"""
Instantiates a new User instance.
"""
user = get_user_model()()
return user
def populate_username(self, request, user):
"""
Fills in a valid username, if required and missing. If the
username is already present it is assumed to be valid
(unique).
"""
from .utils import user_email, user_field, user_username
first_name = user_field(user, "first_name")
last_name = user_field(user, "last_name")
email = user_email(user)
username = user_username(user)
if app_settings.USER_MODEL_USERNAME_FIELD:
user_username(
user,
username
or self.generate_unique_username(
[first_name, last_name, email, username, "user"]
),
)
def generate_unique_username(self, txts, regex=None):
return generate_unique_username(txts, regex)
def save_user(self, request, user, form, commit=True):
"""
Saves a new `User` instance using information provided in the
signup form.
"""
from .utils import user_email, user_field, user_username
data = form.cleaned_data
first_name = data.get("first_name")
last_name = data.get("last_name")
email = data.get("email")
username = data.get("username")
user_email(user, email)
user_username(user, username)
if first_name:
user_field(user, "first_name", first_name)
if last_name:
user_field(user, "last_name", last_name)
if "password1" in data:
password = data["password1"]
elif "password" in data:
password = data["password"]
else:
password = None
if password:
user.set_password(password)
else:
user.set_unusable_password()
self.populate_username(request, user)
if commit:
user.save()
if getattr(form, "_has_phone_field", False):
phone = form.cleaned_data.get("phone")
if phone:
self.set_phone(user, phone, False)
return user
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [
ub.lower() for ub in app_settings.USERNAME_BLACKLIST
]
if username.lower() in username_blacklist_lower:
raise self.validation_error("username_blacklisted")
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
raise self.validation_error("username_taken")
return username
def clean_email(self, email: str) -> str:
"""
Validates an email value. You can hook into this if you want to
(dynamically) restrict what email addresses can be chosen.
"""
return email
def clean_password(self, password, user=None):
"""
Validates a password. You can hook into this if you want to
        restrict the allowed password choices.
"""
min_length = app_settings.PASSWORD_MIN_LENGTH
if min_length:
MinimumLengthValidator(min_length).validate(password)
validate_password(password, user)
return password
def clean_phone(self, phone: str) -> str:
"""
Validates a phone number. You can hook into this if you want to
(dynamically) restrict what phone numbers can be chosen.
"""
return phone
def validate_unique_email(self, email):
return email
def add_message(
self,
request,
level,
message_template=None,
message_context=None,
extra_tags="",
message=None,
):
"""
Wrapper of `django.contrib.messages.add_message`, that reads
the message text from a template.
"""
if is_headless_request(request):
return
if "django.contrib.messages" in settings.INSTALLED_APPS:
if message:
messages.add_message(request, level, message, extra_tags=extra_tags)
return
try:
if message_context is None:
message_context = {}
escaped_message = render_to_string(
message_template,
message_context,
context.request,
).strip()
if escaped_message:
message = html.unescape(escaped_message)
messages.add_message(request, level, message, extra_tags=extra_tags)
except TemplateDoesNotExist:
pass
def ajax_response(self, request, response, redirect_to=None, form=None, data=None):
resp = {}
status = response.status_code
if redirect_to:
status = HTTPStatus.OK
resp["location"] = redirect_to
if form:
if request.method == "POST":
if form.is_valid():
status = HTTPStatus.OK
else:
status = HTTPStatus.BAD_REQUEST
else:
status = HTTPStatus.OK
resp["form"] = self.ajax_response_form(form)
if hasattr(response, "render"):
response.render()
resp["html"] = response.content.decode("utf8")
if data is not None:
resp["data"] = data
return HttpResponse(
json.dumps(resp), status=status, content_type="application/json"
)
def ajax_response_form(self, form):
form_spec = {
"fields": {},
"field_order": [],
"errors": form.non_field_errors(),
}
for field in form:
field_spec = {
"label": force_str(field.label),
"value": field.value(),
"help_text": force_str(field.help_text),
"errors": [force_str(e) for e in field.errors],
"widget": {
"attrs": {
k: force_str(v) for k, v in field.field.widget.attrs.items()
}
},
}
form_spec["fields"][field.html_name] = field_spec
form_spec["field_order"].append(field.html_name)
return form_spec
def pre_login(
self,
request,
user,
*,
email_verification,
signal_kwargs,
email,
signup,
redirect_url,
):
if not user.is_active:
return self.respond_user_inactive(request, user)
def post_login(
self,
request,
user,
*,
email_verification,
signal_kwargs,
email,
signup,
redirect_url,
):
from .utils import get_login_redirect_url
if is_headless_request(request):
from allauth.headless.base.response import AuthenticationResponse
response = AuthenticationResponse(request)
else:
response = HttpResponseRedirect(
get_login_redirect_url(request, redirect_url, signup=signup)
)
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(
sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs,
)
self.add_message(
request,
messages.SUCCESS,
"account/messages/logged_in.txt",
{"user": user},
)
return response
def login(self, request, user):
# HACK: This is not nice. The proper Django way is to use an
# authentication backend
if not hasattr(user, "backend"):
from .auth_backends import AuthenticationBackend
backends = get_backends()
backend = None
for b in backends:
if isinstance(b, AuthenticationBackend):
# prefer our own backend
backend = b
break
elif not backend and hasattr(b, "get_user"):
# Pick the first valid one
backend = b
backend_path = ".".join([backend.__module__, backend.__class__.__name__])
user.backend = backend_path
django_login(request, user)
def logout(self, request):
django_logout(request)
def confirm_email(self, request, email_address):
"""
Marks the email address as confirmed on the db
"""
from allauth.account.internal.flows import email_verification
return email_verification.verify_email(request, email_address)
def set_password(self, user, password) -> None:
"""
Sets the password for the user.
"""
user.set_password(password)
user.save()
def get_user_search_fields(self):
ret = []
User = get_user_model()
candidates = [
app_settings.USER_MODEL_USERNAME_FIELD,
"first_name",
"last_name",
"email",
]
for candidate in candidates:
try:
User._meta.get_field(candidate)
ret.append(candidate)
except FieldDoesNotExist:
pass
return ret
def is_safe_url(self, url):
from django.utils.http import url_has_allowed_host_and_scheme
# get_host already validates the given host, so no need to check it again
allowed_hosts = {context.request.get_host()} | set(settings.ALLOWED_HOSTS)
# Include hosts derived from CSRF_TRUSTED_ORIGINS
trusted_hosts = {
urlparse(origin).netloc for origin in settings.CSRF_TRUSTED_ORIGINS
}
allowed_hosts.update(trusted_hosts)
# ALLOWED_HOSTS supports wildcards, and subdomains using a '.' prefix.
# But, `url_has_allowed_host_and_scheme()` doesn't support that. So,
# let's check the domain using the ALLOWED_HOSTS logic, and if valid,
# add it as allowed so that we can then call
# `url_has_allowed_host_and_scheme()`.
parsed_host = urlparse(url).netloc
if parsed_host:
if validate_host(parsed_host, allowed_hosts):
allowed_hosts.add(parsed_host)
return url_has_allowed_host_and_scheme(url, allowed_hosts=allowed_hosts)
def send_password_reset_mail(self, user, email, context):
"""
Method intended to be overridden in case you need to customize the logic
used to determine whether a user is permitted to request a password reset.
For example, if you are enforcing something like "social only" authentication
in your app, you may want to intervene here by checking `user.has_usable_password`
"""
return self.send_mail("account/email/password_reset_key", email, context)
def get_reset_password_from_key_url(self, key):
"""
Method intended to be overridden in case the password reset email
needs to be adjusted.
"""
from allauth.account.internal import flows
return flows.password_reset.get_reset_password_from_key_url(self.request, key)
def get_email_confirmation_url(self, request, emailconfirmation):
"""Constructs the email confirmation (activation) url.
Note that if you have architected your system such that email
confirmations are sent outside of the request context `request`
can be `None` here.
"""
from allauth.account.internal import flows
return flows.email_verification.get_email_verification_url(
request, emailconfirmation
)
def should_send_confirmation_mail(self, request, email_address, signup) -> bool:
return True
def send_account_already_exists_mail(self, email: str) -> None:
from allauth.account.internal import flows
signup_url = flows.signup.get_signup_url(context.request)
password_reset_url = flows.password_reset.get_reset_password_url(
context.request
)
ctx = {
"signup_url": signup_url,
"password_reset_url": password_reset_url,
}
self.send_mail("account/email/account_already_exists", email, ctx)
def send_confirmation_mail(self, request, emailconfirmation, signup):
ctx = {
"user": emailconfirmation.email_address.user,
}
if app_settings.EMAIL_VERIFICATION_BY_CODE_ENABLED:
ctx.update({"code": emailconfirmation.key})
else:
ctx.update(
{
"key": emailconfirmation.key,
"activate_url": self.get_email_confirmation_url(
request, emailconfirmation
),
}
)
if signup:
email_template = "account/email/email_confirmation_signup"
else:
email_template = "account/email/email_confirmation"
self.send_mail(email_template, emailconfirmation.email_address.email, ctx)
def respond_user_inactive(self, request, user):
return headed_redirect_response("account_inactive")
def respond_email_verification_sent(self, request, user):
return headed_redirect_response("account_email_verification_sent")
def _get_login_attempts_cache_key(self, request, **credentials):
site = get_current_site(request)
login = credentials.get("email", credentials.get("username", "")).lower()
return "{site}:{login}".format(site=site.domain, login=login)
def _delete_login_attempts_cached_email(self, request, **credentials):
cache_key = self._get_login_attempts_cache_key(request, **credentials)
# Here, we wipe the login failed rate limit, completely. This is safe,
# as we only do this on a succesful password reset, which is rate limited
# on itself (e.g. sending of email etc.).
ratelimit.clear(
request,
config=app_settings.RATE_LIMITS,
action="login_failed",
key=cache_key,
)
def _rollback_login_failed_rl_usage(self) -> None:
usage = getattr(self, "_login_failed_rl_usage", None)
if usage:
usage.rollback()
def pre_authenticate(self, request, **credentials):
cache_key = self._get_login_attempts_cache_key(request, **credentials)
self._login_failed_rl_usage = ratelimit.consume(
request,
config=app_settings.RATE_LIMITS,
action="login_failed",
key=cache_key,
)
if not self._login_failed_rl_usage:
raise self.validation_error("too_many_login_attempts")
def authenticate(self, request, **credentials):
"""Only authenticates, does not actually login. See `login`"""
from allauth.account.auth_backends import AuthenticationBackend
self.pre_authenticate(request, **credentials)
AuthenticationBackend.unstash_authenticated_user()
user = authenticate(request, **credentials)
alt_user = AuthenticationBackend.unstash_authenticated_user()
user = user or alt_user
if user:
            # On a successful login, we cannot just wipe the login failed rate
# limit. That consists of 2 parts, a per IP limit, and, a per
# key(email) limit. Wiping it completely would allow an attacker to
# insert periodic successful logins during a brute force
# process. So instead, we are rolling back our consumption.
self._rollback_login_failed_rl_usage()
else:
self.authentication_failed(request, **credentials)
return user
def authentication_failed(self, request, **credentials):
pass
def reauthenticate(self, user, password):
from allauth.account.models import EmailAddress
from allauth.account.utils import user_username
credentials = {"password": password}
username = user_username(user)
if username:
credentials["username"] = username
email = EmailAddress.objects.get_primary_email(user)
if email:
credentials["email"] = email
if app_settings.LoginMethod.PHONE in app_settings.LOGIN_METHODS:
phone_verified = self.get_phone(user)
if phone_verified:
credentials["phone"] = phone_verified[0]
reauth_user = self.authenticate(context.request, **credentials)
return reauth_user is not None and reauth_user.pk == user.pk
def is_ajax(self, request):
return any(
[
request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest",
request.content_type == "application/json",
request.META.get("HTTP_ACCEPT") == "application/json",
]
)
def get_client_ip(self, request) -> str:
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
ip_value = x_forwarded_for.split(",")[0]
else:
ip_value = request.META["REMOTE_ADDR"]
# Try to parse the value as an IP address to make sure it's a valid one.
try:
ip_addr = ipaddress.ip_address(ip_value)
except ValueError:
raise PermissionDenied(f"Invalid IP address: {ip_value!r}")
else:
return str(ip_addr)
def get_http_user_agent(self, request: HttpRequest) -> str:
return request.META.get("HTTP_USER_AGENT", "Unspecified")
def generate_emailconfirmation_key(self, email):
key = get_random_string(64).lower()
return key
def get_login_stages(self):
ret = []
ret.append("allauth.account.stages.LoginByCodeStage")
ret.append("allauth.account.stages.PhoneVerificationStage")
ret.append("allauth.account.stages.EmailVerificationStage")
if allauth_app_settings.MFA_ENABLED:
from allauth.mfa import app_settings as mfa_settings
ret.append("allauth.mfa.stages.AuthenticateStage")
if mfa_settings._TRUST_STAGE_ENABLED:
ret.append("allauth.mfa.stages.TrustStage")
if mfa_settings.PASSKEY_SIGNUP_ENABLED:
ret.append("allauth.mfa.webauthn.stages.PasskeySignupStage")
return ret
def get_reauthentication_methods(self, user):
"""The order of the methods returned matters. The first method is the
default when using the `@reauthentication_required` decorator.
"""
from allauth.account.internal.flows.reauthentication import (
get_reauthentication_flows,
)
flow_by_id = {f["id"]: f for f in get_reauthentication_flows(user)}
ret = []
if "reauthenticate" in flow_by_id:
entry = {
"id": "reauthenticate",
"description": _("Use your password"),
"url": reverse("account_reauthenticate"),
}
ret.append(entry)
if "mfa_reauthenticate" in flow_by_id:
types = flow_by_id["mfa_reauthenticate"]["types"]
if "recovery_codes" in types or "totp" in types:
entry = {
"id": "mfa_reauthenticate",
"description": _("Use authenticator app or code"),
"url": reverse("mfa_reauthenticate"),
}
ret.append(entry)
if "webauthn" in types:
entry = {
"id": "mfa_reauthenticate:webauthn",
"description": _("Use a security key"),
"url": reverse("mfa_reauthenticate_webauthn"),
}
ret.append(entry)
return ret
def send_notification_mail(self, template_prefix, user, context=None, email=None):
from allauth.account.models import EmailAddress
if not app_settings.EMAIL_NOTIFICATIONS:
return
if not email:
email = EmailAddress.objects.get_primary_email(user)
if not email:
return
ctx = {
"timestamp": timezone.now(),
"ip": self.get_client_ip(self.request),
"user_agent": self.get_http_user_agent(self.request)[
:HTTP_USER_AGENT_MAX_LENGTH
],
}
if context:
ctx.update(context)
self.send_mail(template_prefix, email, ctx)
def generate_login_code(self) -> str:
"""
Generates a new login code.
"""
return generate_user_code()
def generate_password_reset_code(self) -> str:
"""
Generates a new password reset code.
"""
return generate_user_code(length=8)
def generate_email_verification_code(self) -> str:
"""
Generates a new email verification code.
"""
return generate_user_code()
def generate_phone_verification_code(self, *, user, phone: str) -> str:
"""
Generates a new phone verification code.
"""
return generate_user_code()
def _generate_phone_verification_code_compat(self, *, user, phone: str) -> str:
sig = inspect.signature(self.generate_phone_verification_code)
if len(sig.parameters) == 0:
warnings.warn(
"generate_phone_verification_code(self) is deprecated, use generate_phone_verification_code(self, *, user, phone)",
DeprecationWarning,
)
return self.generate_phone_verification_code() # type: ignore[call-arg]
return self.generate_phone_verification_code(user=user, phone=phone)
def is_login_by_code_required(self, login) -> bool:
"""
Returns whether or not login-by-code is required for the given
login.
"""
from allauth.account import authentication
method = None
records = authentication.get_authentication_records(self.request)
if records:
method = records[-1]["method"]
if method == "code":
return False
value = app_settings.LOGIN_BY_CODE_REQUIRED
if isinstance(value, bool):
return value
if not value:
return False
return method is None or method in value
def phone_form_field(self, **kwargs):
"""
Returns a form field used to input phone numbers.
"""
from allauth.account.fields import PhoneField
return PhoneField(**kwargs)
def send_unknown_account_sms(self, phone: str, **kwargs) -> None:
"""
        In case enumeration prevention is enabled and a verification code
is requested for an unlisted phone number, this method is invoked to
send a text explaining that no account is on file.
"""
pass
def send_account_already_exists_sms(self, phone: str) -> None:
pass
def send_verification_code_sms(self, user, phone: str, code: str, **kwargs):
"""
Sends a verification code.
"""
raise NotImplementedError
@property
def _has_phone_impl(self) -> bool:
"""
Checks whether the phone number adapter is fully implemented.
"""
methods = (
"send_verification_code_sms",
"set_phone",
"get_phone",
"set_phone_verified",
"get_user_by_phone",
)
return all(
getattr(self.__class__, method) != getattr(DefaultAccountAdapter, method)
for method in methods
)
def set_phone(self, user, phone: str, verified: bool):
"""
Sets the phone number (and verified status) for the given user.
"""
raise NotImplementedError
def get_phone(self, user) -> typing.Optional[typing.Tuple[str, bool]]:
"""
        Returns the phone number stored for the given user, as a tuple of the
        phone number itself and whether or not it was verified.
"""
raise NotImplementedError
def set_phone_verified(self, user, phone: str):
"""
Marks the specified phone number for the given user as
verified. Note that the user is already expected to have
the phone number attached to the account.
"""
raise NotImplementedError
def get_user_by_phone(self, phone: str):
"""
Looks up a user given the specified phone number. Returns ``None`` if no user
was found.
"""
raise NotImplementedError
def get_adapter(request=None) -> DefaultAccountAdapter:
return import_attribute(app_settings.ADAPTER)(request)
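A hedged sketch of the override pattern the class docstring describes, assuming the masked class is DefaultAccountAdapter; the module path in the settings comment is hypothetical:

```python
from allauth.account.adapter import DefaultAccountAdapter


class ClosedSignupAdapter(DefaultAccountAdapter):
    def is_open_for_signup(self, request):
        # Disable self-service signups site-wide.
        return False


# settings.py
# ACCOUNT_ADAPTER = "myproject.adapters.ClosedSignupAdapter"
```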
|
DefaultAccountAdapter
|
python
|
python-pillow__Pillow
|
src/PIL/TiffImagePlugin.py
|
{
"start": 74172,
"end": 85002
}
|
class ____(io.BytesIO):
fieldSizes = [
0, # None
1, # byte
1, # ascii
2, # short
4, # long
8, # rational
1, # sbyte
1, # undefined
2, # sshort
4, # slong
8, # srational
4, # float
8, # double
4, # ifd
2, # unicode
4, # complex
8, # long8
]
Tags = {
273, # StripOffsets
288, # FreeOffsets
324, # TileOffsets
519, # JPEGQTables
520, # JPEGDCTables
521, # JPEGACTables
}
def __init__(self, fn: StrOrBytesPath | IO[bytes], new: bool = False) -> None:
self.f: IO[bytes]
if is_path(fn):
self.name = fn
self.close_fp = True
try:
self.f = open(fn, "w+b" if new else "r+b")
except OSError:
self.f = open(fn, "w+b")
else:
self.f = cast(IO[bytes], fn)
self.close_fp = False
self.beginning = self.f.tell()
self.setup()
def setup(self) -> None:
# Reset everything.
self.f.seek(self.beginning, os.SEEK_SET)
self.whereToWriteNewIFDOffset: int | None = None
self.offsetOfNewPage = 0
self.IIMM = iimm = self.f.read(4)
self._bigtiff = b"\x2b" in iimm
if not iimm:
# empty file - first page
self.isFirst = True
return
self.isFirst = False
if iimm not in PREFIXES:
msg = "Invalid TIFF file header"
raise RuntimeError(msg)
self.setEndian("<" if iimm.startswith(II) else ">")
if self._bigtiff:
self.f.seek(4, os.SEEK_CUR)
self.skipIFDs()
self.goToEnd()
def finalize(self) -> None:
if self.isFirst:
return
# fix offsets
self.f.seek(self.offsetOfNewPage)
iimm = self.f.read(4)
if not iimm:
# Make it easy to finish a frame without committing to a new one.
return
if iimm != self.IIMM:
msg = "IIMM of new page doesn't match IIMM of first page"
raise RuntimeError(msg)
if self._bigtiff:
self.f.seek(4, os.SEEK_CUR)
ifd_offset = self._read(8 if self._bigtiff else 4)
ifd_offset += self.offsetOfNewPage
assert self.whereToWriteNewIFDOffset is not None
self.f.seek(self.whereToWriteNewIFDOffset)
self._write(ifd_offset, 8 if self._bigtiff else 4)
self.f.seek(ifd_offset)
self.fixIFD()
def newFrame(self) -> None:
# Call this to finish a frame.
self.finalize()
self.setup()
def __enter__(self) -> AppendingTiffWriter:
return self
def __exit__(self, *args: object) -> None:
if self.close_fp:
self.close()
def tell(self) -> int:
return self.f.tell() - self.offsetOfNewPage
def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
"""
:param offset: Distance to seek.
:param whence: Whether the distance is relative to the start,
end or current position.
:returns: The resulting position, relative to the start.
"""
if whence == os.SEEK_SET:
offset += self.offsetOfNewPage
self.f.seek(offset, whence)
return self.tell()
def goToEnd(self) -> None:
self.f.seek(0, os.SEEK_END)
pos = self.f.tell()
# pad to 16 byte boundary
pad_bytes = 16 - pos % 16
if 0 < pad_bytes < 16:
self.f.write(bytes(pad_bytes))
self.offsetOfNewPage = self.f.tell()
def setEndian(self, endian: str) -> None:
self.endian = endian
self.longFmt = f"{self.endian}L"
self.shortFmt = f"{self.endian}H"
self.tagFormat = f"{self.endian}HH" + ("Q" if self._bigtiff else "L")
def skipIFDs(self) -> None:
while True:
ifd_offset = self._read(8 if self._bigtiff else 4)
if ifd_offset == 0:
self.whereToWriteNewIFDOffset = self.f.tell() - (
8 if self._bigtiff else 4
)
break
self.f.seek(ifd_offset)
num_tags = self._read(8 if self._bigtiff else 2)
self.f.seek(num_tags * (20 if self._bigtiff else 12), os.SEEK_CUR)
def write(self, data: Buffer, /) -> int:
return self.f.write(data)
def _fmt(self, field_size: int) -> str:
try:
return {2: "H", 4: "L", 8: "Q"}[field_size]
except KeyError:
msg = "offset is not supported"
raise RuntimeError(msg)
def _read(self, field_size: int) -> int:
(value,) = struct.unpack(
self.endian + self._fmt(field_size), self.f.read(field_size)
)
return value
def readShort(self) -> int:
return self._read(2)
def readLong(self) -> int:
return self._read(4)
@staticmethod
def _verify_bytes_written(bytes_written: int | None, expected: int) -> None:
if bytes_written is not None and bytes_written != expected:
msg = f"wrote only {bytes_written} bytes but wanted {expected}"
raise RuntimeError(msg)
def _rewriteLast(
self, value: int, field_size: int, new_field_size: int = 0
) -> None:
self.f.seek(-field_size, os.SEEK_CUR)
if not new_field_size:
new_field_size = field_size
bytes_written = self.f.write(
struct.pack(self.endian + self._fmt(new_field_size), value)
)
self._verify_bytes_written(bytes_written, new_field_size)
def rewriteLastShortToLong(self, value: int) -> None:
self._rewriteLast(value, 2, 4)
def rewriteLastShort(self, value: int) -> None:
return self._rewriteLast(value, 2)
def rewriteLastLong(self, value: int) -> None:
return self._rewriteLast(value, 4)
def _write(self, value: int, field_size: int) -> None:
bytes_written = self.f.write(
struct.pack(self.endian + self._fmt(field_size), value)
)
self._verify_bytes_written(bytes_written, field_size)
def writeShort(self, value: int) -> None:
self._write(value, 2)
def writeLong(self, value: int) -> None:
self._write(value, 4)
def close(self) -> None:
self.finalize()
if self.close_fp:
self.f.close()
def fixIFD(self) -> None:
num_tags = self._read(8 if self._bigtiff else 2)
for i in range(num_tags):
tag, field_type, count = struct.unpack(
self.tagFormat, self.f.read(12 if self._bigtiff else 8)
)
field_size = self.fieldSizes[field_type]
total_size = field_size * count
fmt_size = 8 if self._bigtiff else 4
is_local = total_size <= fmt_size
if not is_local:
offset = self._read(fmt_size) + self.offsetOfNewPage
self._rewriteLast(offset, fmt_size)
if tag in self.Tags:
cur_pos = self.f.tell()
logger.debug(
"fixIFD: %s (%d) - type: %s (%d) - type size: %d - count: %d",
TiffTags.lookup(tag).name,
tag,
TYPES.get(field_type, "unknown"),
field_type,
field_size,
count,
)
if is_local:
self._fixOffsets(count, field_size)
self.f.seek(cur_pos + fmt_size)
else:
self.f.seek(offset)
self._fixOffsets(count, field_size)
self.f.seek(cur_pos)
elif is_local:
# skip the locally stored value that is not an offset
self.f.seek(fmt_size, os.SEEK_CUR)
def _fixOffsets(self, count: int, field_size: int) -> None:
for i in range(count):
offset = self._read(field_size)
offset += self.offsetOfNewPage
new_field_size = 0
if self._bigtiff and field_size in (2, 4) and offset >= 2**32:
# offset is now too large - we must convert long to long8
new_field_size = 8
elif field_size == 2 and offset >= 2**16:
# offset is now too large - we must convert short to long
new_field_size = 4
if new_field_size:
if count != 1:
msg = "not implemented"
raise RuntimeError(msg) # XXX TODO
# simple case - the offset is just one and therefore it is
# local (not referenced with another offset)
self._rewriteLast(offset, field_size, new_field_size)
# Move back past the new offset, past 'count', and before 'field_type'
rewind = -new_field_size - 4 - 2
self.f.seek(rewind, os.SEEK_CUR)
self.writeShort(new_field_size) # rewrite the type
self.f.seek(2 - rewind, os.SEEK_CUR)
else:
self._rewriteLast(offset, field_size)
def fixOffsets(
self, count: int, isShort: bool = False, isLong: bool = False
) -> None:
if isShort:
field_size = 2
elif isLong:
field_size = 4
else:
field_size = 0
return self._fixOffsets(count, field_size)
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
append_images = list(im.encoderinfo.get("append_images", []))
if not hasattr(im, "n_frames") and not append_images:
return _save(im, fp, filename)
cur_idx = im.tell()
try:
with AppendingTiffWriter(fp) as tf:
for ims in [im] + append_images:
encoderinfo = ims._attach_default_encoderinfo(im)
if not hasattr(ims, "encoderconfig"):
ims.encoderconfig = ()
nfr = getattr(ims, "n_frames", 1)
for idx in range(nfr):
ims.seek(idx)
ims.load()
_save(ims, tf, filename)
tf.newFrame()
ims.encoderinfo = encoderinfo
finally:
im.seek(cur_idx)
#
# --------------------------------------------------------------------
# Register
Image.register_open(TiffImageFile.format, TiffImageFile, _accept)
Image.register_save(TiffImageFile.format, _save)
Image.register_save_all(TiffImageFile.format, _save_all)
Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"])
Image.register_mime(TiffImageFile.format, "image/tiff")
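# --- Illustrative sketch, not part of the original module ---
# Saving a multi-frame TIFF from user code routes through _save_all and the
# appending writer above; the file name below is a placeholder.
def _example_save_multiframe_tiff():
    first = Image.new("RGB", (16, 16), "red")
    second = Image.new("RGB", (16, 16), "blue")
    # save_all=True makes PIL call _save_all, which appends each extra frame.
    first.save("example.tiff", save_all=True, append_images=[second])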
|
AppendingTiffWriter
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_experiment_service.py
|
{
"start": 6948,
"end": 8768
}
|
class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id
):
self.hook = ExperimentRunHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(EXPERIMENT_SERVICE_STRING.format("aiplatform.ExperimentRun"))
def test_create_experiment_run(self, mock_experiment_run) -> None:
self.hook.create_experiment_run(
project_id=TEST_PROJECT_ID,
location=TEST_REGION,
experiment_name=TEST_EXPERIMENT_NAME,
experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
experiment_run_tensorboard=TEST_TENSORBOARD,
)
mock_experiment_run.create.assert_called_with(
project=TEST_PROJECT_ID,
location=TEST_REGION,
experiment=TEST_EXPERIMENT_NAME,
run_name=TEST_EXPERIMENT_RUN_NAME,
state=aiplatform.gapic.Execution.State.NEW,
tensorboard=TEST_TENSORBOARD,
)
@mock.patch(EXPERIMENT_SERVICE_STRING.format("aiplatform.ExperimentRun"))
def test_delete_experiment_run(self, mock_experiment_run) -> None:
self.hook.delete_experiment_run(
project_id=TEST_PROJECT_ID,
location=TEST_REGION,
experiment_name=TEST_EXPERIMENT_NAME,
experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
)
mock_experiment_run.assert_called_with(
project=TEST_PROJECT_ID,
location=TEST_REGION,
experiment=TEST_EXPERIMENT_NAME,
run_name=TEST_EXPERIMENT_RUN_NAME,
)
mock_experiment_run.return_value.delete.assert_called_with(
delete_backing_tensorboard_run=TEST_DELETE_BACKING_TENSORBOARD_RUNS
)
|
TestExperimentRunWithoutDefaultProjectIdHook
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/sensors/external_task.py
|
{
"start": 2404,
"end": 3655
}
|
class ____(BaseOperatorLink):
"""
Operator link for ExternalTaskSensor and ExternalTaskMarker.
It allows users to access the DAG waited on by ExternalTaskSensor or cleared by ExternalTaskMarker.
"""
name = "External DAG"
def get_link(self, operator: BaseOperator, *, ti_key: TaskInstanceKey) -> str:
if TYPE_CHECKING:
assert isinstance(operator, (ExternalTaskMarker, ExternalTaskSensor))
external_dag_id = operator.external_dag_id
if not AIRFLOW_V_3_0_PLUS:
from airflow.models.renderedtifields import RenderedTaskInstanceFields
if template_fields := RenderedTaskInstanceFields.get_templated_fields(ti_key):
external_dag_id: str = template_fields.get("external_dag_id", operator.external_dag_id) # type: ignore[no-redef]
if AIRFLOW_V_3_0_PLUS:
from airflow.utils.helpers import build_airflow_dagrun_url
return build_airflow_dagrun_url(dag_id=external_dag_id, run_id=ti_key.run_id)
from airflow.utils.helpers import build_airflow_url_with_query # type:ignore[attr-defined]
query = {"dag_id": external_dag_id, "run_id": ti_key.run_id}
return build_airflow_url_with_query(query)
|
ExternalDagLink
|
python
|
psf__black
|
tests/data/cases/form_feeds.py
|
{
"start": 696,
"end": 1657
}
|
class ____:
def __init__(self):
pass
def something(self):
pass
#
pass
pass #
a = 1
#
pass
a = 1
a = [
]
# as internal whitespace of a comment is allowed but why
"form feed literal in a string is okay"
# form feeds at the very end get removed.
# output
# Warning! This file contains form feeds (ASCII 0x0C, often represented by \f or ^L).
# These may be invisible in your editor: ensure you can see them before making changes here.
# There's one at the start that'll get stripped
# Comment and statement processing is different enough that we'll test variations of both
# contexts here
#
#
#
#
#
#
#
#
#
#
#
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
pass
# form feed after a dedent
def foo():
pass
pass
# form feeds are prohibited inside blocks, or on a line with nonwhitespace
def bar(a=1, b: bool = False):
pass
|
Baz
|
python
|
fluentpython__example-code
|
14-it-generator/sentence_iter.py
|
{
"start": 525,
"end": 1498
}
|
class ____:
def __init__(self, words):
self.words = words # <3>
self.index = 0 # <4>
def __next__(self):
try:
word = self.words[self.index] # <5>
except IndexError:
raise StopIteration() # <6>
self.index += 1 # <7>
return word # <8>
def __iter__(self): # <9>
return self
# END SENTENCE_ITER
def main():
import sys
import warnings
try:
filename = sys.argv[1]
word_number = int(sys.argv[2])
except (IndexError, ValueError):
print('Usage: %s <file-name> <word-number>' % sys.argv[0])
sys.exit(1)
with open(filename, 'rt', encoding='utf-8') as text_file:
s = Sentence(text_file.read())
for n, word in enumerate(s, 1):
if n == word_number:
print(word)
break
else:
warnings.warn('last word is #%d, "%s"' % (n, word))
if __name__ == '__main__':
main()
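# --- Illustrative sketch, not part of the original file ---
# Driving the iterator protocol by hand: __next__ yields one word per call and
# signals exhaustion with StopIteration, which is what the for loop in main()
# relies on. `iterator` is an instance of the iterator class defined above.
def example_manual_iteration(iterator):
    words = []
    try:
        while True:
            words.append(next(iterator))
    except StopIteration:
        pass
    return words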
|
SentenceIterator
|
python
|
pydata__xarray
|
xarray/core/types.py
|
{
"start": 11708,
"end": 11905
}
|
class ____(Protocol):
def acquire(self, *args, **kwargs) -> Any: ...
def release(self) -> None: ...
def __enter__(self) -> Any: ...
def __exit__(self, *args, **kwargs) -> None: ...
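# Illustrative sketch, not part of the original module: any object exposing
# these four methods satisfies the protocol above, e.g. threading.Lock() or a
# multiprocessing lock, so it can be passed wherever that protocol is expected.
def _example_lock_usage(lock) -> None:
    lock.acquire()
    lock.release()
    with lock:  # __enter__/__exit__ make it usable as a context manager
        pass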
|
Lock
|
python
|
python-excel__xlwt
|
xlwt/BIFFRecords.py
|
{
"start": 67533,
"end": 68087
}
|
class ____(BiffRecord):
"""
This record is part of the Calculation Settings Block. It specifies the maximum
number of times the formulas should be iteratively calculated. This is a fail-safe
against mutually recursive formulas locking up a spreadsheet application.
Record CALCCOUNT, BIFF2-BIFF8:
Offset Size Contents
0 2 Maximum number of iterations allowed in circular references
"""
_REC_ID = 0x000C
def __init__(self, calc_count):
self._rec_data = pack('<H', calc_count)
|
CalcCountRecord
|
python
|
urllib3__urllib3
|
src/urllib3/response.py
|
{
"start": 6282,
"end": 8440
}
|
class ____:
"""Memory-efficient bytes buffer
To return decoded data in read() and still follow the BufferedIOBase API, we need a
buffer to always return the correct amount of bytes.
This buffer should be filled using calls to put()
Our maximum memory usage is determined by the sum of the size of:
* self.buffer, which contains the full data
* the largest chunk that we will copy in get()
The worst case scenario is a single chunk, in which case we'll make a full copy of
the data inside get().
"""
def __init__(self) -> None:
self.buffer: typing.Deque[bytes] = collections.deque()
self._size: int = 0
def __len__(self) -> int:
return self._size
def put(self, data: bytes) -> None:
self.buffer.append(data)
self._size += len(data)
def get(self, n: int) -> bytes:
if n == 0:
return b""
elif not self.buffer:
raise RuntimeError("buffer is empty")
elif n < 0:
raise ValueError("n should be > 0")
fetched = 0
ret = io.BytesIO()
while fetched < n:
remaining = n - fetched
chunk = self.buffer.popleft()
chunk_length = len(chunk)
if remaining < chunk_length:
left_chunk, right_chunk = chunk[:remaining], chunk[remaining:]
ret.write(left_chunk)
self.buffer.appendleft(right_chunk)
self._size -= remaining
break
else:
ret.write(chunk)
self._size -= chunk_length
fetched += chunk_length
if not self.buffer:
break
return ret.getvalue()
def get_all(self) -> bytes:
buffer = self.buffer
if not buffer:
assert self._size == 0
return b""
if len(buffer) == 1:
result = buffer.pop()
else:
ret = io.BytesIO()
ret.writelines(buffer.popleft() for _ in range(len(buffer)))
result = ret.getvalue()
self._size = 0
return result
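# Illustrative sketch, not part of the original module: the typical put()/get()
# flow for the buffer class defined above. `buf` is an instance of that class.
def _example_buffer_usage(buf) -> None:
    buf.put(b"hello ")
    buf.put(b"world")
    assert len(buf) == 11
    assert buf.get(5) == b"hello"      # splits a chunk when fewer bytes are requested
    assert buf.get_all() == b" world"  # drains whatever remains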
|
BytesQueueBuffer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/compiler/xla/experimental/xla_sharding.py
|
{
"start": 1032,
"end": 23389
}
|
class ____(object):
"""A class to support adding sharding attributes to Ops.
Use the factory constructors and then call apply_to_tensor:
Sharding.replicate().apply_to_tensor(tensor)
"""
def __init__(self, proto=None):
"""Do not use this constructor; use the factory functions below."""
self._proto = proto
@classmethod
def replicate(cls):
"""Returns a replicated sharding attribute.
This causes an op to be computed in its entirety independently on all
cores in the XLA device.
"""
return Sharding(
proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))
@classmethod
def manual(cls):
"""Returns a manuall sharding attribute.
This means the op is manually partitioned by the user and XLA will not
change the shapes.
"""
return Sharding(
proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MANUAL))
@classmethod
def assign_device(cls, core):
"""Returns an AssignDevice sharding attribute.
This causes an op to be computed in its entirety only on one core in
the XLA device.
Args:
core: The core to assign this Op to.
"""
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.MAXIMAL,
tile_assignment_dimensions=[1],
tile_assignment_devices=[core]))
@classmethod
def tile(cls, tile_assignment):
"""Returns a Tiled sharding attribute.
This causes an op to be partially computed on multiple cores in the
XLA device.
Args:
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
Raises:
TypeError: tile_assignment was not of np.array type.
TODO(jmolloy): This concept is nefarious and is not
something we really want to expose to users (especially as the
contract for tile_assignment is very strict).
"""
if not isinstance(tile_assignment, _np.ndarray):
raise TypeError('Tile assignment must be of type np.ndarray')
dims = list(tile_assignment.shape)
flattened_devices = tile_assignment.reshape(-1, order='C')
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=dims,
tile_assignment_devices=list(flattened_devices)))
@classmethod
def subgroup_tile(cls, tile_assignment, subgroup_modes):
"""Returns a subgroup manual sharding attribute.
This is similar to tile(), but tile_assignment has one or more dimensions
more than the tensor, and subgroup_modes defines the sharding types in the last
dimensions of tile_assignment.
Args:
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
subgroup_modes: sharding types for the dimensions beyond the tensor shape
rank.
Raises:
TypeError: tile_assignment was not of np.array type or subgroup_modes
has unsupported sharding type.
"""
if not isinstance(tile_assignment, _np.ndarray):
raise TypeError('SubgroupTile assignment must be of type np.ndarray')
if not isinstance(subgroup_modes, list):
raise TypeError('subgroup_modes in subgroup manual must be of type list')
if len(tile_assignment.shape) < len(subgroup_modes):
raise TypeError('SubgroupTile assignment must have rank larger than'
' length of subgroup_modes')
for sharding_type in subgroup_modes:
if sharding_type not in [
xla_data_pb2.OpSharding.REPLICATED, xla_data_pb2.OpSharding.MANUAL
]:
raise TypeError(
'Each sharding_type in subgroup_modes in subgroup manual must '
'be of type xla_data_pb2.OpSharding.REPLICATED'
' or xla_data_pb2.OpSharding.MANUAL')
dims = list(tile_assignment.shape)
flattened_devices = tile_assignment.reshape(-1, order='C')
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=dims,
tile_assignment_devices=list(flattened_devices),
last_tile_dims=list(subgroup_modes)))
@classmethod
def partial_tile(cls, tile_assignment):
"""Returns a partially tiled sharding attribute.
This is similar to tile(), but tile_assignment has one more dimension than
the tensor, and tiles in the last dimension of tile_assignment are
replicated.
Args:
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
Raises:
TypeError: tile_assignment was not of np.array type.
"""
if not isinstance(tile_assignment, _np.ndarray):
raise TypeError('PartialTile assignment must be of type np.ndarray')
dims = list(tile_assignment.shape)
flattened_devices = tile_assignment.reshape(-1, order='C')
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=dims,
tile_assignment_devices=list(flattened_devices),
replicate_on_last_tile_dim=True))
@classmethod
def split(cls, tensor, split_dimension, num_devices, input_shape=None):
"""Returns a Sharding that splits a tensor across a dimension.
This creates a Tiled attribute, similar to tile(), but easier to use for the
common case of tiling a tensor N ways in one dimension.
Args:
tensor: A tf.Tensor to split.
split_dimension: The dimension number to split.
num_devices: The number of cores to split `tensor` over.
input_shape: The shape of the original tensor.
Raises:
ValueError: The tensor to split was smaller in the split dimension than
the number of devices to split over.
"""
if input_shape:
shape = input_shape
else:
shape = tensor.shape.as_list()
if (shape[split_dimension] is not None and
shape[split_dimension] < num_devices):
raise ValueError('Split dimension was smaller than the required number '
'of splits: shape=%r, dimension=%r, num_devices=%r' %
(shape, split_dimension, num_devices))
tile_assignment_dims = [1] * len(shape)
tile_assignment_dims[split_dimension] = num_devices
return Sharding(
proto=xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=tile_assignment_dims,
tile_assignment_devices=range(num_devices)))
def apply_to_tensor(
self,
tensor,
assign_tuple_sharding=False,
use_sharding_op=False,
unspecified_dims=None,
sharding_v2_proto=None,
):
"""Applies this Sharding attribute to `tensor`.
Args:
tensor: A tf.Tensor to split.
assign_tuple_sharding: If the sharding type should be a tuple.
use_sharding_op: Whether to create a sharding op on `tensor`.
unspecified_dims: An optional list of dimensions unspecified.
sharding_v2_proto: The v2 sharding proto to use.
Returns:
The tensor with Sharding attribute.
"""
if unspecified_dims:
assert use_sharding_op and not assign_tuple_sharding
proto = self._proto
# If passed a tf.BaseResourceVariable instead of a tf.Tensor, simply store
# the sharding proto on the tf.BaseResourceVariable object. An XlaShardingOp
# will be created down the line whenever a ReadVariableOp is created by the
# tf.BaseResourceVariable.
if (
isinstance(tensor, resource_variable_ops.BaseResourceVariable)
and context.xla_sharding_for_resource_variables_enabled()
):
if assign_tuple_sharding:
proto = self._create_tuple_proto(num_outputs=1)
# pylint: disable=protected-access
tensor._set_xla_sharding(proto)
return tensor
if use_sharding_op:
if assign_tuple_sharding:
proto = self._create_tuple_proto(num_outputs=1)
tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString())
else:
tensor = tf2xla.sharding(
tensor,
sharding=proto.SerializeToString(),
unspecified_dims=unspecified_dims or [],
)
elif assign_tuple_sharding or len(tensor.op.outputs) > 1:
proto = self._get_or_create_tuple_proto(tensor.op)
# We can't mutate an element of old_proto.tuple_shardings, so create
# a new proto.
tuple_shardings = list(proto.tuple_shardings)
tuple_shardings[tensor.value_index] = self._proto
proto = xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings)
# TODO(jmolloy): This need to be seriously revisited before declaring this
# API available for public use.
# pylint: disable=protected-access
tensor.op._set_attr('_XlaSharding',
attr_value_pb2.AttrValue(s=proto.SerializeToString()))
if sharding_v2_proto:
tensor.op._set_attr(
'_XlaShardingV2',
attr_value_pb2.AttrValue(s=sharding_v2_proto.SerializeToString()),
)
return tensor
def apply_to_operation(self, operation):
"""Applies this Sharding attribute to `operation`.
Args:
operation: A tf.Operation to add sharding annotation.
"""
attr_value = attr_value_pb2.AttrValue(s=self._proto.SerializeToString())
# pylint: disable=protected-access
operation._set_attr('_XlaSharding', attr_value)
@property
def proto(self):
"""Return the sharding protobuf of type xla_data_pb2.OpSharding."""
return self._proto
def _get_or_create_tuple_proto(self, op):
try:
attr = op.get_attr('_XlaSharding')
proto = xla_data_pb2.OpSharding()
proto.ParseFromString(attr)
return proto
except ValueError:
return self._create_tuple_proto(len(op.outputs))
def _create_tuple_proto(self, num_outputs):
shardings = [
xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED)
] * num_outputs
return xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=shardings)
def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):
"""Copies the a tensor's sharding to another.
Args:
from_tensor: Source tensor. Must be the sole output of an op.
to_tensor: the tensor to annotate with the copy.
use_sharding_op: whether to create a sharding op on `to_tensor`.
Returns:
A tensor with sharding annotation copied from `from_tensor`.
"""
sharding = get_tensor_sharding(from_tensor)
if sharding is None:
return to_tensor
# If passed a tf.BaseResourceVariable instead of a tf.Tensor, simply store the
# sharding proto on the tf.BaseResourceVariable object. An XlaShardingOp
# will be created down the line whenever a ReadVariableOp is created by the
# tf.BaseResourceVariable.
if (
isinstance(to_tensor, resource_variable_ops.BaseResourceVariable)
and context.xla_sharding_for_resource_variables_enabled()
):
proto = xla_data_pb2.OpSharding()
proto.ParseFromString(sharding)
# pylint: disable=protected-access
to_tensor._set_xla_sharding(proto)
return to_tensor
if use_sharding_op:
to_tensor = tf2xla.sharding(to_tensor, sharding=sharding)
attr_value = attr_value_pb2.AttrValue(s=sharding)
# pylint: disable=protected-access
to_tensor.op._set_attr('_XlaSharding', attr_value)
return to_tensor
# Helpers for the above factory functions that allow easy application of
# shardings, for example:
# tensor = xla_sharding.replicate(tensor)
def replicate(tensor, assign_tuple_sharding=False, use_sharding_op=False):
return Sharding.replicate().apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def assign_device(tensor,
device,
assign_tuple_sharding=False,
use_sharding_op=False):
"""Returns a tensor that has AssignDevice sharding attribute."""
return Sharding.assign_device(device).apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def tile(tensor,
tile_assignment,
assign_tuple_sharding=False,
use_sharding_op=False,
unspecified_dims=None):
"""Returns a tensor that has tiled sharding.
Args:
tensor: A tf.Tensor to shard.
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology.
assign_tuple_sharding: If the sharding type should be a tuple.
use_sharding_op: If true, adds a sharding op to set the sharding.
unspecified_dims: An optional list of dimensions unspecified.
"""
return Sharding.tile(tile_assignment).apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op,
unspecified_dims=unspecified_dims or [])
def split(tensor,
split_dimension,
num_devices,
assign_tuple_sharding=False,
use_sharding_op=False,
input_shape=None):
"""Returns a tensor that is split along the given dimension.
Args:
tensor: A tf.Tensor to split.
split_dimension: The dimension to split.
num_devices: The number of devices to partition the dimension.
assign_tuple_sharding: If the sharding type should be a tuple.
use_sharding_op: If true, adds a sharding op to set the sharding.
input_shape: The full shape of the input tensor.
"""
return Sharding.split(tensor, split_dimension, num_devices,
input_shape).apply_to_tensor(
tensor,
assign_tuple_sharding=assign_tuple_sharding,
use_sharding_op=use_sharding_op)
def partial_tile(tensor,
tile_assignment,
use_sharding_op=False,
unspecified_dims=None):
"""Returns a tensor that has tiled sharding.
Args:
tensor: A tf.Tensor to shard.
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology. It must have one
more dimension than tensor, and the last dimension represents partially
replicated tiles.
use_sharding_op: If true, adds a sharding op to set the sharding.
unspecified_dims: An optional list of dimensions unspecified.
"""
return Sharding.partial_tile(tile_assignment).apply_to_tensor(
tensor,
use_sharding_op=use_sharding_op,
unspecified_dims=unspecified_dims or [])
def get_op_sharding(op):
"""Returns sharding attribute of an op.
Args:
op: a TensorFlow op.
Returns:
The attribute representing XLA sharding on this op.
"""
try:
return op.get_attr('_XlaSharding')
except ValueError:
return None
except AttributeError:
# AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.
return None
def get_tensor_sharding(tensor):
"""Returns sharding attribute of a Tensor.
Args:
tensor: a Tensor.
Returns:
The attribute representing XLA sharding on tensor's op.
"""
# If passed a tf.BaseResourceVariable instead of a tf.Tensor, simply get the
# sharding proto set on the _xla_sharding field of the tf.BaseResourceVariable
# object.
if (
isinstance(tensor, resource_variable_ops.BaseResourceVariable)
and context.xla_sharding_for_resource_variables_enabled()
):
# pylint: disable=protected-access
sharding = tensor._get_xla_sharding()
if sharding is None:
return None
else:
return sharding.SerializeToString()
try:
return get_op_sharding(tensor.op)
except AttributeError:
# AttributeError: Tensor.op is meaningless when eager execution is enabled.
return None
def get_sharding_tile_shape(sharding):
"""Returns the tile assignment shape for a sharded Tensor.
Args:
sharding: a serialized OpSharding message describing the layout of a
sharded Tensor.
Returns:
A list, for each dimension of the sharded Tensor, of the number of shards
into which it has been split. Returns None if the input indicates no tile
assignments.
"""
if sharding is None:
return None
sharding_message = xla_data_pb2.OpSharding()
sharding_message.ParseFromString(sharding)
if sharding_message.tile_assignment_dimensions:
return sharding_message.tile_assignment_dimensions
else:
return None
def auto_to_manual_spmd_partition(tensor,
manual_sharding,
single_dim=-1,
unspecified_dims=None):
"""Switches from automatic SPMD partitioning to manual partitioning.
Converts a full-shaped tensor (to be automatically partitioned by SPMD
partitioner) to a shard-shaped tensor to be consumed by manually partitioned
ops.
Args:
tensor: A tf.Tensor in full shape.
manual_sharding: A serialized string of OpSharding to be used in manual
partitioning.
single_dim: If >= 0, the conversion will happen only on this dim in
subgroups.
unspecified_dims: An optional list of dimensions unspecified.
Returns:
A shard-shaped tensor to be consumed by manually partitioned ops.
"""
return tf2xla.spmd_full_to_shard_shape(
tensor,
manual_sharding=manual_sharding,
dim=single_dim,
unspecified_dims=unspecified_dims or [])
def manual_to_auto_spmd_partition(tensor,
manual_sharding,
full_shape,
single_dim=-1,
unspecified_dims=None):
"""Switches from manual partitioning to automatic SPMD partitioning.
Converts a shard-shaped tensor (manually partitioned in SPMD-style) to a
full-shaped tensor to be partitioned automatically by the SPMD partitioner.
Args:
tensor: A tf.Tensor in shard shape.
manual_sharding: a serialized string of OpSharding to be used in manual
partitioning.
full_shape: the shape of tensor before partitioning.
single_dim: If >= 0, the conversion will happen only on this dim in
subgroups.
unspecified_dims: An optional list of dimensions unspecified.
Returns:
A full-shaped tensor to be partitioned automatically by the SPMD
partitioner.
"""
return tf2xla.spmd_shard_to_full_shape(
tensor,
manual_sharding=manual_sharding,
full_shape=full_shape,
dim=single_dim,
unspecified_dims=unspecified_dims or [])
def mesh_split_sharding(device_mesh,
tensor_split_dims_mapping,
manual_mesh_dims=None):
"""Returns a Sharding object representing sharding along multiple dimensions.
Args:
device_mesh: An np.ndarray describing the topology of the device mesh and
each element is the ID of the device in the topology.
tensor_split_dims_mapping: A list of integers that map each tensor axis to
the device mesh axis along which it is sharded. Its length is the tensor
rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor
dimension i. Use -1 for tensor dimensions that are not sharded.
manual_mesh_dims: An optional list of mesh dims for manual subgroups.
Raises:
ValueError: The number of tensor split dimensions is larger than device mesh
rank.
"""
manual_mesh_dims = manual_mesh_dims or []
permutation = [d for d in tensor_split_dims_mapping if d >= 0
] + manual_mesh_dims
if len(permutation) > len(device_mesh.shape):
raise ValueError(
'Number of tensor split dimensions (%r) is larger than device mesh '
'rank (%r). tensor_split_dims_mapping: %r, device_mesh.shape: %r' %
(len(permutation), len(
device_mesh.shape), tensor_split_dims_mapping, device_mesh.shape))
# Append replicated dimensions to the end.
transpose_permutation = permutation + [
d for d in range(len(device_mesh.shape)) if d not in permutation
]
tile_assignment = _np.transpose(device_mesh, transpose_permutation)
tile_shape = [
1 if d < 0 else device_mesh.shape[d]
for d in (tensor_split_dims_mapping + manual_mesh_dims)
]
subgroup_modes = [xla_data_pb2.OpSharding.MANUAL] * len(manual_mesh_dims)
partial = len(permutation) < len(device_mesh.shape)
if partial:
tile_shape.append(_np.prod(device_mesh.shape) // _np.prod(tile_shape))
subgroup_modes.append(xla_data_pb2.OpSharding.REPLICATED)
tile_assignment = _np.reshape(tile_assignment, tile_shape)
if manual_mesh_dims:
return Sharding.subgroup_tile(tile_assignment, subgroup_modes)
if partial:
return Sharding.partial_tile(tile_assignment)
return Sharding.tile(tile_assignment)
def mesh_split(tensor,
device_mesh,
tensor_split_dims_mapping,
use_sharding_op=False,
manual_mesh_dims=None,
unspecified_dims=None):
"""Returns a tensor that is split along multiple dimensions in a device mesh.
Args:
tensor: A tf.Tensor to split.
device_mesh: An np.ndarray describing the topology of the device mesh and
each element is the ID of the device in the topology.
tensor_split_dims_mapping: A list of integers that map each tensor axis to
the device mesh axis along which it is sharded. Its length is the tensor
rank, and tensor_split_dims_mapping[i] is device mesh axis for tensor
dimension i. Use -1 for tensor dimensions that are not sharded.
use_sharding_op: If true, adds a sharding op to set the sharding.
manual_mesh_dims: An optional list of mesh dims for manual subgroups.
unspecified_dims: An optional list of dimensions unspecified.
Raises:
ValueError: The number of tensor split dimensions is larger than device mesh
rank.
"""
sharding = mesh_split_sharding(device_mesh, tensor_split_dims_mapping,
manual_mesh_dims)
return sharding.apply_to_tensor(
tensor,
use_sharding_op=use_sharding_op,
unspecified_dims=unspecified_dims or [])
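# --- Illustrative sketch, not part of the original module ---
# Building a sharding for a rank-2 tensor over a 2x4 device mesh: rows are
# split across mesh axis 0 and columns across mesh axis 1. The device IDs
# below are illustrative only.
def _example_mesh_split_sharding():
  device_mesh = _np.arange(8).reshape(2, 4)
  return mesh_split_sharding(device_mesh, tensor_split_dims_mapping=[0, 1])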
|
Sharding
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/trainer/optimization/test_multiple_optimizers.py
|
{
"start": 786,
"end": 3004
}
|
class ____(BoringModel):
def configure_optimizers(self):
opt_a = torch.optim.SGD(self.layer.parameters(), lr=0.001)
opt_b = torch.optim.SGD(self.layer.parameters(), lr=0.001)
return opt_a, opt_b
def test_multiple_optimizers_automatic_optimization_raises():
"""Test that multiple optimizers in automatic optimization is not allowed."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
model = TestModel()
model.automatic_optimization = True
trainer = pl.Trainer(logger=False, enable_checkpointing=False)
with pytest.raises(RuntimeError, match="Remove the `optimizer_idx` argument from `training_step`"):
trainer.fit(model)
class TestModel(BoringModel):
def configure_optimizers(self):
return torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())
model = TestModel()
model.automatic_optimization = True
trainer = pl.Trainer(logger=False, enable_checkpointing=False)
with pytest.raises(RuntimeError, match="multiple optimizers is only supported with manual optimization"):
trainer.fit(model)
def test_multiple_optimizers_manual(tmp_path):
class TestModel(MultiOptModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx):
self.training_step_called = True
# manual optimization
opt_a, opt_b = self.optimizers()
loss_1 = self.step(batch[0])
# fake generator
self.manual_backward(loss_1)
opt_a.step()
opt_a.zero_grad()
# fake discriminator
loss_2 = self.step(batch[0])
self.manual_backward(loss_2)
opt_b.step()
opt_b.zero_grad()
model = TestModel()
model.val_dataloader = None
trainer = pl.Trainer(
default_root_dir=tmp_path, limit_train_batches=2, max_epochs=1, log_every_n_steps=1, enable_model_summary=False
)
trainer.fit(model)
assert model.training_step_called
|
MultiOptModel
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/conjecture/shrinking/integer.py
|
{
"start": 624,
"end": 2218
}
|
class ____(Shrinker):
"""Attempts to find a smaller integer. Guaranteed things to try ``0``,
``1``, ``initial - 1``, ``initial - 2``. Plenty of optimisations beyond
that but those are the guaranteed ones.
"""
def short_circuit(self):
for i in range(2):
if self.consider(i):
return True
self.mask_high_bits()
if self.size > 8:
# see if we can squeeze the integer into a single byte.
self.consider(self.current >> (self.size - 8))
self.consider(self.current & 0xFF)
return self.current == 2
def check_invariants(self, value):
assert value >= 0
def left_is_better(self, left, right):
return left < right
def run_step(self):
self.shift_right()
self.shrink_by_multiples(2)
self.shrink_by_multiples(1)
def shift_right(self):
base = self.current
find_integer(lambda k: k <= self.size and self.consider(base >> k))
def mask_high_bits(self):
base = self.current
n = base.bit_length()
@find_integer
def try_mask(k):
if k >= n:
return False
mask = (1 << (n - k)) - 1
return self.consider(mask & base)
@property
def size(self) -> int:
return self.current.bit_length()
def shrink_by_multiples(self, k):
base = self.current
@find_integer
def shrunk(n):
attempt = base - n * k
return attempt >= 0 and self.consider(attempt)
return shrunk > 0
|
Integer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py
|
{
"start": 2593,
"end": 4581
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
def _testDataset(self, dataset, function, predicate):
expected_output = []
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if self.evaluate(b):
expected_output.append(r)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_combinations()))
def testMapAndFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
testing.assert_next(["Map", "Filter",
"Map"])).map(function).filter(predicate)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testDataset(dataset, function, predicate)
@combinations.generate(test_base.default_test_combinations())
def testCapturedInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We currently do not support functions with captured inputs.
dataset = dataset_ops.Dataset.range(10).apply(
testing.assert_next(["Map", "Filter"])).map(function).filter(predicate)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testDataset(dataset, function, predicate)
if __name__ == "__main__":
test.main()
|
MapAndFilterFusionTest
|
python
|
ray-project__ray
|
python/ray/data/_internal/datasource/image_datasink.py
|
{
"start": 125,
"end": 705
}
|
class ____(RowBasedFileDatasink):
def __init__(
self, path: str, column: str, file_format: str, **file_datasink_kwargs
):
super().__init__(path, file_format=file_format, **file_datasink_kwargs)
self.column = column
self.file_format = file_format
def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"):
from PIL import Image
image = Image.fromarray(row[self.column])
buffer = io.BytesIO()
image.save(buffer, format=self.file_format)
file.write(buffer.getvalue())
|
ImageDatasink
|
python
|
Textualize__rich
|
tests/test_protocol.py
|
{
"start": 361,
"end": 1861
}
|
class ____:
def __getattr__(self, name):
return 12
def __repr__(self) -> str:
return "Fake()"
def test_rich_cast_fake():
fake = Fake()
console = Console(file=io.StringIO())
console.print(fake)
assert console.file.getvalue() == "Fake()\n"
def test_rich_cast_container():
foo = Foo()
console = Console(file=io.StringIO(), legacy_windows=False)
console.print(Panel.fit(foo, padding=0))
assert console.file.getvalue() == "╭───╮\n│Foo│\n╰───╯\n"
def test_abc():
foo = Foo()
assert isinstance(foo, RichRenderable)
assert isinstance(Text("hello"), RichRenderable)
assert isinstance(Panel("hello"), RichRenderable)
assert not isinstance(foo, str)
assert not isinstance("foo", RichRenderable)
assert not isinstance([], RichRenderable)
def test_cast_deep():
class B:
def __rich__(self) -> Foo:
return Foo()
class A:
def __rich__(self) -> B:
return B()
console = Console(file=io.StringIO())
console.print(A())
assert console.file.getvalue() == "Foo\n"
def test_cast_recursive():
class B:
def __rich__(self) -> "A":
return A()
def __repr__(self) -> str:
return "<B>"
class A:
def __rich__(self) -> B:
return B()
def __repr__(self) -> str:
return "<A>"
console = Console(file=io.StringIO())
console.print(A())
assert console.file.getvalue() == "<B>\n"
|
Fake
|
python
|
getsentry__sentry
|
src/sentry/users/services/usersocialauth/impl.py
|
{
"start": 899,
"end": 4048
}
|
class ____(UserSocialAuthService):
def get_many(self, *, filter: UserSocialAuthFilterArgs) -> list[RpcUserSocialAuth]:
return self._FQ.get_many(filter=filter)
def get_one_or_none(self, *, filter: UserSocialAuthFilterArgs) -> RpcUserSocialAuth | None:
auths = self.get_many(filter=filter)
if len(auths) == 0:
return None
return auths[0]
def revoke_token(
self, *, filter: UserSocialAuthFilterArgs, drop_token: bool = True
) -> list[RpcUserSocialAuth]:
"""
Calls UserSocialAuth.revoke_token() on all matching results, returning the modified RpcUserSocialAuths.
"""
db_auths = self._FQ.query_many(filter=filter)
for db_auth in db_auths:
db_auth.revoke_token(drop_token=drop_token)
return self.get_many(filter=filter)
def refresh_token(self, *, filter: UserSocialAuthFilterArgs) -> list[RpcUserSocialAuth]:
"""
Calls UserSocialAuth.refresh_token() on all matching results, returning the modified RpcUserSocialAuths.
"""
db_auths = self._FQ.query_many(filter=filter)
for db_auth in db_auths:
db_auth.refresh_token()
return self.get_many(filter=filter)
def link_auth(self, *, usa: RpcUserSocialAuth, organization: RpcOrganization) -> bool:
try:
integration, _created = Integration.objects.get_or_create(
provider=usa.provider, external_id=usa.uid
)
integration.add_organization(organization, None, default_auth_id=usa.id)
except Exception as error:
sentry_sdk.capture_exception(error=error)
return False
return True
class _UserSocialAuthFilterQuery(
FilterQueryDatabaseImpl[UserSocialAuth, UserSocialAuthFilterArgs, RpcUserSocialAuth, None]
):
def apply_filters(
self, query: QuerySet[UserSocialAuth], filters: UserSocialAuthFilterArgs
) -> QuerySet[UserSocialAuth]:
if "id" in filters:
query = query.filter(id=filters["id"])
if "user_id" in filters:
query = query.filter(user_id=filters["user_id"])
if "provider" in filters:
query = query.filter(provider=filters["provider"])
if "uid" in filters:
query = query.filter(uid=filters["uid"])
return query
def base_query(self, select_related: bool = True) -> QuerySet[UserSocialAuth]:
return UserSocialAuth.objects.filter()
def filter_arg_validator(self) -> Callable[[UserSocialAuthFilterArgs], str | None]:
return self._filter_has_any_key_validator(
*UserSocialAuthFilterArgs.__annotations__.keys()
)
def serialize_api(self, serializer: None) -> Serializer:
raise NotImplementedError("API Serialization not supported for UserSocialAuthService")
def serialize_rpc(self, auth: UserSocialAuth) -> RpcUserSocialAuth:
return serialize_usersocialauth(auth=auth)
_FQ = _UserSocialAuthFilterQuery()
|
DatabaseBackedUserSocialAuthService
|
python
|
facebookresearch__faiss
|
faiss/python/extra_wrappers.py
|
{
"start": 12936,
"end": 20493
}
|
class ____:
"""Object that performs k-means clustering and manages the centroids.
The `Kmeans` class is essentially a wrapper around the C++ `Clustering` object.
Parameters
----------
d : int
dimension of the vectors to cluster
k : int
number of clusters
gpu: bool or int, optional
False: don't use GPU
True: use all GPUs
number: use this many GPUs
progressive_dim_steps:
use a progressive dimension clustering (with that number of steps)
Subsequent parameters are fields of the Clustering object. The most important are:
niter: int, optional
clustering iterations
nredo: int, optional
redo clustering this many times and keep best
verbose: bool, optional
spherical: bool, optional
do we want normalized centroids?
int_centroids: bool, optional
round centroids coordinates to integer
seed: int, optional
seed for the random number generator
"""
def __init__(self, d, k, **kwargs):
"""d: input dimension, k: nb of centroids. Additional
parameters are passed on the ClusteringParameters object,
including niter=25, verbose=False, spherical = False
"""
self.d = d
self.reset(k)
self.gpu = False
if "progressive_dim_steps" in kwargs:
self.cp = ProgressiveDimClusteringParameters()
else:
self.cp = ClusteringParameters()
for k, v in kwargs.items():
if k == 'gpu':
if v == True or v == -1:
v = get_num_gpus()
self.gpu = v
else:
# if this raises an exception, it means that it is a non-existent field
getattr(self.cp, k)
setattr(self.cp, k, v)
self.set_index()
def set_index(self):
d = self.d
if self.cp.__class__ == ClusteringParameters:
if self.cp.spherical:
self.index = IndexFlatIP(d)
else:
self.index = IndexFlatL2(d)
if self.gpu:
self.index = faiss.index_cpu_to_all_gpus(self.index, ngpu=self.gpu)
else:
if self.gpu:
fac = GpuProgressiveDimIndexFactory(ngpu=self.gpu)
else:
fac = ProgressiveDimIndexFactory()
self.fac = fac
def reset(self, k=None):
""" prepare k-means object to perform a new clustering, possibly
with another number of centroids """
if k is not None:
self.k = int(k)
self.centroids = None
self.obj = None
self.iteration_stats = None
def train(self, x, weights=None, init_centroids=None):
""" Perform k-means clustering.
On output of the function call:
- the centroids are in the centroids field of size (`k`, `d`).
- the objective value at each iteration is in the array obj (size `niter`)
- detailed optimization statistics are in the array iteration_stats.
Parameters
----------
x : array_like
Training vectors, shape (n, d), `dtype` must be float32 and n should
be larger than the number of clusters `k`.
weights : array_like
weight associated to each vector, shape `n`
init_centroids : array_like
initial set of centroids, shape (k, d)
Returns
-------
final_obj: float
final optimization objective
"""
x = np.ascontiguousarray(x, dtype='float32')
n, d = x.shape
assert d == self.d
if self.cp.__class__ == ClusteringParameters:
# regular clustering
clus = Clustering(d, self.k, self.cp)
if init_centroids is not None:
nc, d2 = init_centroids.shape
assert d2 == d
faiss.copy_array_to_vector(init_centroids.ravel(), clus.centroids)
clus.train(x, self.index, weights)
else:
# not supported for progressive dim
assert weights is None
assert init_centroids is None
assert not self.cp.spherical
clus = ProgressiveDimClustering(d, self.k, self.cp)
clus.train(n, swig_ptr(x), self.fac)
centroids = faiss.vector_float_to_array(clus.centroids)
self.centroids = centroids.reshape(self.k, d)
stats = clus.iteration_stats
stats = [stats.at(i) for i in range(stats.size())]
self.obj = np.array([st.obj for st in stats])
# copy all the iteration_stats objects to a python array
stat_fields = 'obj time time_search imbalance_factor nsplit'.split()
self.iteration_stats = [
{field: getattr(st, field) for field in stat_fields}
for st in stats
]
return self.obj[-1] if self.obj.size > 0 else 0.0
def assign(self, x):
x = np.ascontiguousarray(x, dtype='float32')
assert self.centroids is not None, "should train before assigning"
self.index.reset()
self.index.add(self.centroids)
D, I = self.index.search(x, 1)
return D.ravel(), I.ravel()
###########################################
# Packing and unpacking bitstrings
###########################################
def is_sequence(x):
return isinstance(x, collections.abc.Sequence)
pack_bitstrings_c = pack_bitstrings
def pack_bitstrings(a, nbit):
"""
Pack a set of integers (i, j), where i=0:n and j=0:M, into
n bitstrings.
Output is an uint8 array of size (n, code_size), where code_size is
such that at most 7 bits per code are wasted.
If nbit is an integer: all entries take nbit bits.
If nbit is an array: entry (i, j) takes nbit[j] bits.
"""
n, M = a.shape
a = np.ascontiguousarray(a, dtype='int32')
if is_sequence(nbit):
nbit = np.ascontiguousarray(nbit, dtype='int32')
assert nbit.shape == (M,)
code_size = int((nbit.sum() + 7) // 8)
b = np.empty((n, code_size), dtype='uint8')
pack_bitstrings_c(
n, M, swig_ptr(nbit), swig_ptr(a), swig_ptr(b), code_size)
else:
code_size = (M * nbit + 7) // 8
b = np.empty((n, code_size), dtype='uint8')
pack_bitstrings_c(n, M, nbit, swig_ptr(a), swig_ptr(b), code_size)
return b
unpack_bitstrings_c = unpack_bitstrings
def unpack_bitstrings(b, M_or_nbits, nbit=None):
"""
Unpack a set of integers (i, j), where i=0:n and j=0:M, from
n bitstrings (encoded as uint8s).
Input is an uint8 array of size (n, code_size), where code_size is
such that at most 7 bits per code are wasted.
Two forms:
- when called with (array, M, nbit): there are M entries of size
nbit per row
- when called with (array, nbits): element (i, j) is encoded in
nbits[j] bits
"""
n, code_size = b.shape
if nbit is None:
nbit = np.ascontiguousarray(M_or_nbits, dtype='int32')
M = len(nbit)
min_code_size = int((nbit.sum() + 7) // 8)
assert code_size >= min_code_size
a = np.empty((n, M), dtype='int32')
unpack_bitstrings_c(
n, M, swig_ptr(nbit),
swig_ptr(b), code_size, swig_ptr(a))
else:
M = M_or_nbits
min_code_size = (M * nbit + 7) // 8
assert code_size >= min_code_size
a = np.empty((n, M), dtype='int32')
unpack_bitstrings_c(
n, M, nbit, swig_ptr(b), code_size, swig_ptr(a))
return a
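# --- Illustrative sketch, not part of the original module ---
# Round-tripping codes through the packing helpers above: 3 entries of 4 bits
# each fit into (3 * 4 + 7) // 8 == 2 bytes per row.
def _example_bitstring_roundtrip():
    codes = np.array([[3, 5, 7]], dtype='int32')
    packed = pack_bitstrings(codes, 4)
    unpacked = unpack_bitstrings(packed, 3, 4)
    assert (unpacked == codes).all()
    return packed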
|
Kmeans
|
python
|
falconry__falcon
|
tests/test_inspect.py
|
{
"start": 12428,
"end": 13293
}
|
class ____:
def test_inspect_visitor(self):
iv = inspect.InspectVisitor()
with pytest.raises(RuntimeError, match='This visitor does not support'):
iv.process(123)
with pytest.raises(RuntimeError, match='This visitor does not support'):
iv.process(inspect.RouteInfo('f', 'o', 'o', []))
def test_process(self):
class FooVisitor(inspect.InspectVisitor):
def visit_route(self, route):
return 'foo'
assert FooVisitor().process(inspect.RouteInfo('f', 'o', 'o', [])) == 'foo'
def test_string_visitor_class():
assert issubclass(inspect.StringVisitor, inspect.InspectVisitor)
sv = inspect.StringVisitor()
assert sv.verbose is False
assert sv.internal is False
assert sv.name == ''
@pytest.mark.parametrize('internal', (True, False))
|
TestInspectVisitor
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/templates.py
|
{
"start": 24994,
"end": 25405
}
|
class ____(Lexer):
"""
Lexer for handling Cheetah's special $ tokens in Python syntax.
"""
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
for pos, type_, value in pylexer.get_tokens_unprocessed(text):
if type_ == Token.Error and value == '$':
type_ = Comment.Preproc
yield pos, type_, value
|
CheetahPythonLexer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclassTransform2.py
|
{
"start": 1167,
"end": 1942
}
|
class ____(ModelBase, order=True):
id: int
name: str = model_field(default="None")
c1_1 = Customer1(id=3, name="Sue", other_name="Susan")
# This should generate an error because the class is frozen.
c1_1.id = 4
# This should generate an error because the class is kw_only.
c1_2 = Customer1(3, "Sue")
# This should generate an error because other_name is missing.
c1_3 = Customer1(id=3, name="John")
# This should generate an error because comparison methods are
# not synthesized.
v1 = c1_1 < c1_2
c2_1 = Customer2(id=0, name="John")
c2_2 = Customer2(id=1)
v2 = c2_1 < c2_2
# This should generate an error because Customer2 supports
# keyword-only parameters for its constructor.
c2_3 = Customer2(0, "John")
@dataclass_transform(frozen_default=True)
|
Customer2
|
python
|
falconry__falcon
|
falcon/_typing.py
|
{
"start": 6123,
"end": 6409
}
|
class ____(Protocol[_ReqT, _RespT]):
"""WSGI Middleware with resource handler."""
def process_resource(
self,
req: _ReqT,
resp: _RespT,
resource: Resource | None,
params: dict[str, Any],
) -> None: ...
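# Illustrative sketch, not part of the original module: a middleware class with
# this method satisfies the protocol above (Falcon calls process_resource after
# routing and before the responder). The `matched_resource` name is made up.
class _ExampleResourceMiddleware:
    def process_resource(self, req, resp, resource, params) -> None:
        req.context.matched_resource = type(resource).__name__ if resource else None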
|
WsgiMiddlewareWithProcessResource
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/job.py
|
{
"start": 1111,
"end": 28673
}
|
class ____:
http_client: HttpClient
base_url: str
query: ShopifyBulkQuery
job_termination_threshold: float
job_size: float
job_checkpoint_interval: int
parent_stream_name: Optional[str] = None
parent_stream_cursor: Optional[str] = None
# 10Mb chunk size to save the file
_retrieve_chunk_size: Final[int] = 1024 * 1024 * 10
_job_max_retries: Final[int] = 6
_job_backoff_time: int = 5
# running job logger constraint, a message will be printed every 100-ish status checks
_log_job_msg_frequency: Final[int] = 100
# running job log counter
_log_job_msg_count: int = field(init=False, default=0)
# attempt counter
_concurrent_attempt: int = field(init=False, default=0)
# sleep time per creation attempt
_concurrent_interval: Final[int] = 30
# max attempts for job creation
_concurrent_max_retry: Final[int] = 120
# current values: _job_id, _job_state, _job_created_at, _job_self_canceled
_job_id: Optional[str] = field(init=False, default=None)
_job_state: str | None = field(init=False, default=None) # this string is based on ShopifyBulkJobStatus
# completed and saved Bulk Job result filename
_job_result_filename: Optional[str] = field(init=False, default=None)
# date-time when the Bulk Job was created on the server
_job_created_at: Optional[str] = field(init=False, default=None)
# indicates whether or not we manually force-canceled the current job
_job_self_canceled: bool = field(init=False, default=False)
# time between job status checks
_job_check_interval: Final[int] = 3
# 0.1 ~= P2H, default value, lower boundary for slice size
_job_size_min: Final[float] = 0.1
# last running job object count
_job_last_rec_count: int = field(init=False, default=0)
# the flag to adjust the next slice from the checkpointed cursor value
_job_adjust_slice_from_checkpoint: bool = field(init=False, default=False)
# keeps the last checkpointed cursor value for supported streams
_job_last_checkpoint_cursor_value: str | None = field(init=False, default=None)
# expand slice factor
_job_size_expand_factor: int = field(init=False, default=2)
# reduce slice factor
_job_size_reduce_factor: int = field(init=False, default=2)
# whether or not the slicer should revert the previous start value
_job_should_revert_slice: bool = field(init=False, default=False)
# 2 sec is set as default value to cover the case with the empty-fast-completed jobs
_job_last_elapsed_time: float = field(init=False, default=2.0)
def __post_init__(self) -> None:
self._job_size = self.job_size
# The upper boundary for slice size is limited by the value from the config, default value is `P30D`
self._job_size_max = self.job_size
# Each job should ideally be executed within the specified time (in sec),
# to maximize performance for multi-connection syncs and control the bulk job size within ± 1 hour (3600 sec).
# Ideally the source will balance on its own rate, based on the time taken to return the data for the slice.
# This behaviour can be overridden by providing the `BULK Job termination threshold` option in the `config`.
self._job_max_elapsed_time = self.job_termination_threshold
# how many records should be collected before we use checkpointing
self._job_checkpoint_interval = self.job_checkpoint_interval
# define Record Producer instance
self.record_producer: ShopifyBulkRecord = ShopifyBulkRecord(self.query, self.parent_stream_name, self.parent_stream_cursor)
@property
def _tools(self) -> BulkTools:
return BulkTools()
@property
def _job_state_to_fn_map(self) -> Mapping[str, Any]:
return {
ShopifyBulkJobStatus.CREATED.value: self._on_created_job,
ShopifyBulkJobStatus.CANCELING.value: self._on_canceling_job,
ShopifyBulkJobStatus.CANCELED.value: self._on_canceled_job,
ShopifyBulkJobStatus.COMPLETED.value: self._on_completed_job,
ShopifyBulkJobStatus.RUNNING.value: self._on_running_job,
ShopifyBulkJobStatus.TIMEOUT.value: self._on_timeout_job,
ShopifyBulkJobStatus.FAILED.value: self._on_failed_job,
ShopifyBulkJobStatus.ACCESS_DENIED.value: self._on_access_denied_job,
}
@property
def _job_size_adjusted_expand_factor(self, coef: float = 0.5) -> float:
"""
The Job Size expand factor is calculated using an EMA (Exponential Moving Average):
coef - the expansion coefficient
previous_expand_factor - previous factor value
Formula: expand_factor = coef * previous_expand_factor + (1 - coef)
"""
return coef * self._job_size_expand_factor + (1 - coef)
@property
def _job_size_adjusted_reduce_factor(self) -> float:
"""
The Job Size reduce factor is 2, by default.
"""
return self._job_size_reduce_factor
@property
def _job_elapsed_time_in_state(self) -> int:
"""
Returns the elapsed time taken while Job is in certain status/state.
"""
return (pdm.now() - pdm.parse(self._job_created_at)).in_seconds() if self._job_created_at else 0
@property
def _is_long_running_job(self) -> bool:
if self._job_elapsed_time_in_state:
if self._job_elapsed_time_in_state > self._job_max_elapsed_time:
# set the slicer to revert mode
self._job_should_revert_slice = True
return True
# reset slicer to normal mode
self._job_should_revert_slice = False
return False
@property
def _supports_checkpointing(self) -> bool:
"""
The flag to determine whether or not the BULK Stream supports the `BULK checkpointing`.
"""
return self.query.supports_checkpointing
@property
def _job_should_checkpoint(self) -> bool:
return self._supports_checkpointing and self._job_last_rec_count >= self._job_checkpoint_interval
@property
def _job_any_lines_collected(self) -> bool:
return self._job_last_rec_count > 0
def _expand_job_size(self) -> None:
self._job_size += self._job_size_adjusted_expand_factor
def _reduce_job_size(self) -> None:
self._job_size /= self._job_size_adjusted_reduce_factor
def _job_size_reduce_next(self) -> None:
# revert the flag
self._job_should_revert_slice = False
self._reduce_job_size()
def __adjust_job_size(self, job_current_elapsed_time: float) -> None:
if self._job_should_revert_slice:
pass
else:
if job_current_elapsed_time < 1 or job_current_elapsed_time < self._job_last_elapsed_time:
self._expand_job_size()
elif job_current_elapsed_time > self._job_last_elapsed_time < self._job_max_elapsed_time:
pass
# set the last job time
self._job_last_elapsed_time = job_current_elapsed_time
# check the job size slice interval are acceptable
self._job_size = max(self._job_size_min, min(self._job_size, self._job_size_max))
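# Illustrative walkthrough (values are assumptions, not from the original source): with
# _job_size = 10 days and a slice that completed faster than the previous one, the expand
# branch adds ~1.5, giving 11.5 days; when a long-running job sets the revert flag,
# `_job_size_reduce_next` later halves the size to 5 days instead. The final line always
# clamps the result to the [_job_size_min, _job_size_max] range.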
def __reset_state(self) -> None:
# reset the job state to default
self._job_state = None
# reset the filename to default
self._job_result_filename = None
# setting self-cancelation to default
self._job_self_canceled = False
# set the running job message counter to default
self._log_job_msg_count = 0
# set the running job object count to default
self._job_last_rec_count = 0
def _set_checkpointing(self) -> None:
# set the flag to adjust the next slice from the checkpointed cursor value
self._job_adjust_slice_from_checkpoint = True
def _reset_checkpointing(self) -> None:
# resetting the checkpoint flag, if the bulk job has completed normally
self._job_adjust_slice_from_checkpoint = False
def _set_last_checkpoint_cursor_value(self, checkpointed_cursor: str) -> None:
"""
Sets the last checkpoint cursor value.
Args:
checkpointed_cursor (str): The cursor value to set as the last checkpoint.
"""
self._job_last_checkpoint_cursor_value = checkpointed_cursor
def _checkpoint_cursor_has_collision(self, checkpointed_cursor: str) -> bool:
"""
Checks if the provided checkpointed cursor collides with the last checkpointed cursor value.
Args:
checkpointed_cursor (str): The cursor value to check for collision.
Returns:
bool: True if the provided cursor collides with the last checkpointed cursor value, False otherwise.
"""
return self._job_last_checkpoint_cursor_value == checkpointed_cursor
def _job_completed(self) -> bool:
return self._job_state == ShopifyBulkJobStatus.COMPLETED.value
def _job_canceled(self) -> bool:
return self._job_state == ShopifyBulkJobStatus.CANCELED.value
def _job_failed(self) -> bool:
return self._job_state == ShopifyBulkJobStatus.FAILED.value
def _job_cancel(self) -> None:
_, canceled_response = self.http_client.send_request(
http_method="POST",
url=self.base_url,
json={"query": ShopifyBulkTemplates.cancel(self._job_id)},
request_kwargs={},
)
# mark the job was self-canceled
self._job_self_canceled = True
# check CANCELED Job health
self._job_healthcheck(canceled_response)
# sleep to ensure the cancelation
sleep(self._job_check_interval)
def _log_job_state_with_count(self) -> None:
"""
Print the Job status/state info message every N requests, to minimize the noise in the logs.
"""
if self._log_job_msg_count < self._log_job_msg_frequency:
self._log_job_msg_count += 1
else:
message = f"Elapsed time: {self._job_elapsed_time_in_state} sec"
if self._job_last_rec_count > 0:
count_message = f". Rows collected: {self._job_last_rec_count}"
message = message + count_message
self._log_state(message)
self._log_job_msg_count = 0
def _log_state(self, message: Optional[str] = None) -> None:
pattern = f"Stream: `{self.http_client.name}`, the BULK Job: `{self._job_id}` is {self._job_state}"
if message:
LOGGER.info(f"{pattern}. {message}.")
else:
LOGGER.info(pattern)
def _job_get_result(self, response: Optional[requests.Response] = None) -> Optional[str]:
parsed_response = response.json().get("data", {}).get("node", {}) if response else None
# get `complete` or `partial` result from collected Bulk Job results
full_result_url = parsed_response.get("url") if parsed_response else None
partial_result_url = parsed_response.get("partialDataUrl") if parsed_response else None
job_result_url = full_result_url if full_result_url else partial_result_url
if job_result_url:
# save to local file using chunks to avoid OOM
filename = self._tools.filename_from_url(job_result_url)
_, response = self.http_client.send_request(http_method="GET", url=job_result_url, request_kwargs={"stream": True})
response.raise_for_status()
with open(filename, "wb") as file:
for chunk in response.iter_content(chunk_size=self._retrieve_chunk_size):
file.write(chunk)
# add `<end_of_file>` line to the bottom of the saved data for easy parsing
file.write(END_OF_FILE.encode())
return filename
def _job_get_checkpointed_result(self, response: Optional[requests.Response]) -> None:
if self._job_any_lines_collected or self._job_should_checkpoint:
# set the flag to adjust the next slice from the checkpointed cursor value
self._set_checkpointing()
# fetch the collected records from CANCELED Job on checkpointing
self._job_result_filename = self._job_get_result(response)
def _job_update_state(self, response: Optional[requests.Response] = None) -> None:
if response:
self._job_state = response.json().get("data", {}).get("node", {}).get("status")
self._job_last_rec_count = int(response.json().get("data", {}).get("node", {}).get("objectCount", 0))
if self._job_state == ShopifyBulkJobStatus.RUNNING.value:
self._log_job_state_with_count()
elif self._job_state in [ShopifyBulkJobStatus.CANCELED.value, ShopifyBulkJobStatus.CANCELING.value]:
# do not emit `CANCELED / CANCELING` Bulk Job status, while checkpointing
if not self._job_should_checkpoint:
self._log_job_state_with_count()
else:
self._log_state()
def _on_created_job(self, **kwargs) -> None:
pass
def _on_canceled_job(self, response: requests.Response) -> Optional[AirbyteTracedException]:
if not self._job_self_canceled:
raise ShopifyBulkExceptions.BulkJobCanceled(
f"The BULK Job: `{self._job_id}` exited with {self._job_state}, details: {response.text}"
)
else:
self._job_get_checkpointed_result(response)
def _on_canceling_job(self, **kwargs) -> None:
sleep(self._job_check_interval)
def _cancel_on_long_running_job(self) -> None:
LOGGER.info(
f"Stream: `{self.http_client.name}` the BULK Job: {self._job_id} runs longer than expected ({self._job_max_elapsed_time} sec). Retry with the reduced `Slice Size` after self-cancelation."
)
self._job_cancel()
def _cancel_on_checkpointing(self) -> None:
LOGGER.info(f"Stream: `{self.http_client.name}`, checkpointing after >= `{self._job_checkpoint_interval}` rows collected.")
# self-cancel the job to checkpoint the records collected so far
self._job_cancel()
def _on_running_job(self, **kwargs) -> None:
if self._is_long_running_job:
self._cancel_on_long_running_job()
elif self._job_should_checkpoint:
self._cancel_on_checkpointing()
else:
sleep(self._job_check_interval)
def _on_completed_job(self, response: Optional[requests.Response] = None) -> None:
self._job_result_filename = self._job_get_result(response)
def _on_failed_job(self, response: requests.Response) -> AirbyteTracedException | None:
if not self._supports_checkpointing:
raise ShopifyBulkExceptions.BulkJobFailed(
f"The BULK Job: `{self._job_id}` exited with {self._job_state}, details: {response.text}",
)
else:
# when the Bulk Job fails, a `partialDataUrl` is usually available,
# so we leverage checkpointing in this case.
self._job_get_checkpointed_result(response)
def _on_timeout_job(self, **kwargs) -> AirbyteTracedException:
raise ShopifyBulkExceptions.BulkJobTimout(
f"The BULK Job: `{self._job_id}` exited with {self._job_state}, please reduce the `GraphQL BULK Date Range in Days` in SOURCES > Your Shopify Source > SETTINGS.",
)
def _on_access_denied_job(self, **kwargs) -> AirbyteTracedException:
raise ShopifyBulkExceptions.BulkJobAccessDenied(
f"The BULK Job: `{self._job_id}` exited with {self._job_state}, please check your PERMISSION to fetch the data for this stream.",
)
def _on_job_with_errors(self, errors: List[Mapping[str, Any]]) -> AirbyteTracedException:
raise ShopifyBulkExceptions.BulkJobError(f"Could not validate the status of the BULK Job `{self._job_id}`. Errors: {errors}.")
def _on_non_handable_job_error(self, errors: List[Mapping[str, Any]]) -> AirbyteTracedException:
raise ShopifyBulkExceptions.BulkJobNonHandableError(f"The Stream: `{self.http_client.name}`, Non-handable error occurred: {errors}")
def _get_server_errors(self, response: requests.Response) -> List[Optional[Mapping[str, Any]]]:
server_errors = response.json().get("errors", [])
return [server_errors] if isinstance(server_errors, str) else server_errors
def _get_user_errors(self, response: requests.Response) -> List[Optional[Mapping[str, Any]]]:
user_errors = response.json().get("data", {}).get("bulkOperationRunQuery", {}).get("userErrors", [])
return [user_errors] if isinstance(user_errors, str) else user_errors
def _collect_bulk_errors(self, response: requests.Response) -> List[Optional[Mapping[str, Any]]]:
try:
return self._get_server_errors(response) + self._get_user_errors(response)
except (Exception, JSONDecodeError) as e:
raise ShopifyBulkExceptions.BulkJobBadResponse(
f"Couldn't check the `response` for `errors`, status: {response.status_code}, response: `{response.text}`. Trace: {repr(e)}."
)
def _job_healthcheck(self, response: requests.Response) -> Optional[Exception]:
errors = self._collect_bulk_errors(response)
if self._job_state and errors:
self._on_job_with_errors(errors)
def _job_track_running(self) -> None:
_, response = self.http_client.send_request(
http_method="POST",
url=self.base_url,
json={"query": ShopifyBulkTemplates.status(self._job_id)},
request_kwargs={},
)
self._job_healthcheck(response)
self._job_update_state(response)
self._job_state_to_fn_map.get(self._job_state)(response=response)
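# Illustrative flow (hedged, derived from the handlers above): a RUNNING status dispatches to
# `_on_running_job`, which either sleeps, self-cancels to checkpoint, or self-cancels a
# long-running job; a COMPLETED status dispatches to `_on_completed_job`, which saves the result file.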
def _has_running_concurrent_job(self, errors: Optional[Iterable[Mapping[str, Any]]] = None) -> bool:
"""
When concurrent BULK Job is already running for the same SHOP we receive:
Error example:
[
{
'code': 'OPERATION_IN_PROGRESS',
'field': None,
'message': 'A bulk query operation for this app and shop is already in progress: gid://shopify/BulkOperation/4039184154813.',
}
]
"""
# the errors are collected upstream by `_collect_bulk_errors` and passed in here
if errors:
for error in errors:
error_code = error.get("code", "") if isinstance(error, dict) else ""
if error_code == BulkOperationUserErrorCode.OPERATION_IN_PROGRESS.value:
return True
return False
def _has_reached_max_concurrency(self) -> bool:
return self._concurrent_attempt == self._concurrent_max_retry
def _should_switch_shop_name(self, response: requests.Response) -> bool:
"""
Sometimes the API returns a redirected response that points to the same Store but with a different Name:
>> case:
-- The user inputs the `shop name` as "A",
while attempting to place the BULK Job
-- The response contains results redirected to the `shop name` "B", like:
response.url == "https://B.myshopify.com"
This redirection is related to:
1) protecting `aliased` or `hidden` store names from being exposed
2) store data `migrated` to a `new store`, but still referenced by the old one
reference issue: https://github.com/airbytehq/oncall/issues/5866
"""
if self.base_url != response.url:
self.base_url = response.url
return True
return False
@bulk_retry_on_exception()
def _job_check_state(self) -> None:
while not self._job_completed():
if self._job_canceled():
break
elif self._job_failed():
break
else:
self._job_track_running()
@bulk_retry_on_exception()
def create_job(self, stream_slice: Mapping[str, str], filter_field: str) -> None:
if stream_slice:
query = self.query.get(filter_field, stream_slice["start"], stream_slice["end"])
else:
query = self.query.get()
_, response = self.http_client.send_request(
http_method="POST",
url=self.base_url,
json={"query": ShopifyBulkTemplates.prepare(query)},
request_kwargs={},
)
errors = self._collect_bulk_errors(response)
if self._has_running_concurrent_job(errors):
# when the concurrent job takes place, another job could not be created
# we typically need to wait and retry, but no longer than 10 min. (see retry in `bulk_retry_on_exception`)
raise ShopifyBulkExceptions.BulkJobCreationFailedConcurrentError(f"Failed to create job for stream {self.http_client.name}")
elif self._should_switch_shop_name(response):
# assign the new shop name, since the one specified in `config` was redirected to a different one.
raise ShopifyBulkExceptions.BulkJobRedirectToOtherShopError(f"Switching the `store` name, redirected to: {response.url}")
else:
# There was no concurrency error for this job, so even if there were other errors, we can reset this counter
self._concurrent_attempt = 0
if errors:
self._on_non_handable_job_error(errors)
self._job_process_created(response)
def _job_process_created(self, response: requests.Response) -> None:
"""
A Bulk Job with the CREATED status should be processed before we move forward with Job Status checks.
"""
bulk_response = response.json().get("data", {}).get("bulkOperationRunQuery", {}).get("bulkOperation", {}) if response else None
if bulk_response and bulk_response.get("status") == ShopifyBulkJobStatus.CREATED.value:
self._job_id = bulk_response.get("id")
self._job_created_at = bulk_response.get("createdAt")
self._job_state = ShopifyBulkJobStatus.CREATED.value
LOGGER.info(f"Stream: `{self.http_client.name}`, the BULK Job: `{self._job_id}` is {ShopifyBulkJobStatus.CREATED.value}")
def job_size_normalize(self, start: datetime, end: datetime) -> None:
# shrink the slice size when it is bigger than the remaining window to sync,
# to preserve correct job size adjustments when this is the only job we need to run, based on the STATE provided
requested_slice_size = (end - start).total_days()
self._job_size = requested_slice_size if requested_slice_size < self._job_size else self._job_size
def get_adjusted_job_start(self, slice_start: datetime) -> datetime:
step = self._job_size if self._job_size else self._job_size_min
return slice_start.add(days=step)
def _adjust_slice_end(
self, slice_end: datetime, checkpointed_cursor: Optional[str] = None, filter_checkpointed_cursor: Optional[str] = None
) -> datetime:
"""
Choose between the existing `slice_end` value, the `checkpointed_cursor` value, or the `filter_checkpointed_cursor` value, if provided.
Optionally: raises the `transient` error if a checkpoint collision occurs.
Note: `filter_checkpointed_cursor` is only used when the cursor field is an ID, for streams like Customer Address etc.
This method should return a datetime built from the last checkpointed value to adjust the slice end; when the cursor value
is an ID (int type), the end datetime is taken from `filter_checkpointed_cursor`, which is the value of the filter field from the last record.
See https://github.com/airbytehq/oncall/issues/9052 for more details.
"""
if checkpointed_cursor:
if self._checkpoint_cursor_has_collision(checkpointed_cursor):
raise ShopifyBulkExceptions.BulkJobCheckpointCollisionError(
f"The stream: `{self.http_client.name}` checkpoint collision is detected. Try to increase the `BULK Job checkpoint (rows collected)` to the bigger value. The stream will be synced again during the next sync attempt."
)
# set the checkpointed cursor value
self._set_last_checkpoint_cursor_value(checkpointed_cursor)
if isinstance(checkpointed_cursor, str):
return pdm.parse(checkpointed_cursor)
if isinstance(checkpointed_cursor, int):
return pdm.parse(filter_checkpointed_cursor)
return slice_end
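# Illustrative behaviour (values are assumptions, not from the original source):
# _adjust_slice_end(slice_end, "2024-01-15T00:00:00Z") returns the parsed checkpoint datetime
# instead of `slice_end`; _adjust_slice_end(slice_end, 123456789, "2024-01-15T00:00:00Z")
# parses `filter_checkpointed_cursor` because the checkpointed cursor is an integer ID;
# with no checkpointed cursor, `slice_end` is returned unchanged.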
def get_adjusted_job_end(
self,
slice_start: datetime,
slice_end: datetime,
checkpointed_cursor: Optional[str] = None,
filter_checkpointed_cursor: Optional[str] = None,
) -> datetime:
if self._job_adjust_slice_from_checkpoint:
# reset the checkpointing flag to its default before the next slice is emitted, to avoid an infinite loop
self._reset_checkpointing()
return self._adjust_slice_end(slice_end, checkpointed_cursor, filter_checkpointed_cursor)
if self._is_long_running_job:
self._job_size_reduce_next()
return slice_start
return slice_end
def _emit_final_job_message(self, job_current_elapsed_time: int) -> None:
final_message = f"Stream: `{self.http_client.name}`, the BULK Job: `{self._job_id}` time elapsed: {job_current_elapsed_time} sec."
if self._job_any_lines_collected:
lines_collected_message = f" Rows collected: {self._job_last_rec_count} --> records: `{self.record_producer.record_composed}`."
final_message = final_message + lines_collected_message
# emit final Bulk job status message
LOGGER.info(f"{final_message}")
def _process_bulk_results(self) -> Iterable[Mapping[str, Any]]:
if self._job_result_filename:
# produce records from saved bulk job result
yield from self.record_producer.read_file(self._job_result_filename)
else:
yield from []
@limiter.balance_rate_limit(api_type=ApiTypeEnum.graphql.value)
def job_get_results(self) -> Optional[Iterable[Mapping[str, Any]]]:
"""
This method checks the status for the `CREATED` Shopify BULK Job, using its `ID`.
The time spent for the Job execution is tracked to understand the effort.
"""
job_started = time()
try:
# track created job until it's COMPLETED
self._job_check_state()
yield from self._process_bulk_results()
except (
ShopifyBulkExceptions.BulkJobFailed,
ShopifyBulkExceptions.BulkJobTimout,
ShopifyBulkExceptions.BulkJobAccessDenied,
# when the job is canceled by non-source actions,
# we should raise the system_error
ShopifyBulkExceptions.BulkJobCanceled,
) as bulk_job_error:
raise bulk_job_error
finally:
job_current_elapsed_time = round((time() - job_started), 3)
# emit the final Bulk Job log message
self._emit_final_job_message(job_current_elapsed_time)
# check whether or not we should expand or reduce the size of the slice
self.__adjust_job_size(job_current_elapsed_time)
# reset the state for COMPLETED job
self.__reset_state()
|
ShopifyBulkManager
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_schema.py
|
{
"start": 29342,
"end": 36839
}
|
class ____:
def test_parse_dict(self):
schema = LoggingConfig.parse_obj(
{
"log_level": logging.DEBUG,
"encoding": "JSON",
"logs_dir": "/my_dir",
"enable_access_log": True,
}
)
assert schema.log_level == "DEBUG"
assert schema.encoding == "JSON"
assert schema.logs_dir == "/my_dir"
assert schema.enable_access_log
assert schema.additional_log_standard_attrs == []
# Test string values for log_level.
schema = LoggingConfig.parse_obj(
{
"log_level": "DEBUG",
}
)
assert schema.log_level == "DEBUG"
def test_wrong_encoding_type(self):
with pytest.raises(ValidationError):
LoggingConfig.parse_obj(
{
"logging_level": logging.INFO,
"encoding": "NOT_EXIST",
"logs_dir": "/my_dir",
"enable_access_log": True,
}
)
def test_default_values(self):
schema = LoggingConfig.parse_obj({})
assert schema.log_level == "INFO"
assert schema.encoding == "TEXT"
assert schema.logs_dir is None
assert schema.enable_access_log
assert schema.additional_log_standard_attrs == []
def test_additional_log_standard_attrs_type(self):
schema = LoggingConfig.parse_obj({"additional_log_standard_attrs": ["name"]})
assert isinstance(schema.additional_log_standard_attrs, list)
assert schema.additional_log_standard_attrs == ["name"]
def test_additional_log_standard_attrs_type_error(self):
with pytest.raises(ValidationError):
LoggingConfig.parse_obj({"additional_log_standard_attrs": "name"})
def test_additional_log_standard_attrs_deduplicate(self):
schema = LoggingConfig.parse_obj(
{"additional_log_standard_attrs": ["name", "name"]}
)
assert schema.additional_log_standard_attrs == ["name"]
# This function is defined globally to be accessible via import path
def global_f():
return "Hello world!"
def test_deployment_to_schema_to_deployment():
@serve.deployment(
num_replicas=3,
ray_actor_options={
"runtime_env": {
"working_dir": TEST_MODULE_PINNED_URI,
"py_modules": [TEST_DEPLOY_GROUP_PINNED_URI],
}
},
)
def f():
# The body of this function doesn't matter. It gets replaced by
# global_f() when the import path in f._func_or_class is overwritten.
# This function is used as a convenience to apply the @serve.deployment
# decorator without converting global_f() into a Deployment object.
pass
deployment = schema_to_deployment(deployment_to_schema(f))
deployment = deployment.options(
func_or_class="ray.serve.tests.test_schema.global_f"
)
assert deployment.num_replicas == 3
assert (
deployment.ray_actor_options["runtime_env"]["working_dir"]
== TEST_MODULE_PINNED_URI
)
assert deployment.ray_actor_options["runtime_env"]["py_modules"] == [
TEST_DEPLOY_GROUP_PINNED_URI,
]
def test_unset_fields_schema_to_deployment_ray_actor_options():
# Ensure unset fields are excluded from ray_actor_options
@serve.deployment(
num_replicas=3,
ray_actor_options={},
)
def f():
pass
deployment = schema_to_deployment(deployment_to_schema(f))
deployment = deployment.options(
func_or_class="ray.serve.tests.test_schema.global_f"
)
# Serve will set num_cpus to 1 if it's not set.
assert len(deployment.ray_actor_options) == 1
assert deployment.ray_actor_options["num_cpus"] == 1
def test_serve_instance_details_is_json_serializable():
"""Test that ServeInstanceDetails is json serializable."""
serialized_policy_def = (
b"\x80\x05\x95L\x00\x00\x00\x00\x00\x00\x00\x8c\x1cray."
b"serve.autoscaling_policy\x94\x8c'replica_queue_length_"
b"autoscaling_policy\x94\x93\x94."
)
details = ServeInstanceDetails(
controller_info={"node_id": "fake_node_id"},
proxy_location="EveryNode",
proxies={"node1": {"status": "HEALTHY"}},
applications={
"app1": {
"name": "app1",
"route_prefix": "/app1",
"docs_path": "/docs/app1",
"status": "RUNNING",
"message": "fake_message",
"last_deployed_time_s": 123,
"source": "imperative",
"deployments": {
"deployment1": {
"name": "deployment1",
"status": "HEALTHY",
"status_trigger": "AUTOSCALING",
"message": "fake_message",
"deployment_config": {
"name": "deployment1",
"autoscaling_config": {
# This bytes object would cause a JSON serialization error if exposed
"_serialized_policy_def": serialized_policy_def
},
},
"target_num_replicas": 0,
"required_resources": {"CPU": 1},
"replicas": [],
}
},
"external_scaler_enabled": False,
}
},
)._get_user_facing_json_serializable_dict(exclude_unset=True)
details_json = json.dumps(details)
expected_json = json.dumps(
{
"controller_info": {"node_id": "fake_node_id"},
"proxy_location": "EveryNode",
"proxies": {"node1": {"status": "HEALTHY"}},
"applications": {
"app1": {
"name": "app1",
"route_prefix": "/app1",
"docs_path": "/docs/app1",
"status": "RUNNING",
"message": "fake_message",
"last_deployed_time_s": 123.0,
"source": "imperative",
"deployments": {
"deployment1": {
"name": "deployment1",
"status": "HEALTHY",
"status_trigger": "AUTOSCALING",
"message": "fake_message",
"deployment_config": {
"name": "deployment1",
"autoscaling_config": {},
},
"target_num_replicas": 0,
"required_resources": {"CPU": 1},
"replicas": [],
}
},
"external_scaler_enabled": False,
}
},
}
)
assert details_json == expected_json
# ensure internal field, serialized_policy_def, is not exposed
application = details["applications"]["app1"]
deployment = application["deployments"]["deployment1"]
autoscaling_config = deployment["deployment_config"]["autoscaling_config"]
assert "_serialized_policy_def" not in autoscaling_config
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
TestLoggingConfig
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/decorators/branch_external_python.py
|
{
"start": 1217,
"end": 2403
}
|
class ____(_PythonDecoratedOperator, BranchExternalPythonOperator):
"""Wraps a Python callable and captures args/kwargs when called for execution."""
template_fields = BranchExternalPythonOperator.template_fields
custom_operator_name: str = "@task.branch_external_python"
def branch_external_python_task(
python_callable: Callable | None = None, multiple_outputs: bool | None = None, **kwargs
) -> TaskDecorator:
"""
Wrap a python function into a BranchExternalPythonOperator.
For more information on how to use this operator, take a look at the guide:
:ref:`concepts:branching`
Accepts kwargs for operator kwargs. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_BranchExternalPythonDecoratedOperator,
**kwargs,
)
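# A minimal usage sketch (hedged; the interpreter path and task ids below are assumptions,
# not from the original source):
# @task.branch_external_python(python="/path/to/venv/bin/python")
# def choose_branch():
#     return "fast_path"  # task_id of the downstream task to follow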
|
_BranchExternalPythonDecoratedOperator
|
python
|
google__jax
|
jax/experimental/jax2tf/tests/flax_models/bilstm_classifier.py
|
{
"start": 3350,
"end": 5533
}
|
class ____(nn.Module):
"""Embeds batches of token IDs into feature space.
Attributes:
vocab_size: The size of the vocabulary (i.e., the number of embeddings).
embedding_size: The dimensionality of the embeddings.
embedding_init: The initializer used to initialize the embeddings.
frozen: Freezes the embeddings table, keeping it fixed at initial values.
dropout_rate: Percentage of units to drop after embedding the inputs.
word_dropout_rate: Percentage of input words to replace with unk_idx.
unk_idx: The index (integer) to use to replace inputs for word dropout.
"""
vocab_size: int
embedding_size: int
embedding_init: Callable[...,
Array] = nn.initializers.normal(stddev=0.1)
frozen: bool = False
dropout_rate: float = 0.
word_dropout_rate: float = 0.
unk_idx: int | None = None
deterministic: bool | None = None
dtype: jnp.dtype = jnp.dtype('float32')
def setup(self):
self.embedding = self.param(
'embedding',
self.embedding_init,
(self.vocab_size,
self.embedding_size),
self.dtype)
self.dropout_layer = nn.Dropout(rate=self.dropout_rate)
self.word_dropout_layer = WordDropout(
dropout_rate=self.word_dropout_rate,
unk_idx=self.unk_idx)
def __call__(self, inputs: Array,
deterministic: bool | None = None) -> Array:
"""Embeds the input sequences and applies word dropout and dropout.
Args:
inputs: Batch of input token ID sequences <int64>[batch_size, seq_length].
deterministic: Disables dropout when set to True.
Returns:
The embedded inputs, shape: <float32>[batch_size, seq_length,
embedding_size].
"""
deterministic = nn.module.merge_param(
'deterministic', self.deterministic, deterministic)
inputs = self.word_dropout_layer(inputs, deterministic=deterministic)
embedded_inputs = self.embedding[inputs]
# Keep the embeddings fixed at initial (e.g. pretrained) values.
if self.frozen:
embedded_inputs = jax.lax.stop_gradient(embedded_inputs)
return self.dropout_layer(embedded_inputs, deterministic=deterministic)
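# Illustrative usage sketch (shapes and names are assumptions, not from the original source):
# embedder = Embedder(vocab_size=10_000, embedding_size=128)
# token_ids = jnp.zeros((2, 16), dtype=jnp.int32)  # [batch_size, seq_length]
# variables = embedder.init(jax.random.PRNGKey(0), token_ids, deterministic=True)
# embedded = embedder.apply(variables, token_ids, deterministic=True)  # -> [2, 16, 128]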
|
Embedder
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
|
{
"start": 972,
"end": 1129
}
|
class ____(Completion):
"""Argument completion unavailable."""
message: str = 'No completions available.'
@dataclasses.dataclass
|
CompletionUnavailable
|
python
|
huggingface__transformers
|
tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
|
{
"start": 27294,
"end": 31222
}
|
class ____(EncoderDecoderMixin, unittest.TestCase):
def get_encoder_decoder_model(self, config, decoder_config):
encoder_model = SwinModel(config).eval()
decoder_model = BartForCausalLM(decoder_config).eval()
return encoder_model, decoder_model
def prepare_config_and_inputs(self):
model_tester_encoder = SwinModelTester(self, batch_size=13, embed_dim=32)
model_tester_decoder = BartModelTester(self, batch_size=13, hidden_size=32, max_position_embeddings=512)
encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs()
decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs()
config, pixel_values, _ = encoder_config_and_inputs
decoder_config, decoder_inputs_dict = decoder_config_and_inputs
decoder_inputs_dict["labels"] = decoder_inputs_dict["decoder_input_ids"]
# make sure that cross attention layers are added
decoder_config.add_cross_attention = True
# disable cache for now
decoder_config.use_cache = False
return {
"config": config,
"pixel_values": pixel_values,
"decoder_config": decoder_config,
**decoder_inputs_dict,
}
def check_encoder_decoder_model_output_attentions(
self,
config,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels=None,
pixel_values=None,
**kwargs,
):
# force eager attention to support output attentions
config._attn_implementation = "eager"
decoder_config._attn_implementation = "eager"
# make the decoder inputs a different shape from the encoder inputs to harden the test
decoder_input_ids = decoder_input_ids[:, :-1]
decoder_attention_mask = decoder_attention_mask[:, :-1]
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_attentions=True,
)
encoder_attentions = outputs_encoder_decoder["encoder_attentions"]
self.assertEqual(len(encoder_attentions), config.num_hidden_layers)
# in Swin, the seq_len equals:
seq_len = encoder_model.config.window_size**2
self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads[0], seq_len, seq_len))
decoder_attentions = outputs_encoder_decoder["decoder_attentions"]
num_decoder_layers = (
decoder_config.num_decoder_layers
if hasattr(decoder_config, "num_decoder_layers")
else decoder_config.num_hidden_layers
)
self.assertEqual(len(decoder_attentions), num_decoder_layers)
self.assertEqual(
decoder_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
)
cross_attentions = outputs_encoder_decoder["cross_attentions"]
self.assertEqual(len(cross_attentions), num_decoder_layers)
encoder_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
cross_attention_input_seq_len = decoder_input_ids.shape[-1]
self.assertEqual(
cross_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, cross_attention_input_seq_len, encoder_seq_len),
)
@unittest.skip(reason="There are no published pretrained BART-causal checkpoints for now")
def test_real_model_save_load_from_pretrained(self):
pass
@require_torch
|
Swin2BartModelTest
|
python
|
facebook__pyre-check
|
client/configuration/tests/scheduler_policies_test.py
|
{
"start": 383,
"end": 4697
}
|
class ____(testslide.TestCase):
def test_policy_from_and_to_json(self) -> None:
def assert_parsed(input: object, expected: SchedulerPolicy) -> None:
self.assertEqual(SchedulerPolicy.from_json(input, "<unknown>"), expected)
self.assertEqual(input, expected.to_json())
def assert_not_parsed(input: object) -> None:
with self.assertRaises(InvalidConfiguration):
SchedulerPolicy.from_json(input, "<unknown>")
assert_not_parsed("")
assert_not_parsed("derp")
assert_not_parsed({})
assert_not_parsed({"kind": 1})
assert_not_parsed({"kind": "unknown"})
assert_not_parsed({"kind": "fixed_chunk_size"})
assert_not_parsed(
{"kind": "fixed_chunk_size", "minimum_chunk_size": "not_integer"}
)
assert_not_parsed({"kind": "fixed_chunk_size", "minimum_chunk_size": 1})
assert_not_parsed(
{
"kind": "fixed_chunk_size",
"minimum_chunk_size": 1,
"minimum_chunks_per_worker": 10,
"preferred_chunk_size": -1,
}
)
assert_parsed(
{
"kind": "fixed_chunk_size",
"minimum_chunk_size": 1,
"minimum_chunks_per_worker": 10,
"preferred_chunk_size": 100,
},
SchedulerPolicy(
value=FixedChunkSize(
minimum_chunk_size=1,
minimum_chunks_per_worker=10,
preferred_chunk_size=100,
)
),
)
assert_parsed(
{
"kind": "fixed_chunk_size",
"minimum_chunks_per_worker": 10,
"preferred_chunk_size": 100,
},
SchedulerPolicy(
value=FixedChunkSize(
minimum_chunk_size=None,
minimum_chunks_per_worker=10,
preferred_chunk_size=100,
)
),
)
assert_parsed(
{
"kind": "fixed_chunk_count",
"minimum_chunks_per_worker": 1,
"minimum_chunk_size": 10,
"preferred_chunks_per_worker": 100,
},
SchedulerPolicy(
value=FixedChunkCount(
minimum_chunks_per_worker=1,
minimum_chunk_size=10,
preferred_chunks_per_worker=100,
)
),
)
assert_parsed(
{
"kind": "fixed_chunk_count",
"minimum_chunk_size": 10,
"preferred_chunks_per_worker": 100,
},
SchedulerPolicy(
value=FixedChunkCount(
minimum_chunks_per_worker=None,
minimum_chunk_size=10,
preferred_chunks_per_worker=100,
)
),
)
def test_policies_from_and_to_json(self) -> None:
def assert_parsed(input: object, expected: SchedulerPolicies) -> None:
self.assertEqual(SchedulerPolicies.from_json(input), expected)
self.assertEqual(input, expected.to_json())
def assert_not_parsed(input: object) -> None:
with self.assertRaises(InvalidConfiguration):
SchedulerPolicies.from_json(input)
assert_not_parsed("")
assert_not_parsed({"taint_fixpoint": "foo"})
assert_not_parsed({"taint_fixpoint": {}})
assert_parsed(
{
"taint_fixpoint": {
"kind": "fixed_chunk_size",
"minimum_chunk_size": 1,
"minimum_chunks_per_worker": 10,
"preferred_chunk_size": 100,
}
},
SchedulerPolicies(
policies={
"taint_fixpoint": SchedulerPolicy(
value=FixedChunkSize(
minimum_chunk_size=1,
minimum_chunks_per_worker=10,
preferred_chunk_size=100,
)
)
}
),
)
|
SchedulerPoliciesTest
|