import json
import logging
from functools import partial
from itertools import product
from unittest.mock import ANY, AsyncMock, MagicMock
from unittest.mock import patch as mock_patch
from uuid import uuid4

import botocore
import pytest
from exceptiongroup import ExceptionGroup, catch
from moto import mock_aws
from moto.backends import get_backend
from moto.ec2.utils import generate_instance_identity_document
from moto.moto_api import state_manager
from prefect_aws.workers.ecs_worker import (
    _TAG_REGEX,
    _TASK_DEFINITION_CACHE,
    ECS_DEFAULT_CONTAINER_NAME,
    ECS_DEFAULT_CPU,
    ECS_DEFAULT_FAMILY,
    ECS_DEFAULT_MEMORY,
    AwsCredentials,
    ECSJobConfiguration,
    ECSVariables,
    ECSWorker,
    _get_container,
    get_prefect_image_name,
    mask_sensitive_env_values,
    parse_identifier,
)
from pydantic import ValidationError

from prefect.client.schemas.objects import FlowRun
from prefect.settings import PREFECT_API_AUTH_STRING, PREFECT_API_KEY
from prefect.settings.context import temporary_settings
from prefect.utilities.slugify import slugify
from prefect.utilities.templating import find_placeholders

# Minimal task definition used by tests that register a definition up front and
# pass its ARN to the worker instead of letting the worker construct one.
TEST_TASK_DEFINITION = {
    "containerDefinitions": [
        {
            "cpu": 1024,
            "image": "prefecthq/prefect:3-latest",
            "memory": 2048,
            "name": "prefect",
        },
    ],
    "family": "prefect",
}

# Configure moto so ECS tasks never progress past RUNNING on their own; the
# tests read `lastStatus` and expect it to stay RUNNING rather than advancing
# through moto's automatic state machine.
state_manager.set_transition(
    model_name="ecs::task",
    transition={
        "progression": "manual",
        "times": 9999,
    },  # always return RUNNING for task lastStatus
)


@pytest.fixture
def flow_run():
    """A flow run that is associated with a deployment."""
    return FlowRun(deployment_id=uuid4(), flow_id=uuid4())


@pytest.fixture
def flow_run_no_deployment():
    """A flow run with no associated deployment."""
    run = FlowRun(flow_id=uuid4())
    return run


@pytest.fixture(autouse=True)
def reset_task_definition_cache():
    """Ensure every test starts with an empty task-definition cache."""
    _TASK_DEFINITION_CACHE.clear()
    yield


@pytest.fixture
def prefect_api_key_setting():
    """Temporarily configure a Prefect API key for the duration of a test."""
    with temporary_settings({PREFECT_API_KEY: "test-api-key"}):
        yield


@pytest.fixture(autouse=True)
def mock_start_observer(monkeypatch: pytest.MonkeyPatch):
    """Stub the observer lifecycle hooks so no background observer runs."""
    for hook in ("start_observer", "stop_observer"):
        monkeypatch.setattr(f"prefect_aws.workers.ecs_worker.{hook}", AsyncMock())


def create_log_stream(session, run_task, *args, **kwargs):
    """
    When running a task, create the log group and stream if logging is configured on
    containers.

    See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html
    """
    # Run the task first; its definition tells us which log configuration (if
    # any) applies to each container.
    tasks = run_task(*args, **kwargs)
    if not tasks:
        return tasks
    task = tasks[0]

    ecs_client = session.client("ecs")
    logs_client = session.client("logs")

    task_definition = ecs_client.describe_task_definition(
        taskDefinition=task.task_definition_arn
    )["taskDefinition"]

    for container in task_definition.get("containerDefinitions", []):
        log_config = container.get("logConfiguration", {})
        if log_config:
            # Only the awslogs driver implies CloudWatch groups/streams.
            if log_config.get("logDriver") != "awslogs":
                continue

            options = log_config.get("options", {})
            if not options:
                raise ValueError("logConfiguration does not include options.")

            group_name = options.get("awslogs-group")
            if not group_name:
                raise ValueError(
                    "logConfiguration.options does not include awslogs-group"
                )

            # Mirror the awslogs driver's create-group behavior.
            if options.get("awslogs-create-group") == "true":
                logs_client.create_log_group(logGroupName=group_name)

            stream_prefix = options.get("awslogs-stream-prefix")
            if not stream_prefix:
                raise ValueError(
                    "logConfiguration.options does not include awslogs-stream-prefix"
                )

            # Stream name matches the "<prefix>/<container>/<task id>" layout
            # the awslogs driver would produce.
            logs_client.create_log_stream(
                logGroupName=group_name,
                logStreamName=f"{stream_prefix}/{container['name']}/{task.id}",
            )

    return tasks


def add_ec2_instance_to_ecs_cluster(session, cluster_name):
    """Register a single (mocked) EC2 container instance with the named cluster.

    Creates the cluster if it does not already exist.
    """
    ecs_client = session.client("ecs")
    ec2_client = session.client("ec2")
    ec2_resource = session.resource("ec2")

    ecs_client.create_cluster(clusterName=cluster_name)

    # Any available image will do; moto serves canned AMIs.
    image_id = ec2_client.describe_images()["Images"][0]["ImageId"]

    (instance,) = ec2_resource.create_instances(
        ImageId=image_id, MinCount=1, MaxCount=1
    )

    identity_document = json.dumps(generate_instance_identity_document(instance))
    ecs_client.register_container_instance(
        cluster=cluster_name,
        instanceIdentityDocument=identity_document,
    )


def create_test_ecs_cluster(ecs_client, cluster_name) -> str:
    """
    Create an ECS cluster and return its ARN
    """
    response = ecs_client.create_cluster(clusterName=cluster_name)
    return response["cluster"]["clusterArn"]


def describe_task(ecs_client, task_arn, **kwargs) -> dict:
    """
    Describe a single ECS task
    """
    response = ecs_client.describe_tasks(tasks=[task_arn], include=["TAGS"], **kwargs)
    return response["tasks"][0]


def describe_task_definition(ecs_client, task):
    """Fetch the full task definition referenced by a described task."""
    arn = task["taskDefinitionArn"]
    response = ecs_client.describe_task_definition(taskDefinition=arn)
    return response["taskDefinition"]


def patch_calculate_task_resource_requirements(
    _calculate_task_resource_requirements, task_definition
):
    """
    Adds support for non-EC2 execution modes to moto's calculation of task definition.
    """
    # Fargate-style definitions may omit container-level memory; give moto a
    # zero default so its EC2-oriented math does not fail.
    for definition in task_definition.container_definitions:
        if "memory" not in definition:
            definition["memory"] = 0
    return _calculate_task_resource_requirements(task_definition)


@pytest.fixture
def ecs_mocks(aws_credentials: AwsCredentials):
    # Stand up a fully mocked AWS account with a "default" ECS cluster and a
    # registered container instance for the duration of a test.
    with mock_aws():
        session = aws_credentials.get_boto3_session()
        ecs_client = session.client("ecs")

        create_test_ecs_cluster(ecs_client, "default")

        # NOTE: Even when using FARGATE, moto requires container instances to be
        #       registered. This differs from AWS behavior.
        add_ec2_instance_to_ecs_cluster(session, "default")

        # This has the potential to break with moto upgrades since we're patching
        # an internal method, but it was stable between 4 and 5 (so far)
        ecs_backends = get_backend("ecs")
        for account in ecs_backends:
            for region in ecs_backends[account]:
                backend = ecs_backends[account][region]
                # Wrap moto's resource calculation so container definitions
                # without an explicit memory value don't break registration.
                orig = backend._calculate_task_resource_requirements
                backend._calculate_task_resource_requirements = partial(
                    patch_calculate_task_resource_requirements, orig
                )
        yield


async def construct_configuration(**options):
    """Build an ECSJobConfiguration from the worker's default base job template
    using the given job variables.
    """
    variables = ECSVariables(**options)
    print(f"Using variables: {variables.model_dump_json(indent=2, exclude_none=True)}")

    values = variables.model_dump(exclude_none=True)
    configuration = await ECSJobConfiguration.from_template_and_values(
        base_job_template=ECSWorker.get_default_base_job_template(),
        values=values,
    )
    print(f"Constructed test configuration: {configuration.model_dump_json(indent=2)}")

    return configuration


async def construct_configuration_with_job_template(
    template_overrides: dict, **variables: dict
):
    """Build an ECSJobConfiguration from the default base job template with the
    given `job_configuration` keys overridden before rendering.
    """
    parsed_variables: ECSVariables = ECSVariables(**variables)
    print(f"Using variables: {parsed_variables.model_dump_json(indent=2)}")

    base_template = ECSWorker.get_default_base_job_template()
    job_configuration = base_template["job_configuration"]
    for key, override in template_overrides.items():
        job_configuration[key] = override

    print(
        "Using base template configuration:"
        f" {json.dumps(base_template['job_configuration'], indent=2)}"
    )

    configuration: ECSJobConfiguration = (
        await ECSJobConfiguration.from_template_and_values(
            base_job_template=base_template,
            values=parsed_variables.model_dump(exclude_none=True),
        )
    )
    print(f"Constructed test configuration: {configuration.model_dump_json(indent=2)}")

    return configuration


@pytest.mark.usefixtures("ecs_mocks")
async def test_default(aws_credentials: AwsCredentials, flow_run: FlowRun):
    """With only credentials and a command, the worker registers a default task
    definition and runs a FARGATE task with the default Prefect container.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, command="echo test"
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test-foo") as worker:
        result = await worker.run(flow_run, configuration)

    # parse_identifier splits the result identifier; the second element is the
    # task ARN we can describe through moto.
    _, task_arn = parse_identifier(result.identifier)
    task = describe_task(ecs_client, task_arn)

    assert task == {
        "attachments": ANY,
        "clusterArn": ANY,
        "containers": [
            {
                "containerArn": ANY,
                "cpu": 0,
                "memory": 0,
                "healthStatus": "HEALTHY",
                "exitCode": 0,
                "image": ANY,
                "lastStatus": "PENDING",
                "networkInterfaces": [],
                "taskArn": ANY,
                "name": "prefect",
            }
        ],
        "desiredStatus": "RUNNING",
        "group": ANY,
        "lastStatus": "RUNNING",
        "launchType": "FARGATE",
        "overrides": {
            "containerOverrides": [
                {"name": "prefect", "environment": [], "command": ["echo", "test"]}
            ]
        },
        "startedBy": ANY,
        "stoppedReason": "",
        "tags": [],
        "taskArn": ANY,
        "taskDefinitionArn": ANY,
    }

    # The registered definition should contain only the defaults.
    task_definition = describe_task_definition(ecs_client, task)
    assert task_definition["containerDefinitions"] == [
        {
            "name": ECS_DEFAULT_CONTAINER_NAME,
            "image": get_prefect_image_name(),
            "cpu": 0,
            "memory": 0,
            "portMappings": [],
            "essential": True,
            "environment": [],
            "mountPoints": [],
            "volumesFrom": [],
        }
    ]


@pytest.mark.usefixtures("ecs_mocks")
async def test_image(aws_credentials: AwsCredentials, flow_run: FlowRun):
    """An explicitly configured image should be used on the container definition
    instead of the default Prefect image.
    """
    # NOTE: the `usefixtures("ecs_mocks")` mark was previously duplicated; once
    # is sufficient.
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, image="prefecthq/prefect-dev:main-python3.9"
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)
    task = describe_task(ecs_client, task_arn)
    assert task["lastStatus"] == "RUNNING"

    task_definition = describe_task_definition(ecs_client, task)
    assert task_definition["containerDefinitions"] == [
        {
            "name": ECS_DEFAULT_CONTAINER_NAME,
            "image": "prefecthq/prefect-dev:main-python3.9",
            "cpu": 0,
            "memory": 0,
            "portMappings": [],
            "essential": True,
            "environment": [],
            "mountPoints": [],
            "volumesFrom": [],
        }
    ]


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["EC2", "FARGATE", "FARGATE_SPOT"])
async def test_launch_types(
    aws_credentials: AwsCredentials, launch_type: str, flow_run: FlowRun
):
    """EC2 and FARGATE launch types land directly on the task; FARGATE_SPOT
    instead sends a capacity provider strategy with no launch type.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, launch_type=launch_type
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call because moto does not track
        # 'capacityProviderStrategy'
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)

    if launch_type != "FARGATE_SPOT":
        assert launch_type in task_definition["compatibilities"]
        assert task["launchType"] == launch_type
    else:
        assert "FARGATE" in task_definition["compatibilities"]
        # FARGATE SPOT requires a null launch type
        assert not task.get("launchType")
        # Instead, it requires a capacity provider strategy but this is not supported
        # by moto and is not present on the task even when provided so we assert on the
        # mock call to ensure it is sent

        assert mock_run_task.call_args[0][1].get("capacityProviderStrategy") == [
            {"capacityProvider": "FARGATE_SPOT", "weight": 1}
        ]

    # Fargate-family launch types must declare FARGATE compatibility on the
    # registered task definition; EC2 should declare nothing.
    requires_capabilities = task_definition.get("requiresCompatibilities", [])
    if launch_type != "EC2":
        assert "FARGATE" in requires_capabilities
    else:
        assert not requires_capabilities


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["EC2", "FARGATE", "FARGATE_SPOT"])
@pytest.mark.parametrize(
    "cpu,memory", [(None, None), (1024, None), (None, 2048), (2048, 4096)]
)
async def test_cpu_and_memory(
    aws_credentials: AwsCredentials,
    launch_type: str,
    flow_run: FlowRun,
    cpu: int,
    memory: int,
):
    """CPU/memory land on the container definition for EC2 and on the task
    definition for Fargate, and are always passed as task-run overrides when
    provided.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, launch_type=launch_type, cpu=cpu, memory=memory
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)
    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)
    container_definition = _get_container(
        task_definition["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    overrides = task["overrides"]
    container_overrides = _get_container(
        overrides["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )

    if launch_type == "EC2":
        # EC2 requires CPU and memory to be defined at the container level.
        # NOTE: the parentheses matter — `x == cpu or ECS_DEFAULT_CPU` would
        # always be truthy and assert nothing.
        assert container_definition["cpu"] == (cpu or ECS_DEFAULT_CPU)
        assert container_definition["memory"] == (memory or ECS_DEFAULT_MEMORY)
    else:
        # Fargate requires CPU and memory to be defined at the task definition level
        assert task_definition["cpu"] == str(cpu or ECS_DEFAULT_CPU)
        assert task_definition["memory"] == str(memory or ECS_DEFAULT_MEMORY)

    # We always provide non-null values as overrides on the task run
    assert overrides.get("cpu") == (str(cpu) if cpu else None)
    assert overrides.get("memory") == (str(memory) if memory else None)
    # And as overrides for the Prefect container
    assert container_overrides.get("cpu") == cpu
    assert container_overrides.get("memory") == memory


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["EC2", "FARGATE", "FARGATE_SPOT"])
async def test_network_mode_default(
    aws_credentials: AwsCredentials,
    launch_type: str,
    flow_run: FlowRun,
):
    """The default network mode is bridge on EC2 and awsvpc on Fargate."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, launch_type=launch_type
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)
    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)

    expected_mode = "bridge" if launch_type == "EC2" else "awsvpc"
    assert task_definition["networkMode"] == expected_mode


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["EC2", "FARGATE", "FARGATE_SPOT"])
async def test_container_command(
    aws_credentials: AwsCredentials,
    launch_type: str,
    flow_run: FlowRun,
):
    """A string command is split into an argument list on the container override."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        launch_type=launch_type,
        command="prefect version",
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)
    task = describe_task(ecs_client, task_arn)

    prefect_container = _get_container(
        task["overrides"]["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert prefect_container["command"] == ["prefect", "version"]


@pytest.mark.usefixtures("ecs_mocks")
async def test_task_definition_arn(
    aws_credentials: AwsCredentials, flow_run: FlowRun, caplog
):
    """Providing a task definition ARN should skip construction/registration of
    a new definition and log a warning about it.
    """
    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    task_definition_arn = ecs_client.register_task_definition(**TEST_TASK_DEFINITION)[
        "taskDefinition"
    ]["taskDefinitionArn"]

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        task_definition_arn=task_definition_arn,
        launch_type="EC2",
    )

    async with ECSWorker(work_pool_name="test") as worker:
        with caplog.at_level(
            logging.WARN, logger=worker.get_flow_run_logger(flow_run).name
        ):
            result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    # Removed a leftover debugging `print(task)` here.
    task = describe_task(ecs_client, task_arn)
    assert task["taskDefinitionArn"] == task_definition_arn, (
        "The task definition should be used without registering a new one"
    )

    assert (
        "Skipping task definition construction since a task definition"
        " ARN is provided." in caplog.text
    )


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize(
    "overrides",
    [{"image": "new-image"}, {"configure_cloudwatch_logs": True}, {"family": "foobar"}],
)
async def test_task_definition_arn_with_variables_that_are_ignored(
    aws_credentials, overrides, caplog, flow_run
):
    """
    Any of these overrides should cause the task definition to be copied and
    registered as a new version
    """
    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    task_definition_arn = ecs_client.register_task_definition(
        **TEST_TASK_DEFINITION, executionRoleArn="base"
    )["taskDefinition"]["taskDefinitionArn"]

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        task_definition_arn=task_definition_arn,
        launch_type="EC2",
        **overrides,
    )

    async with ECSWorker(work_pool_name="test") as worker:
        with caplog.at_level(
            logging.WARN, logger=worker.get_flow_run_logger(flow_run).name
        ):
            # Collect the job-variable placeholders present in the pool's task
            # definition template; only these can be reported as ignored.
            template_with_placeholders = worker.work_pool.base_job_template[
                "job_configuration"
            ]["task_definition"]
            placeholders = [
                placeholder.name
                for placeholder in find_placeholders(template_with_placeholders)
            ]
            result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    assert task["taskDefinitionArn"] == task_definition_arn, (
        "A new task definition should not be registered"
    )

    assert (
        "Skipping task definition construction since a task definition"
        " ARN is provided." in caplog.text
    )

    assert (
        "The following job variable references"
        " in the task definition template will be ignored: " in caplog.text
    )

    # Replaced the previous one-line `assert A if cond else B` conditional
    # expression with explicit branches — same logic, far easier to read.
    for key in overrides.keys():
        if key in placeholders:
            assert key in caplog.text
        else:
            assert key not in caplog.text


@pytest.mark.usefixtures("ecs_mocks")
async def test_environment_variables(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """Environment variables are applied as runtime container overrides rather
    than baked into the task definition.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        env={"FOO": "BAR"},
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)

    definition = _get_container(
        task_definition["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert not definition["environment"], (
        "Variables should not be passed until runtime"
    )

    runtime_overrides = _get_container(
        task["overrides"]["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert runtime_overrides.get("environment") == [{"name": "FOO", "value": "BAR"}]


@pytest.mark.usefixtures("ecs_mocks")
async def test_labels(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """Labels become ECS tags at run time, with disallowed characters slugified
    away; the task definition itself carries no tags.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        labels={
            "foo": "bar",
            "af_sn253@!$@&$%@(bfausfg!#!*&):@cas{}[]'XY": (
                "af_sn253@!$@&$%@(bfausfg!#!*&):@cas{}[]'XY"
            ),
        },
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)
    assert not task_definition.get("tags"), "Labels should not be passed until runtime"
    assert task.get("tags") == [
        {
            "key": "foo",
            "value": "bar",
        },
        {
            # Slugified to remove invalid characters
            "key": "af_sn253@-@-@-bfausfg-:@cas-XY",
            "value": "af_sn253@-@-@-bfausfg-:@cas-XY",
        },
    ]


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize(
    "labels, expected_keys",
    [
        (
            {
                "validLabel": "validValue",
                "invalid/label*with?chars": "invalid/value&*with%chars",
            },
            {
                "validLabel": "validValue",
                "invalid/label*with?chars": "invalid/value&*with%chars",
            },
        ),
        (
            {
                "flow-name": "Hello, World"
            },  # regression test for https://github.com/PrefectHQ/prefect/issues/13174
            {
                "flow-name": "Hello-World",
            },
        ),
    ],
)
async def test_slugified_labels(
    aws_credentials: AwsCredentials, flow_run: FlowRun, labels, expected_keys
):
    """Tags observed on the task should equal the expected labels after applying
    the same slugification rules (_TAG_REGEX) the worker uses.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        labels=labels,
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)

    # Fetch actual tags from the mock ECS response
    actual_tags = {tag["key"]: tag["value"] for tag in task.get("tags", [])}

    # Slugify keys and values for expected tags comparison
    expected_tags = {
        slugify(
            key, regex_pattern=_TAG_REGEX, allow_unicode=True, lowercase=False
        ): slugify(value, regex_pattern=_TAG_REGEX, allow_unicode=True, lowercase=False)
        for key, value in expected_keys.items()
    }

    # Check if the slugified tags are as expected
    for key, value in expected_tags.items():
        assert actual_tags.get(key) == value, (
            f"Failed for key: {key} with expected value: {value}, but got {actual_tags.get(key)}"
        )


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("default_cluster", [True, False])
async def test_cluster(
    aws_credentials: AwsCredentials, flow_run: FlowRun, default_cluster: bool
):
    """The task runs in the configured cluster, falling back to the default
    cluster when none is given.
    """
    # Fixed a duplicated `configuration = configuration = ...` assignment.
    configuration = await construct_configuration(
        cluster=None if default_cluster else "second-cluster",
        aws_credentials=aws_credentials,
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    # Construct a non-default cluster. We build this in either case since otherwise
    # there is only one cluster and there's no choice but to use the default.
    second_cluster_arn = create_test_ecs_cluster(ecs_client, "second-cluster")
    add_ec2_instance_to_ecs_cluster(session, "second-cluster")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)

    if default_cluster:
        assert task["clusterArn"].endswith("default")
    else:
        assert task["clusterArn"] == second_cluster_arn


@pytest.mark.usefixtures("ecs_mocks")
async def test_cluster_and_launch_type_passed_to_run_task(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Test that cluster and launchType are explicitly passed to run_task API call."""
    cluster_arn = "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster"
    launch_type = "FARGATE"

    configuration = await construct_configuration(
        cluster=cluster_arn,
        launch_type=launch_type,
        aws_credentials=aws_credentials,
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")
    create_test_ecs_cluster(ecs_client, "test-cluster")

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call to verify parameters
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        result = await worker.run(flow_run, configuration)

    # Verify that cluster and launchType were passed in the run_task call.
    # The second positional argument to _create_task_run is the run_task
    # request payload.
    run_kwargs = mock_run_task.call_args[0][1]
    assert run_kwargs.get("cluster") == cluster_arn
    assert run_kwargs.get("launchType") == launch_type

    # Verify the task was created successfully
    assert result.status_code == 0


@pytest.mark.usefixtures("ecs_mocks")
async def test_cluster_and_launch_type_passed_when_missing_from_template(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Test that cluster and launchType are added even when missing from templated task_run_request.

    This tests the specific bug scenario where template variables resolve to empty/None
    but configuration fields are set directly.
    """
    cluster_arn = "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster"
    launch_type = "FARGATE"

    # Create configuration with cluster and launch_type set
    configuration = await construct_configuration(
        cluster=cluster_arn,
        launch_type=launch_type,
        aws_credentials=aws_credentials,
    )

    # Manually remove cluster and launchType from task_run_request to simulate
    # the bug scenario where template resolution doesn't provide these values
    if "cluster" in configuration.task_run_request:
        del configuration.task_run_request["cluster"]
    if "launchType" in configuration.task_run_request:
        del configuration.task_run_request["launchType"]

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")
    create_test_ecs_cluster(ecs_client, "test-cluster")

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call to verify parameters
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        result = await worker.run(flow_run, configuration)

    # Verify that cluster and launchType were added from configuration
    # even though they were missing from task_run_request
    # (second positional argument is the run_task request payload)
    run_kwargs = mock_run_task.call_args[0][1]
    assert run_kwargs.get("cluster") == cluster_arn
    assert run_kwargs.get("launchType") == launch_type

    # Verify the task was created successfully
    assert result.status_code == 0


@pytest.mark.usefixtures("ecs_mocks")
async def test_execution_role_arn(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """The configured execution role lands on the registered task definition."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        execution_role_arn="test",
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task_definition = describe_task_definition(
        ecs_client, describe_task(ecs_client, task_arn)
    )
    assert task_definition["executionRoleArn"] == "test"


@pytest.mark.usefixtures("ecs_mocks")
async def test_task_role_arn(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """The configured task role is applied as a run-time override on the task."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        task_role_arn="test",
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)
    described = describe_task(ecs_client, task_arn)

    assert described["overrides"]["taskRoleArn"] == "test"


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_from_vpc_id(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Given only a VPC id, the worker builds an awsvpc network configuration
    using the VPC's subnets with a public IP and no security groups.
    """
    session = aws_credentials.get_boto3_session()
    ec2_resource = session.resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="10.0.0.0/16")
    vpc.modify_attribute(EnableDnsHostnames={"Value": True})
    subnet = ec2_resource.create_subnet(CidrBlock="10.0.2.0/24", VpcId=vpc.id)

    configuration = await construct_configuration(
        aws_credentials=aws_credentials, vpc_id=vpc.id
    )

    session = aws_credentials.get_boto3_session()

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call because moto does not track 'networkConfiguration'
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        await worker.run(flow_run, configuration)

    network_configuration = mock_run_task.call_args[0][1].get("networkConfiguration")

    # Subnet ids are copied from the vpc
    assert network_configuration == {
        "awsvpcConfiguration": {
            "subnets": [subnet.id],
            "assignPublicIp": "ENABLED",
            "securityGroups": [],
        }
    }


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_1_subnet_in_custom_settings_1_in_vpc(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """An explicit network configuration override should be sent verbatim,
    taking precedence over what would be derived from the VPC.
    """
    session = aws_credentials.get_boto3_session()
    ec2_resource = session.resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="10.0.0.0/16")
    vpc.modify_attribute(EnableDnsHostnames={"Value": True})
    subnet = ec2_resource.create_subnet(CidrBlock="10.0.2.0/24", VpcId=vpc.id)
    security_group = ec2_resource.create_security_group(
        GroupName="ECSWorkerTestSG", Description="ECS Worker test SG", VpcId=vpc.id
    )

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        vpc_id=vpc.id,
        override_network_configuration=True,
        network_configuration={
            "subnets": [subnet.id],
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        },
    )

    session = aws_credentials.get_boto3_session()

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call because moto does not track 'networkConfiguration'
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        await worker.run(flow_run, configuration)

    network_configuration = mock_run_task.call_args[0][1].get("networkConfiguration")

    # Subnet ids are copied from the vpc
    assert network_configuration == {
        "awsvpcConfiguration": {
            "subnets": [subnet.id],
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        }
    }


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_1_sn_in_custom_settings_many_in_vpc(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """An explicit subnet list overrides VPC discovery even when the VPC has
    additional subnets; only the configured subnet should be submitted.

    Fix: removed a redundant second `session = aws_credentials.get_boto3_session()`
    whose result was never used.
    """
    session = aws_credentials.get_boto3_session()
    ec2_resource = session.resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="10.0.0.0/16")
    vpc.modify_attribute(EnableDnsHostnames={"Value": True})
    subnet = ec2_resource.create_subnet(CidrBlock="10.0.2.0/24", VpcId=vpc.id)
    # Extra subnets that must NOT appear in the submitted network configuration
    ec2_resource.create_subnet(CidrBlock="10.0.3.0/24", VpcId=vpc.id)
    ec2_resource.create_subnet(CidrBlock="10.0.4.0/24", VpcId=vpc.id)

    security_group = ec2_resource.create_security_group(
        GroupName="ECSWorkerTestSG", Description="ECS Worker test SG", VpcId=vpc.id
    )

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        vpc_id=vpc.id,
        override_network_configuration=True,
        network_configuration={
            "subnets": [subnet.id],
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        },
    )

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call because moto does not track 'networkConfiguration'
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        await worker.run(flow_run, configuration)

    network_configuration = mock_run_task.call_args[0][1].get("networkConfiguration")

    # Only the explicitly configured subnet is used
    assert network_configuration == {
        "awsvpcConfiguration": {
            "subnets": [subnet.id],
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        }
    }


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_many_subnet_in_custom_settings_many_in_vpc(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """When every VPC subnet is listed explicitly, the submitted configuration
    should contain exactly that list.

    Fix: removed a redundant second `session = aws_credentials.get_boto3_session()`
    whose result was never used.
    """
    session = aws_credentials.get_boto3_session()
    ec2_resource = session.resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="10.0.0.0/16")
    vpc.modify_attribute(EnableDnsHostnames={"Value": True})
    subnets = [
        ec2_resource.create_subnet(CidrBlock="10.0.2.0/24", VpcId=vpc.id),
        ec2_resource.create_subnet(CidrBlock="10.0.33.0/24", VpcId=vpc.id),
        ec2_resource.create_subnet(CidrBlock="10.0.44.0/24", VpcId=vpc.id),
    ]
    subnet_ids = [subnet.id for subnet in subnets]

    security_group = ec2_resource.create_security_group(
        GroupName="ECSWorkerTestSG", Description="ECS Worker test SG", VpcId=vpc.id
    )

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        vpc_id=vpc.id,
        override_network_configuration=True,
        network_configuration={
            "subnets": subnet_ids,
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        },
    )

    async with ECSWorker(work_pool_name="test") as worker:
        # Capture the task run call because moto does not track 'networkConfiguration'
        original_run_task = worker._create_task_run
        mock_run_task = MagicMock(side_effect=original_run_task)
        worker._create_task_run = mock_run_task

        await worker.run(flow_run, configuration)

    network_configuration = mock_run_task.call_args[0][1].get("networkConfiguration")

    # The full explicit subnet list is used
    assert network_configuration == {
        "awsvpcConfiguration": {
            "subnets": subnet_ids,
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        }
    }


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_from_custom_settings_invalid_subnet(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """A configured subnet id that does not exist within the VPC should raise a
    descriptive ValueError.

    Fix: removed a redundant second `session = aws_credentials.get_boto3_session()`
    whose result was never used.
    """
    session = aws_credentials.get_boto3_session()
    ec2_resource = session.resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="10.0.0.0/16")
    security_group = ec2_resource.create_security_group(
        GroupName="ECSWorkerTestSG", Description="ECS Worker test SG", VpcId=vpc.id
    )
    ec2_resource.create_subnet(CidrBlock="10.0.2.0/24", VpcId=vpc.id)

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        vpc_id=vpc.id,
        override_network_configuration=True,
        network_configuration={
            "subnets": ["sn-8asdas"],
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        },
    )

    def handle_error(exc_group: ExceptionGroup):
        # Exactly one ValueError with the worker's subnet-mismatch message
        assert len(exc_group.exceptions) == 1
        assert isinstance(exc_group.exceptions[0], ValueError)
        assert (
            f"Subnets ['sn-8asdas'] not found within VPC with ID {vpc.id}."
            "Please check that VPC is associated with supplied subnets."
        ) in str(exc_group.exceptions[0])

    with catch({ValueError: handle_error}):
        async with ECSWorker(work_pool_name="test") as worker:
            original_run_task = worker._create_task_run
            mock_run_task = MagicMock(side_effect=original_run_task)
            worker._create_task_run = mock_run_task

            await worker.run(flow_run, configuration)


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_from_custom_settings_invalid_subnet_multiple_vpc_subnets(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Mixing one valid and one invalid subnet id should still raise, and the
    error message should list every supplied subnet.

    Fix: removed a redundant second `session = aws_credentials.get_boto3_session()`
    whose result was never used.
    """
    session = aws_credentials.get_boto3_session()
    ec2_resource = session.resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="10.0.0.0/16")
    security_group = ec2_resource.create_security_group(
        GroupName="ECSWorkerTestSG", Description="ECS Worker test SG", VpcId=vpc.id
    )
    subnet = ec2_resource.create_subnet(CidrBlock="10.0.2.0/24", VpcId=vpc.id)
    invalid_subnet_id = "subnet-3bf19de7"

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        vpc_id=vpc.id,
        override_network_configuration=True,
        network_configuration={
            "subnets": [invalid_subnet_id, subnet.id],
            "assignPublicIp": "DISABLED",
            "securityGroups": [security_group.id],
        },
    )

    def handle_error(exc_group: ExceptionGroup):
        # Exactly one ValueError listing both supplied subnet ids
        assert len(exc_group.exceptions) == 1
        assert isinstance(exc_group.exceptions[0], ValueError)
        assert (
            f"Subnets ['{invalid_subnet_id}', '{subnet.id}'] not found within VPC with ID"
            f" {vpc.id}.Please check that VPC is associated with supplied subnets."
        ) in str(exc_group.exceptions[0])

    with catch({ValueError: handle_error}):
        async with ECSWorker(work_pool_name="test") as worker:
            original_run_task = worker._create_task_run
            mock_run_task = MagicMock(side_effect=original_run_task)
            worker._create_task_run = mock_run_task

            await worker.run(flow_run, configuration)


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_configure_network_requires_vpc_id(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Enabling `override_network_configuration` without a `vpc_id` should be
    rejected while building the configuration."""
    expected_message = (
        "You must provide a `vpc_id` to enable custom `network_configuration`."
    )
    with pytest.raises(ValidationError, match=expected_message):
        await construct_configuration(
            aws_credentials=aws_credentials,
            override_network_configuration=True,
            network_configuration={
                "subnets": [],
                "assignPublicIp": "ENABLED",
                "securityGroups": [],
            },
        )


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_from_default_vpc(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """With no VPC configured, subnets come from the account's default VPC."""
    ec2_client = aws_credentials.get_boto3_session().client("ec2")

    default_vpc_id = ec2_client.describe_vpcs(
        Filters=[{"Name": "isDefault", "Values": ["true"]}]
    )["Vpcs"][0]["VpcId"]
    expected_subnet_ids = [
        subnet["SubnetId"]
        for subnet in ec2_client.describe_subnets(
            Filters=[{"Name": "vpc-id", "Values": [default_vpc_id]}]
        )["Subnets"]
    ]

    configuration = await construct_configuration(aws_credentials=aws_credentials)

    async with ECSWorker(work_pool_name="test") as worker:
        # moto does not record 'networkConfiguration'; intercept the call instead
        spy_run_task = MagicMock(side_effect=worker._create_task_run)
        worker._create_task_run = spy_run_task
        await worker.run(flow_run, configuration)

    submitted = spy_run_task.call_args[0][1].get("networkConfiguration")

    # Subnet ids are copied from the default VPC
    assert submitted == {
        "awsvpcConfiguration": {
            "subnets": expected_subnet_ids,
            "assignPublicIp": "ENABLED",
            "securityGroups": [],
        }
    }


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("explicit_network_mode", [True, False])
async def test_network_config_is_empty_without_awsvpc_network_mode(
    aws_credentials: AwsCredentials, explicit_network_mode: bool, flow_run: FlowRun
):
    """No network configuration should be submitted unless the task definition
    uses the 'awsvpc' network mode. EC2 uses 'bridge' by default; the explicit
    case covers a task definition that sets it. FARGATE requires 'awsvpc', so
    the EC2 launch type is used here."""
    task_definition = {"networkMode": "bridge"} if explicit_network_mode else None
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        task_definition=task_definition,
        launch_type="EC2",
    )

    async with ECSWorker(work_pool_name="test") as worker:
        # moto does not record 'networkConfiguration'; intercept the call instead
        spy_run_task = MagicMock(side_effect=worker._create_task_run)
        worker._create_task_run = spy_run_task
        await worker.run(flow_run, configuration)

    assert spy_run_task.call_args[0][1].get("networkConfiguration") is None


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_missing_default_vpc(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """If the account has no default VPC, the worker should raise a ValueError
    when it falls back to default-VPC subnet discovery.

    moto refuses to delete a VPC with remaining dependencies, so the test
    tears down subnets, security groups, internet gateways, and non-main
    route tables (in that order) before deleting the VPC itself.
    """
    session = aws_credentials.get_boto3_session()
    ec2_client = session.client("ec2")
    ec2_resource = session.resource("ec2")

    # Get the default VPC and its dependencies
    default_vpc_info = ec2_client.describe_vpcs(
        Filters=[{"Name": "isDefault", "Values": ["true"]}]
    )["Vpcs"][0]
    default_vpc_id = default_vpc_info["VpcId"]
    vpc = ec2_resource.Vpc(default_vpc_id)

    # Delete all subnets
    for subnet in vpc.subnets.all():
        subnet.delete()

    # Delete security groups (except default)
    for sg in vpc.security_groups.all():
        if sg.group_name != "default":
            sg.delete()

    # Delete internet gateways (must be detached before deletion)
    for igw in vpc.internet_gateways.all():
        vpc.detach_internet_gateway(InternetGatewayId=igw.id)
        igw.delete()

    # Delete route tables (except main)
    for rt in vpc.route_tables.all():
        # Check if route table has main association; moto may expose
        # associations as dicts or as objects, so handle both shapes
        is_main = False
        if rt.associations_attribute:
            for assoc in rt.associations_attribute:
                if isinstance(assoc, dict):
                    if assoc.get("Main", False):
                        is_main = True
                        break
                else:
                    if getattr(assoc, "main", False):
                        is_main = True
                        break
        if not is_main:
            rt.delete()

    # Now delete the VPC
    ec2_client.delete_vpc(VpcId=default_vpc_id)

    configuration = await construct_configuration(aws_credentials=aws_credentials)

    def handle_error(exc_grp: ExceptionGroup):
        # The worker raises inside a task group, so the error arrives wrapped
        exc = exc_grp.exceptions[0]
        assert isinstance(exc, ValueError)
        assert "Failed to find the default VPC" in str(exc)

    with catch({ValueError: handle_error}):
        async with ECSWorker(work_pool_name="test") as worker:
            await worker.run(flow_run, configuration)


@pytest.mark.usefixtures("ecs_mocks")
async def test_network_config_from_vpc_with_no_subnets(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """A VPC without any subnets should surface a descriptive ValueError."""
    ec2_resource = aws_credentials.get_boto3_session().resource("ec2")
    vpc = ec2_resource.create_vpc(CidrBlock="172.16.0.0/16")
    vpc.modify_attribute(EnableDnsHostnames={"Value": True})

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        vpc_id=vpc.id,
    )

    def assert_missing_subnets(exc_grp: ExceptionGroup):
        error = exc_grp.exceptions[0]
        assert isinstance(error, ValueError)
        assert "Failed to find subnets for VPC with ID" in str(error)

    with catch({ValueError: assert_missing_subnets}):
        async with ECSWorker(work_pool_name="test") as worker:
            await worker.run(flow_run, configuration)


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["FARGATE", "FARGATE_SPOT"])
async def test_bridge_network_mode_raises_on_fargate(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
    launch_type: str,
):
    """'bridge' network mode is incompatible with Fargate launch types and
    should surface a ValueError from the worker."""
    configuration = await construct_configuration_with_job_template(
        aws_credentials=aws_credentials,
        launch_type=launch_type,
        template_overrides=dict(task_definition={"networkMode": "bridge"}),
    )

    def assert_incompatible_mode(exc_grp: ExceptionGroup):
        error = exc_grp.exceptions[0]
        assert isinstance(error, ValueError)
        assert (
            "Found network mode 'bridge' which is not compatible with launch type"
            in str(error)
        )

    with catch({ValueError: assert_incompatible_mode}):
        async with ECSWorker(work_pool_name="test") as worker:
            await worker.run(flow_run, configuration)


# Preserve the real botocore API dispatcher so the patched version below can
# delegate every operation it does not explicitly intercept.
orig = botocore.client.BaseClient._make_api_call


def mock_make_api_call(self, operation_name, kwarg):
    """Stand-in for `BaseClient._make_api_call` that forces `RunTask` to
    report a failure; every other operation is delegated to the real
    dispatcher captured in `orig`."""
    if operation_name != "RunTask":
        return orig(self, operation_name, kwarg)
    return {
        "failures": [
            {"arn": "string", "reason": "string", "detail": "string"},
        ]
    }


@pytest.mark.usefixtures("ecs_mocks")
async def test_run_task_error_handling(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
    capsys,
):
    """Failures reported by the `RunTask` API should surface as a RuntimeError
    containing the failure reason."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        task_role_arn="test",
    )

    def assert_run_task_failure(exc_grp: ExceptionGroup):
        assert len(exc_grp.exceptions) == 1
        error = exc_grp.exceptions[0]
        assert isinstance(error, RuntimeError)
        assert error.args[0] == "Failed to run ECS task: string"

    with mock_patch(
        "botocore.client.BaseClient._make_api_call", new=mock_make_api_call
    ):
        async with ECSWorker(work_pool_name="test") as worker:
            with catch({RuntimeError: assert_run_task_failure}):
                await worker.run(flow_run, configuration)


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize(
    "cloudwatch_logs_options,flow_run",
    # Overriding the `flow_run` fixture via parametrize: one case with a
    # deployment id and one without, crossed with options that do and do not
    # set an explicit stream prefix
    product(
        [
            {
                "awslogs-stream-prefix": "override-prefix",
                "max-buffer-size": "2m",
            },
            {
                "max-buffer-size": "2m",
            },
        ],
        [
            FlowRun(deployment_id=uuid4(), flow_id=uuid4()),
            FlowRun(deployment_id=None, flow_id=uuid4()),
        ],
    ),
)
async def test_cloudwatch_log_options(
    aws_credentials: AwsCredentials, flow_run: FlowRun, cloudwatch_logs_options: dict
):
    """User-supplied CloudWatch log options should be merged into the Prefect
    container's log configuration; the stream prefix falls back to a name
    derived from the work pool plus the deployment id (or flow id when there
    is no deployment)."""
    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        configure_cloudwatch_logs=True,
        execution_role_arn="test",
        cloudwatch_logs_options=cloudwatch_logs_options,
    )
    work_pool_name = "test"
    async with ECSWorker(work_pool_name=work_pool_name) as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)

    for container in task_definition["containerDefinitions"]:
        # Expected prefix: explicit option wins, then deployment id, then flow id
        if cloudwatch_logs_options.get("awslogs-stream-prefix"):
            prefix = cloudwatch_logs_options["awslogs-stream-prefix"]
        elif flow_run.deployment_id:
            prefix = f"prefect-logs_{work_pool_name}_{flow_run.deployment_id}"
        else:
            prefix = f"prefect-logs_{work_pool_name}_{flow_run.flow_id}"
        if container["name"] == ECS_DEFAULT_CONTAINER_NAME:
            # Assert that the container has logging configured with user
            # provided options
            assert container["logConfiguration"] == {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-create-group": "true",
                    "awslogs-group": "prefect",
                    "awslogs-region": "us-east-1",
                    "awslogs-stream-prefix": prefix,
                    "max-buffer-size": "2m",
                },
            }
        else:
            # Other containers should not be modified
            assert "logConfiguration" not in container


@pytest.mark.usefixtures("ecs_mocks")
async def test_deregister_task_definition(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """`auto_deregister_task_definition` marks the task with a tag so that the
    observer can deregister the task definition later."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        auto_deregister_task_definition=True,
    )

    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    ecs_client = aws_credentials.get_boto3_session().client("ecs")
    _, task_arn = parse_identifier(result.identifier)
    launched_task = describe_task(ecs_client, task_arn)

    # NOTE(review): "degregister" looks misspelled but presumably mirrors the
    # tag key the worker writes — confirm against the worker implementation
    # before changing it here.
    assert any(
        tag["key"] == "prefect.io/degregister-task-definition"
        and tag["value"] == "true"
        for tag in launched_task.get("tags", {})
    )


@pytest.mark.usefixtures("ecs_mocks")
async def test_deregister_task_definition_does_not_apply_to_linked_arn(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """A task definition supplied by ARN was not registered by the worker, so
    auto-deregistration must leave it ACTIVE.

    Fix: the final status comparison was a bare expression with no `assert`,
    so the test previously verified nothing.
    """
    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    task_definition_arn = ecs_client.register_task_definition(**TEST_TASK_DEFINITION)[
        "taskDefinition"
    ]["taskDefinitionArn"]

    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        auto_deregister_task_definition=True,
        task_definition_arn=task_definition_arn,
        launch_type="EC2",
    )
    async with ECSWorker(work_pool_name="test") as worker:
        result = await worker.run(flow_run, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    # The pre-registered task definition must remain active
    assert describe_task_definition(ecs_client, task)["status"] == "ACTIVE"


@pytest.mark.usefixtures("ecs_mocks")
async def test_match_latest_revision_in_family(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """With `match_latest_revision_in_family`, a fresh worker should reuse the
    newest existing revision instead of registering a third one."""
    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    base_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
    )
    role_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        execution_role_arn="test",
    )
    matching_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        match_latest_revision_in_family=True,
        execution_role_arn="test",
    )

    # The first worker registers two task definition revisions
    async with ECSWorker(work_pool_name="test") as worker:
        await worker.run(flow_run, base_configuration)
        first_result = await worker.run(flow_run, role_configuration)

    # A second worker starts with an empty cache and should match revision 2
    async with ECSWorker(work_pool_name="test") as worker:
        second_result = await worker.run(flow_run, matching_configuration)

    _, first_task_arn = parse_identifier(first_result.identifier)
    _, second_task_arn = parse_identifier(second_result.identifier)

    first_task = describe_task(ecs_client, first_task_arn)
    second_task = describe_task(ecs_client, second_task_arn)

    assert first_task["taskDefinitionArn"] == second_task["taskDefinitionArn"]
    assert second_task["taskDefinitionArn"].endswith(":2")


@pytest.mark.usefixtures("ecs_mocks")
async def test_match_latest_revision_in_family_custom_family(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Latest-revision matching should also work when a custom task definition
    family name is configured."""
    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    base_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        family="test-family",
    )
    role_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        execution_role_arn="test",
        family="test-family",
    )
    matching_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        match_latest_revision_in_family=True,
        execution_role_arn="test",
        family="test-family",
    )

    # The first worker registers two task definition revisions in the family
    async with ECSWorker(work_pool_name="test") as worker:
        await worker.run(flow_run, base_configuration)
        first_result = await worker.run(flow_run, role_configuration)

    # A second worker starts with an empty cache and should match revision 2
    async with ECSWorker(work_pool_name="test") as worker:
        second_result = await worker.run(flow_run, matching_configuration)

    _, first_task_arn = parse_identifier(first_result.identifier)
    _, second_task_arn = parse_identifier(second_result.identifier)

    first_task = describe_task(ecs_client, first_task_arn)
    second_task = describe_task(ecs_client, second_task_arn)

    assert first_task["taskDefinitionArn"] == second_task["taskDefinitionArn"]
    assert second_task["taskDefinitionArn"].endswith(":2")


@pytest.mark.usefixtures("ecs_mocks")
async def test_worker_caches_registered_task_definitions(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Two runs with identical configuration should share one task definition,
    and the cache should be keyed by the flow run's deployment id."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, command="echo test"
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        first_result = await worker.run(flow_run, configuration)
        second_result = await worker.run(flow_run, configuration)

    first_task, second_task = (
        describe_task(ecs_client, parse_identifier(result.identifier)[1])
        for result in (first_result, second_result)
    )

    assert first_task["taskDefinitionArn"] == second_task["taskDefinitionArn"]
    assert flow_run.deployment_id in _TASK_DEFINITION_CACHE


@pytest.mark.usefixtures("ecs_mocks")
async def test_worker_caches_registered_task_definitions_no_deployment(
    aws_credentials: AwsCredentials, flow_run_no_deployment: FlowRun
):
    """Without a deployment, the task definition cache falls back to keying by
    the flow id, and repeated runs still share one task definition."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, command="echo test"
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        first_result = await worker.run(flow_run_no_deployment, configuration)
        second_result = await worker.run(flow_run_no_deployment, configuration)

    first_task, second_task = (
        describe_task(ecs_client, parse_identifier(result.identifier)[1])
        for result in (first_result, second_result)
    )

    assert first_task["taskDefinitionArn"] == second_task["taskDefinitionArn"]
    assert flow_run_no_deployment.flow_id in _TASK_DEFINITION_CACHE


@pytest.mark.usefixtures("ecs_mocks")
async def test_worker_cache_miss_for_registered_task_definitions_clears_from_cache(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """If the cached task definition cannot be retrieved, the worker registers
    a new one and evicts the stale entry from the cache."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, command="echo test"
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        first_result = await worker.run(flow_run, configuration)

        # Simulate a cache retrieval failure on the next run
        worker._retrieve_task_definition = MagicMock(
            side_effect=RuntimeError("failure retrieving from cache")
        )

        second_result = await worker.run(flow_run, configuration)

    first_task, second_task = (
        describe_task(ecs_client, parse_identifier(result.identifier)[1])
        for result in (first_result, second_result)
    )

    assert first_task["taskDefinitionArn"] != second_task["taskDefinitionArn"]
    assert first_task["taskDefinitionArn"] not in _TASK_DEFINITION_CACHE.values(), (
        _TASK_DEFINITION_CACHE
    )


@pytest.mark.usefixtures("ecs_mocks")
async def test_worker_task_definition_cache_is_per_deployment_id_or_flow_id(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Runs with a different deployment id — or no deployment at all — must not
    share cached task definitions."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials, command="echo test"
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    other_deployment_run = flow_run.model_copy(update=dict(deployment_id=uuid4()))
    no_deployment_run = flow_run.model_copy(update=dict(deployment_id=None))

    async with ECSWorker(work_pool_name="test") as worker:
        results = [
            await worker.run(run, configuration)
            for run in (flow_run, other_deployment_run, no_deployment_run)
        ]

    definition_arns = [
        describe_task(ecs_client, parse_identifier(result.identifier)[1])[
            "taskDefinitionArn"
        ]
        for result in results
    ]

    assert definition_arns[0] != definition_arns[1] != definition_arns[2]


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize(
    "overrides",
    [{"image": "new-image"}, {"configure_cloudwatch_logs": True}, {"family": "foobar"}],
)
async def test_worker_task_definition_cache_miss_on_config_changes(
    aws_credentials: AwsCredentials, flow_run: FlowRun, overrides: dict
):
    """Options that alter the registered task definition itself must defeat
    the cache and register a new one."""
    configuration_1 = await construct_configuration(
        aws_credentials=aws_credentials, execution_role_arn="test"
    )
    configuration_2 = await construct_configuration(
        aws_credentials=aws_credentials, execution_role_arn="test", **overrides
    )

    ecs_client = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        first_result = await worker.run(flow_run, configuration_1)
        second_result = await worker.run(flow_run, configuration_2)

    first_task, second_task = (
        describe_task(ecs_client, parse_identifier(result.identifier)[1])
        for result in (first_result, second_result)
    )

    assert first_task["taskDefinitionArn"] != second_task["taskDefinitionArn"]


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize(
    "overrides",
    [{"image": "new-image"}, {"configure_cloudwatch_logs": True}, {"family": "foobar"}],
)
async def test_worker_task_definition_cache_miss_on_deregistered(
    aws_credentials: AwsCredentials, flow_run: FlowRun, overrides: dict
):
    """A changed configuration must register a new task definition even when
    the first run enabled auto-deregistration.

    Fix: the keyword was misspelled `auto_deregister_task_defininition`, so
    the option the test intends to exercise was silently never applied.
    """
    configuration_1 = await construct_configuration(
        aws_credentials=aws_credentials,
        execution_role_arn="test",
        auto_deregister_task_definition=True,
    )
    configuration_2 = await construct_configuration(
        aws_credentials=aws_credentials, execution_role_arn="test", **overrides
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        result_1 = await worker.run(flow_run, configuration_1)
        result_2 = await worker.run(flow_run, configuration_2)

    _, task_arn_1 = parse_identifier(result_1.identifier)
    task_1 = describe_task(ecs_client, task_arn_1)
    _, task_arn_2 = parse_identifier(result_2.identifier)
    task_2 = describe_task(ecs_client, task_arn_2)

    assert task_1["taskDefinitionArn"] != task_2["taskDefinitionArn"]


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["EC2", "FARGATE"])
@pytest.mark.parametrize(
    "overrides",
    [
        {"env": {"FOO": "BAR"}},
        {"command": "test"},
        {"labels": {"FOO": "BAR"}},
        {"cluster": "test"},
        {"task_role_arn": "test"},
        # Note: null environment variables can cause override, but not when missing
        # from the base task definition
        {"env": {"FOO": None}},
        # The following would not result in a copy when using a task_definition_arn
        # but will be eagerly set on the new task definition and result in a cache miss
        # {"cpu": 2048},
        # {"memory": 4096},
        # {"execution_role_arn": "test"},
        # {"launch_type": "EXTERNAL"},
    ],
    ids=lambda item: str(sorted(list(set(item.keys())))),
)
async def test_worker_task_definition_cache_hit_on_config_changes(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
    overrides: dict,
    launch_type: str,
):
    """
    Any of these overrides should be configured at runtime and not cause a cache miss
    and for a new task definition to be registered
    """
    base_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        execution_role_arn="test",
        launch_type=launch_type,
    )
    overridden_configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        execution_role_arn="test",
        launch_type=launch_type,
        **overrides,
    )

    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")

    # A custom cluster must exist before the worker can run tasks on it
    if "cluster" in overrides:
        create_test_ecs_cluster(ecs_client, overrides["cluster"])
        add_ec2_instance_to_ecs_cluster(session, overrides["cluster"])

    async with ECSWorker(work_pool_name="test") as worker:
        first_result = await worker.run(flow_run, base_configuration)
        second_result = await worker.run(flow_run, overridden_configuration)

    first_task, second_task = (
        describe_task(ecs_client, parse_identifier(result.identifier)[1])
        for result in (first_result, second_result)
    )

    assert first_task["taskDefinitionArn"] == second_task["taskDefinitionArn"], (
        "The existing task definition should be used"
    )


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_container_command_in_task_definition_template(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """A command baked into the task definition template is not overridden at runtime."""
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition={
                "containerDefinitions": [
                    {"name": ECS_DEFAULT_CONTAINER_NAME, "command": ["echo", "hello"]}
                ]
            }
        ),
        aws_credentials=aws_credentials,
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)

    runtime_overrides = _get_container(
        task["overrides"]["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert "command" not in runtime_overrides


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_container_command_in_task_definition_template_override(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """An explicit `command` variable overrides the template's container command."""
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition={
                "containerDefinitions": [
                    {"name": ECS_DEFAULT_CONTAINER_NAME, "command": ["echo", "hello"]}
                ]
            }
        ),
        aws_credentials=aws_credentials,
        command="echo goodbye",
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)

    runtime_overrides = _get_container(
        task["overrides"]["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert runtime_overrides["command"] == ["echo", "goodbye"]


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_container_in_task_definition_template(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """
    A fully user-defined container in the template is used as-is: the default
    Prefect container is not injected, and overrides target the user's container.
    """
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition={
                "containerDefinitions": [
                    {
                        "name": "user-defined-name",
                        "command": ["echo", "hello"],
                        "image": "alpine",
                    }
                ]
            },
        ),
        aws_credentials=aws_credentials,
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    custom = _get_container(taskdef["containerDefinitions"], "user-defined-name")
    assert custom is not None, "The user-specified container should be present"
    assert custom["command"] == ["echo", "hello"]
    assert custom["image"] == "alpine", "The image should be left unchanged"

    injected = _get_container(
        taskdef["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert injected is None, "The default container should be not be added"

    runtime_overrides = task["overrides"]["containerOverrides"]
    assert _get_container(runtime_overrides, "user-defined-name"), (
        "The user defined container should be included in overrides"
    )
    assert _get_container(runtime_overrides, ECS_DEFAULT_CONTAINER_NAME) is None, (
        "The default container should not be in overrides"
    )


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_container_image_in_task_definition_template(
    aws_credentials: AwsCredentials,
    flow_run: FlowRun,
):
    """An image hard-coded in the template wins over the `image` variable."""
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition={
                "containerDefinitions": [
                    {
                        "name": ECS_DEFAULT_CONTAINER_NAME,
                        "image": "use-this-image",
                    }
                ]
            },
        ),
        aws_credentials=aws_credentials,
        image="not-templated-anywhere",
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    container = _get_container(
        taskdef["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert container["image"] == "use-this-image", (
        "The image from the task definition should be used"
    )


@pytest.mark.usefixtures("ecs_mocks")
@pytest.mark.parametrize("launch_type", ["EC2", "FARGATE", "FARGATE_SPOT"])
async def test_user_defined_cpu_and_memory_in_task_definition_template(
    aws_credentials: AwsCredentials, launch_type: str, flow_run: FlowRun
):
    """CPU/memory set in the template are registered verbatim and never overridden."""
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition={
                "containerDefinitions": [
                    {
                        "name": ECS_DEFAULT_CONTAINER_NAME,
                        "command": "{{ command }}",
                        "image": "{{ image }}",
                        "cpu": 2048,
                        "memory": 4096,
                    }
                ],
                "cpu": "4096",
                "memory": "8192",
            },
        ),
        aws_credentials=aws_credentials,
        launch_type=launch_type,
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    container = _get_container(
        taskdef["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    runtime_overrides = task["overrides"]
    container_runtime_overrides = _get_container(
        runtime_overrides["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )

    # The template's values should be retained on the registered definition
    assert container["cpu"] == 2048
    assert container["memory"] == 4096
    assert taskdef["cpu"] == "4096"
    assert taskdef["memory"] == "8192"

    # ...and nothing should be overridden at run time
    for resource in ("cpu", "memory"):
        assert runtime_overrides.get(resource) is None
        assert container_runtime_overrides.get(resource) is None


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_environment_variables_in_task_definition_template(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """
    Template-level environment stays on the task definition; the `env` variable
    lands in the run-time container overrides instead.
    """
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition={
                "containerDefinitions": [
                    {
                        "name": ECS_DEFAULT_CONTAINER_NAME,
                        "environment": [
                            {"name": "BAR", "value": "FOO"},
                            {"name": "OVERRIDE", "value": "OLD"},
                        ],
                    }
                ],
            },
        ),
        aws_credentials=aws_credentials,
        env={"FOO": "BAR", "OVERRIDE": "NEW"},
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    container = _get_container(
        taskdef["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert container["environment"] == [
        {"name": "BAR", "value": "FOO"},
        {"name": "OVERRIDE", "value": "OLD"},
    ]

    runtime_overrides = _get_container(
        task["overrides"]["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert runtime_overrides.get("environment") == [
        {"name": "FOO", "value": "BAR"},
        {"name": "OVERRIDE", "value": "NEW"},
    ]


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_capacity_provider_strategy(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """A capacity provider strategy is sent on the run request without a launchType."""
    strategy = [{"base": 0, "weight": 1, "capacityProvider": "r6i.large"}]
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        capacity_provider_strategy=strategy,
    )
    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        # Spy on the task-run call because moto does not track
        # 'capacityProviderStrategy'
        spy = MagicMock(side_effect=worker._create_task_run)
        worker._create_task_run = spy

        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    assert not task.get("launchType")
    # The strategy is not reflected on the task itself by moto, so assert on
    # the captured request to ensure it was sent
    assert spy.call_args[0][1].get("capacityProviderStrategy") == strategy


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_capacity_provider_strategy_with_launch_type(
    aws_credentials: AwsCredentials, flow_run: FlowRun, caplog
):
    """
    When both a launch type and a capacity provider strategy are configured,
    launchType is dropped from the request with a warning.
    """
    strategy = [
        {
            "weight": 1,
            "base": 0,
            "capacityProvider": "user-defined-capacity-provider",
        }
    ]
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        launch_type="EC2",
        capacity_provider_strategy=strategy,
    )

    async with ECSWorker(work_pool_name="test") as worker:
        # Spy on the task-run call because moto does not track 'capacityProviderStrategy'
        spy = MagicMock(side_effect=worker._create_task_run)
        worker._create_task_run = spy

        outcome = await worker.run(flow_run, configuration)

    assert outcome.status_code == 0

    # The warning should have been emitted...
    assert (
        "Found capacityProviderStrategy. Removing launchType from task run request."
        in caplog.text
    )

    # ...and the request should carry the strategy with no launchType
    request_kwargs = spy.call_args[0][1]
    assert "launchType" not in request_kwargs
    assert request_kwargs.get("capacityProviderStrategy") == strategy


@pytest.mark.usefixtures("ecs_mocks")
async def test_ec2_task_definition_with_null_launch_type_uses_cluster_capacity_provider(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """
    Test that EC2-compatible task definitions can run without an explicit launchType
    to allow AWS cluster default capacity providers to work.

    Regression test for https://github.com/PrefectHQ/prefect/issues/19627
    """
    # An EC2-compatible task definition
    ec2_taskdef = {
        "containerDefinitions": [
            {
                "cpu": 1024,
                "image": "prefecthq/prefect:3-latest",
                "memory": 2048,
                "name": "prefect",
            },
        ],
        "family": "prefect-ec2",
        "requiresCompatibilities": ["EC2"],
    }

    # Configure with the EC2 task definition and no launch_type
    # (simulating user setting launch_type to null to use cluster capacity provider)
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_definition=ec2_taskdef,
        ),
        aws_credentials=aws_credentials,
    )
    # Explicitly clear launch_type to simulate user setting it to null
    configuration.task_run_request["launchType"] = None

    async with ECSWorker(work_pool_name="test") as worker:
        spy = MagicMock(side_effect=worker._create_task_run)
        worker._create_task_run = spy

        outcome = await worker.run(flow_run, configuration)

    assert outcome.status_code == 0

    # launchType should NOT be in the request, allowing AWS cluster
    # default capacity provider to be used
    request_kwargs = spy.call_args[0][1]
    assert "launchType" not in request_kwargs


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_environment_variables_in_task_run_request_template(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """
    Environment entries templated in the task-run request merge with the `env`
    variable at run time; the task definition itself stays empty, and `None`
    values unset rather than add variables.
    """
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_run_request={
                "overrides": {
                    "containerOverrides": [
                        {
                            "name": ECS_DEFAULT_CONTAINER_NAME,
                            "environment": [
                                {"name": "BAR", "value": "FOO"},
                                {"name": "OVERRIDE", "value": "OLD"},
                            ],
                        }
                    ],
                },
            },
        ),
        aws_credentials=aws_credentials,
        env={"FOO": "BAR", "OVERRIDE": "NEW", "UNSET": None},
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    container = _get_container(
        taskdef["containerDefinitions"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert container["environment"] == [], (
        "No environment variables in the task definition"
    )

    runtime_overrides = _get_container(
        task["overrides"]["containerOverrides"], ECS_DEFAULT_CONTAINER_NAME
    )
    assert runtime_overrides.get("environment") == [
        {"name": "BAR", "value": "FOO"},
        {"name": "FOO", "value": "BAR"},
        {"name": "OVERRIDE", "value": "NEW"},
    ]


@pytest.mark.usefixtures("ecs_mocks")
async def test_user_defined_tags_in_task_run_request_template(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Tags templated in the task-run request merge with `labels`, labels winning."""
    configuration = await construct_configuration_with_job_template(
        template_overrides=dict(
            task_run_request={
                "tags": [
                    {"key": "BAR", "value": "FOO"},
                    {"key": "OVERRIDE", "value": "OLD"},
                ]
            },
        ),
        aws_credentials=aws_credentials,
        labels={"FOO": "BAR", "OVERRIDE": "NEW"},
    )

    ecs = aws_credentials.get_boto3_session().client("ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    assert task.get("tags") == [
        {"key": "BAR", "value": "FOO"},
        {"key": "FOO", "value": "BAR"},
        {"key": "OVERRIDE", "value": "NEW"},
    ]


async def test_retry_on_failed_task_start(
    aws_credentials: AwsCredentials, flow_run, ecs_mocks
):
    """
    The worker retries ``run_task`` (3 attempts total) when ECS reports a
    failure starting the task, then raises.
    """
    run_task_mock = MagicMock(return_value={"failures": [{"reason": "Just cause"}]})

    configuration = await construct_configuration(
        aws_credentials=aws_credentials, command="echo test"
    )

    ecs_client = configuration.aws_credentials.get_client("ecs")

    # patch.object restores the original client method even if an assertion
    # fails, replacing the previous manual try/finally save-and-restore.
    with mock_patch.object(ecs_client, "run_task", run_task_mock):
        # The worker surfaces the failures as a RuntimeError inside an
        # exception group; swallow it so we can assert on the retry count
        with catch({RuntimeError: lambda exc_group: None}):
            async with ECSWorker(work_pool_name="test") as worker:
                await worker.run(flow_run, configuration)

        assert run_task_mock.call_count == 3


async def test_mask_sensitive_env_values():
    """Sensitive env values are truncated and masked; others are untouched."""
    request = {
        "overrides": {
            "containerOverrides": [
                {
                    "environment": [
                        {"name": "PREFECT_API_KEY", "value": "SeNsItiVe VaLuE"},
                        {"name": "PREFECT_API_URL", "value": "NORMAL_VALUE"},
                    ]
                }
            ]
        }
    }

    masked = mask_sensitive_env_values(request, ["PREFECT_API_KEY"], 3, "***")
    environment = masked["overrides"]["containerOverrides"][0]["environment"]
    assert environment[0]["value"] == "SeN***"
    assert environment[1]["value"] == "NORMAL_VALUE"


@pytest.mark.usefixtures("ecs_mocks")
async def test_get_or_generate_family(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """Without an explicit family, one is derived from the pool and deployment id."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
    )

    pool = "test"
    ecs = aws_credentials.get_boto3_session().client("ecs")
    expected_family = f"{ECS_DEFAULT_FAMILY}_{pool}_{flow_run.deployment_id}"

    async with ECSWorker(work_pool_name=pool) as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    taskdef = describe_task_definition(ecs, describe_task(ecs, arn))
    assert taskdef["family"] == expected_family


@pytest.mark.usefixtures("ecs_mocks")
async def test_get_or_generate_family_no_deployment(
    aws_credentials: AwsCredentials, flow_run_no_deployment: FlowRun
):
    """
    Without a deployment, the generated family falls back to the flow id.
    """
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
    )

    work_pool_name = "test"
    session = aws_credentials.get_boto3_session()
    ecs_client = session.client("ecs")
    family = f"{ECS_DEFAULT_FAMILY}_{work_pool_name}_{flow_run_no_deployment.flow_id}"

    # Use the work_pool_name variable (previously a duplicated "test" literal)
    # so the expected family and the worker's pool cannot drift apart
    async with ECSWorker(work_pool_name=work_pool_name) as worker:
        result = await worker.run(flow_run_no_deployment, configuration)

    _, task_arn = parse_identifier(result.identifier)

    task = describe_task(ecs_client, task_arn)
    task_definition = describe_task_definition(ecs_client, task)
    assert task_definition["family"] == family


@pytest.mark.usefixtures("ecs_mocks")
async def test_task_definitions_equal_logs_differences(caplog):
    """Differing task definitions compare unequal and log the exact differences."""

    def build_taskdef(image, cpu, **extra):
        # Two definitions differing in image, cpu, and one extra top-level key
        return {
            "containerDefinitions": [
                {
                    "name": "prefect",
                    "image": image,
                    "cpu": cpu,
                    "memory": 512,
                    "essential": True,
                }
            ],
            "family": "test-family",
            "networkMode": "bridge",
            **extra,
        }

    generated = build_taskdef("prefecthq/prefect:2-latest", 256)
    retrieved = build_taskdef(
        "prefecthq/prefect:3-latest", 512, executionRoleArn="test-role"
    )

    ecs_logger = logging.getLogger("prefect.workers.ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        with caplog.at_level(logging.DEBUG, logger="prefect.workers.ecs"):
            equal = worker._task_definitions_equal(generated, retrieved, ecs_logger)

            assert equal is False

            assert (
                "Keys only in retrieved task definition: {'executionRoleArn'}"
                in caplog.text
            )
            assert "Value differs for key 'containerDefinitions'" in caplog.text

            assert "Generated:  " in caplog.text
            assert "Retrieved: " in caplog.text
            assert "prefecthq/prefect:2-latest" in caplog.text
            assert "prefecthq/prefect:3-latest" in caplog.text
            assert "256" in caplog.text
            assert "512" in caplog.text


async def test_task_definitions_equal_environment_variable_ordering():
    """Test that task definitions with environment variables in different order are considered equal."""
    env_as_generated = [
        {"name": "PREFECT_API_URL", "value": "https://api.prefect.cloud"},
        {"name": "ENVIRONMENT", "value": "production"},
        {"name": "DATABASE_URL", "value": "postgresql://..."},
    ]
    # The same variables as the AWS ECS API may echo them back: reordered
    env_as_retrieved = [env_as_generated[2], env_as_generated[0], env_as_generated[1]]

    def build_taskdef(environment):
        return {
            "containerDefinitions": [
                {
                    "name": "prefect",
                    "image": "prefecthq/prefect:2-latest",
                    "cpu": 256,
                    "memory": 512,
                    "essential": True,
                    "environment": environment,
                }
            ],
            "family": "test-family",
            "networkMode": "bridge",
        }

    ecs_logger = logging.getLogger("prefect.workers.ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        # Semantically identical definitions must compare equal
        equal = worker._task_definitions_equal(
            build_taskdef(env_as_generated), build_taskdef(env_as_retrieved), ecs_logger
        )

        assert equal is True, (
            "Task definitions with reordered environment variables should be considered equal"
        )


async def test_task_definitions_equal_secrets_ordering():
    """Test that task definitions with secrets in different order are considered equal."""
    secret_db = {
        "name": "DB_PASSWORD",
        "valueFrom": "arn:aws:secretsmanager:us-east-1:123456789012:secret:db-pass",
    }
    secret_api = {
        "name": "API_KEY",
        "valueFrom": "arn:aws:secretsmanager:us-east-1:123456789012:secret:api-key",
    }

    def build_taskdef(secrets):
        return {
            "containerDefinitions": [
                {
                    "name": "prefect",
                    "image": "prefecthq/prefect:2-latest",
                    "cpu": 256,
                    "memory": 512,
                    "essential": True,
                    "secrets": secrets,
                }
            ],
            "family": "test-family",
            "networkMode": "bridge",
        }

    ecs_logger = logging.getLogger("prefect.workers.ecs")

    async with ECSWorker(work_pool_name="test") as worker:
        # Same secrets, opposite order: must still compare equal
        equal = worker._task_definitions_equal(
            build_taskdef([secret_db, secret_api]),
            build_taskdef([secret_api, secret_db]),
            ecs_logger,
        )

        assert equal is True, (
            "Task definitions with reordered secrets should be considered equal"
        )


@pytest.mark.usefixtures("ecs_mocks", "prefect_api_key_setting")
async def test_run_task_with_api_key(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """When PREFECT_API_KEY is set, it is injected as a container env variable."""
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
    )
    configuration.prepare_for_flow_run(flow_run)

    pool = "test"
    ecs = aws_credentials.get_boto3_session().client("ecs")
    async with ECSWorker(work_pool_name=pool) as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)

    environment = task["overrides"]["containerOverrides"][0]["environment"]
    assert any(
        entry["name"] == "PREFECT_API_KEY" and entry["value"] == "test-api-key"
        for entry in environment
    )


@pytest.mark.usefixtures("ecs_mocks", "prefect_api_key_setting")
async def test_run_task_with_api_key_secret_arn(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """
    When a secret ARN is configured for the API key, the key is delivered via
    the task definition's `secrets` and never as a plain env variable.
    """
    key_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:prefect-worker-api-key"
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        prefect_api_key_secret_arn=key_arn,
    )
    configuration.prepare_for_flow_run(flow_run)

    pool = "test"
    ecs = aws_credentials.get_boto3_session().client("ecs")
    async with ECSWorker(work_pool_name=pool) as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    assert taskdef["containerDefinitions"][0]["secrets"] == [
        {"name": "PREFECT_API_KEY", "valueFrom": key_arn}
    ]

    environment = task["overrides"]["containerOverrides"][0]["environment"]
    assert not any(entry["name"] == "PREFECT_API_KEY" for entry in environment)


@pytest.fixture
def prefect_api_auth_string_setting():
    """Temporarily set PREFECT_API_AUTH_STRING for the duration of a test."""
    with temporary_settings({PREFECT_API_AUTH_STRING: "test-auth-string"}):
        yield


@pytest.mark.usefixtures("ecs_mocks", "prefect_api_auth_string_setting")
async def test_run_task_with_api_auth_string_secret_arn(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """
    When a secret ARN is configured for the auth string, it is delivered via
    the task definition's `secrets` and never as a plain env variable.
    """
    auth_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:prefect-worker-api-auth-string"
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        prefect_api_auth_string_secret_arn=auth_arn,
    )
    configuration.prepare_for_flow_run(flow_run)

    pool = "test"
    ecs = aws_credentials.get_boto3_session().client("ecs")
    async with ECSWorker(work_pool_name=pool) as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    assert {
        "name": "PREFECT_API_AUTH_STRING",
        "valueFrom": auth_arn,
    } in taskdef["containerDefinitions"][0]["secrets"]

    environment = task["overrides"]["containerOverrides"][0]["environment"]
    assert not any(entry["name"] == "PREFECT_API_AUTH_STRING" for entry in environment)


@pytest.fixture
def prefect_both_secrets_setting():
    """Temporarily set both PREFECT_API_KEY and PREFECT_API_AUTH_STRING for a test."""
    with temporary_settings(
        {PREFECT_API_KEY: "test-api-key", PREFECT_API_AUTH_STRING: "test-auth-string"}
    ):
        yield


@pytest.mark.usefixtures("ecs_mocks", "prefect_both_secrets_setting")
async def test_run_task_with_both_secrets(
    aws_credentials: AwsCredentials, flow_run: FlowRun
):
    """
    With both secret ARNs configured, both credentials are delivered via the
    task definition's `secrets` and neither appears as a plain env variable.
    """
    key_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:prefect-worker-api-key"
    auth_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:prefect-worker-api-auth-string"
    configuration = await construct_configuration(
        aws_credentials=aws_credentials,
        prefect_api_key_secret_arn=key_arn,
        prefect_api_auth_string_secret_arn=auth_arn,
    )
    configuration.prepare_for_flow_run(flow_run)

    pool = "test"
    ecs = aws_credentials.get_boto3_session().client("ecs")
    async with ECSWorker(work_pool_name=pool) as worker:
        outcome = await worker.run(flow_run, configuration)

    _, arn = parse_identifier(outcome.identifier)
    task = describe_task(ecs, arn)
    taskdef = describe_task_definition(ecs, task)

    actual_secrets = taskdef["containerDefinitions"][0]["secrets"]
    assert len(actual_secrets) == 2
    for expected in (
        {"name": "PREFECT_API_KEY", "valueFrom": key_arn},
        {"name": "PREFECT_API_AUTH_STRING", "valueFrom": auth_arn},
    ):
        assert expected in actual_secrets

    environment = task["overrides"]["containerOverrides"][0]["environment"]
    assert not any(
        entry["name"] in ["PREFECT_API_KEY", "PREFECT_API_AUTH_STRING"]
        for entry in environment
    )
