"""Test the runner with local functions.

The functions should behave in the same way on the Argo cluster, meaning annotations
and import logic should be taken into account. The functions are not required to be a
part of a Workflow when running locally.

The tests will usually need to set the experimental feature environment variables which
can be done through monkeypatch.setenv. This is because the runner code acts as if it
is running on Argo, where the global_config will not contain the experimental features.
"""

import importlib
import json
import sys
from pathlib import Path
from typing import Any, Dict, List, Literal
from unittest.mock import MagicMock, patch

import pytest

import hera.workflows.artifact as artifact_module
import tests.helper as test_module
from hera.shared._pydantic import _PYDANTIC_VERSION
from hera.shared.serialization import serialize
from hera.workflows._runner.util import _run, _runner, create_module_string
from hera.workflows.io.v1 import Output as OutputV1

try:
    from hera.workflows.io.v2 import Output as OutputV2
except ImportError:
    from hera.workflows.io.v1 import Output as OutputV2


@pytest.mark.parametrize(
    "entrypoint,kwargs_list,expected_output",
    (
        # Untyped parameters are JSON-parsed, so string values may become
        # ints/bools/None/containers depending on content.
        pytest.param(
            "tests.script_runner.parameter_inputs:no_type_parameter",
            [{"name": "my_anything", "value": "test"}],
            "test",
            id="no-type-string",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:no_type_parameter",
            [{"name": "my_anything", "value": "1"}],
            1,
            id="no-type-int",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:no_type_parameter",
            [{"name": "my_anything", "value": "null"}],
            None,
            id="no-type-none",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:no_type_parameter",
            [{"name": "my_anything", "value": "true"}],
            True,
            id="no-type-bool",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:no_type_parameter",
            [{"name": "my_anything", "value": "[]"}],
            [],
            id="no-type-list",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:no_type_parameter",
            [{"name": "my_anything", "value": "{}"}],
            {},
            id="no-type-dict",
        ),
        # Union / Literal annotations: the declared type should win over
        # JSON parsing of the raw string value.
        pytest.param(
            "tests.script_runner.parameter_inputs:str_or_int_parameter",
            [{"name": "my_str_or_int", "value": "hi there"}],
            "type given: str",
            id="str-or-int-given-str",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:str_or_int_parameter",
            [{"name": "my_str_or_int", "value": "3"}],
            "type given: int",
            id="str-or-int-given-int",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:str_literal",
            [{"name": "my_literal", "value": "1"}],
            "type given: str",
            id="str-literal",
        ),
        # `str`-annotated parameters keep the raw JSON string, letting the
        # function itself decide to parse it.
        pytest.param(
            "tests.script_runner.parameter_inputs:str_parameter_expects_jsonstr_dict",
            [{"name": "my_json_str", "value": json.dumps({"my": "dict"})}],
            {"my": "dict"},
            id="str-json-param-as-dict",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:str_parameter_expects_jsonstr_list",
            [{"name": "my_json_str", "value": json.dumps([{"my": "dict"}])}],
            [{"my": "dict"}],
            id="str-json-param-as-list",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_str_literal",
            [{"name": "my_literal", "value": "1"}],
            "type given: str",
            id="annotated-str-literal",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_str_literal_unrelated",
            [{"name": "my_literal", "value": "1"}],
            "type given: str",
            id="annotated-str-literal-unrelated-metadata",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_str_parameter_expects_jsonstr_dict",
            [{"name": "my_json_str", "value": json.dumps({"my": "dict"})}],
            {"my": "dict"},
            id="str-json-annotated-param-as-dict",
        ),
    ),
)
def test_parameter_loading(
    entrypoint: str,
    kwargs_list: List[Dict[str, str]],
    expected_output: Any,
):
    """Test that the runner deserializes input parameter values according to the function's annotations."""
    # WHEN
    output = _runner(entrypoint, kwargs_list)

    # THEN
    assert output == expected_output


@pytest.mark.parametrize(
    "entrypoint,kwargs_list,expected_output",
    [
        (
            "examples.workflows.hera_runner.typed_script_input_output:my_function",
            [{"name": "input", "value": '{"a": 2, "b": "bar", "c": 42}'}],
            '{"output": [{"a": 2, "b": "bar", "c": 42}]}',
        ),
        (
            "examples.workflows.hera_runner.typed_script_input_output:another_function",
            [{"name": "inputs", "value": '[{"a": 2, "b": "bar", "c": 42}, {"a": 2, "b": "bar", "c": 42.0}]'}],
            '{"output": [{"a": 2, "b": "bar", "c": 42}, {"a": 2, "b": "bar", "c": 42.0}]}',
        ),
        (
            "examples.workflows.hera_runner.typed_script_input_output:str_function",
            [{"name": "input", "value": '{"a": 2, "b": "bar", "c": 42}'}],
            '{"output": [{"a": 2, "b": "bar", "c": 42}]}',
        ),
        # Kebab-cased parameter names must map back to the snake_case function args.
        (
            "examples.workflows.hera_runner.typed_script_input_output:function_kebab",
            [
                {"name": "a-but-kebab", "value": "3"},
                {"name": "b-but-kebab", "value": "bar"},
                {"name": "c-but-kebab", "value": "42.0"},
            ],
            '{"output": [{"a": 3, "b": "bar", "c": 42.0}]}',
        ),
        (
            "examples.workflows.hera_runner.typed_script_input_output:function_kebab_object",
            [{"name": "input-value", "value": '{"a": 3, "b": "bar", "c": "abc"}'}],
            '{"output": [{"a": 3, "b": "bar", "c": "abc"}]}',
        ),
    ],
)
def test_runner_parameter_inputs(
    entrypoint: str,
    kwargs_list: List[Dict[str, str]],
    expected_output: str,
):
    """Test that typed (pydantic-modelled) script inputs are deserialized and the result serialized as expected."""
    # WHEN
    output = _runner(entrypoint, kwargs_list)
    # THEN
    assert serialize(output) == expected_output


@pytest.mark.parametrize(
    "entrypoint,kwargs_list,expected_output,pydantic_mode",
    [
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_basic_types",
            [{"name": "a-but-kebab", "value": "3"}, {"name": "b-but-kebab", "value": "bar"}],
            '{"output": [{"a": 3, "b": "bar"}]}',
            _PYDANTIC_VERSION,
            id="basic-test",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_basic_types",
            [{"name": "a-but-kebab", "value": "3"}, {"name": "b-but-kebab", "value": "1"}],
            '{"output": [{"a": 3, "b": "1"}]}',
            _PYDANTIC_VERSION,
            id="str-param-given-int",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_basic_types_with_other_metadata",
            [{"name": "a-but-kebab", "value": "3"}, {"name": "b-but-kebab", "value": "1"}],
            '{"output": [{"a": 3, "b": "1"}]}',
            _PYDANTIC_VERSION,
            # This id previously duplicated "str-param-given-int" above, which makes
            # `pytest -k` selection and report output ambiguous (pytest auto-suffixes
            # duplicate ids with 0/1). Keep ids unique.
            id="str-param-given-int-with-other-metadata",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_object",
            [{"name": "input-value", "value": '{"a": 3, "b": "bar"}'}],
            '{"output": [{"a": 3, "b": "bar"}]}',
            _PYDANTIC_VERSION,
            id="annotated-object",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_object_v1",
            [{"name": "input-value", "value": '{"a": 3, "b": "bar"}'}],
            '{"output": [{"a": 3, "b": "bar"}]}',
            1,
            id="annotated-object-v1",
        ),
        pytest.param(
            "tests.script_runner.parameter_inputs:annotated_parameter_no_name",
            [{"name": "annotated_input_value", "value": '{"a": 3, "b": "bar"}'}],
            '{"output": [{"a": 3, "b": "bar"}]}',
            _PYDANTIC_VERSION,
            id="annotated-param-no-name",
        ),
    ],
)
def test_runner_annotated_parameter_inputs(
    entrypoint: str,
    kwargs_list: List[Dict[str, str]],
    expected_output: Any,
    pydantic_mode: Literal[1, 2],
    monkeypatch: pytest.MonkeyPatch,
):
    """Test that `Annotated` parameters are loaded correctly under the requested pydantic mode."""
    # GIVEN: the runner reads the pydantic mode from the environment, as it would on Argo
    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    # WHEN
    output = _runner(entrypoint, kwargs_list)
    # THEN
    assert serialize(output) == expected_output


@pytest.mark.parametrize(
    "function_name,kwargs_list,expected_files",
    [
        (
            "empty_str_param",
            [],
            [{"subpath": "tmp/hera-outputs/parameters/empty-str", "value": ""}],
        ),
        (
            "none_param",
            [],
            [{"subpath": "tmp/hera-outputs/parameters/null-str", "value": "null"}],
        ),
        (
            "script_param",
            [{"name": "a_number", "value": "3"}],
            [{"subpath": "tmp/hera-outputs/parameters/successor", "value": "4"}],
        ),
        (
            "script_artifact",
            [{"name": "a_number", "value": "3"}],
            [{"subpath": "tmp/hera-outputs/artifacts/successor", "value": "4"}],
        ),
        (
            # An explicit artifact path bypasses the hera outputs directory.
            "script_artifact_path",
            [{"name": "a_number", "value": "3"}],
            [{"subpath": "file.txt", "value": "4"}],
        ),
        (
            "script_artifact_and_param",
            [{"name": "a_number", "value": "3"}],
            [
                {"subpath": "tmp/hera-outputs/parameters/successor", "value": "4"},
                {"subpath": "tmp/hera-outputs/artifacts/successor", "value": "5"},
            ],
        ),
        (
            "script_two_params",
            [{"name": "a_number", "value": "3"}],
            [
                {"subpath": "tmp/hera-outputs/parameters/successor", "value": "4"},
                {"subpath": "tmp/hera-outputs/parameters/successor2", "value": "5"},
            ],
        ),
        (
            "script_two_artifacts",
            [{"name": "a_number", "value": "3"}],
            [
                {"subpath": "tmp/hera-outputs/artifacts/successor", "value": "4"},
                {"subpath": "tmp/hera-outputs/artifacts/successor2", "value": "5"},
            ],
        ),
        (
            "script_outputs_in_function_signature",
            [{"name": "a_number", "value": "3"}],
            [
                {"subpath": "tmp/hera-outputs/parameters/successor", "value": "4"},
                {"subpath": "tmp/hera-outputs/artifacts/successor2", "value": "5"},
            ],
        ),
        (
            "script_outputs_in_function_signature_with_path",
            [{"name": "a_number", "value": "3"}],
            [
                {"subpath": "successor", "value": "4"},
                {"subpath": "successor2", "value": "5"},
            ],
        ),
        (
            "script_param_artifact_in_function_signature_and_return_type",
            [{"name": "a_number", "value": "3"}],
            [
                {"subpath": "tmp/hera-outputs/parameters/successor", "value": "4"},
                {"subpath": "tmp/hera-outputs/artifacts/successor2", "value": "5"},
                {"subpath": "tmp/hera-outputs/parameters/successor3", "value": "6"},
                {"subpath": "tmp/hera-outputs/artifacts/successor4", "value": "7"},
            ],
        ),
        (
            "return_list_str",
            [],
            [{"subpath": "tmp/hera-outputs/parameters/list-of-str", "value": '["my", "list"]'}],
        ),
        (
            "return_dict",
            [],
            [{"subpath": "tmp/hera-outputs/parameters/dict-of-str", "value": '{"my-key": "my-value"}'}],
        ),
        (
            "return_base_model",
            [],
            [{"subpath": "tmp/hera-outputs/parameters/base-model-output", "value": '{"a": "foo", "b": "bar"}'}],
        ),
    ],
)
def test_script_annotations_outputs(
    function_name: str,
    kwargs_list: List[Dict[str, str]],
    expected_files: List[Dict[str, str]],
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    """Test that the output annotations are parsed correctly and save outputs to correct destinations."""
    # GIVEN
    outputs_directory = str(tmp_path / "tmp/hera-outputs")

    monkeypatch.setattr(test_module, "ARTIFACT_PATH", str(tmp_path))
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # Force a reload of the test module, as the runner performs "importlib.import_module", which
    # may fetch a cached version which will not have the correct ARTIFACT_PATH
    import tests.script_runner.annotated_outputs as output_tests_module

    importlib.reload(output_tests_module)

    # WHEN
    output = _runner(f"{output_tests_module.__name__}:{function_name}", kwargs_list)
    # THEN
    assert output is None, "Runner should not return values directly when using return Annotations"
    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize(
    "function_name,expected_error,expected_files",
    [
        (
            "script_param_output_raises_index_error",
            IndexError,
            [{"subpath": "tmp/hera-outputs/parameters/param-output", "value": ""}],
        ),
        (
            "script_artifact_output_raises_index_error",
            IndexError,
            [{"subpath": "tmp/hera-outputs/artifacts/artifact-output", "value": ""}],
        ),
    ],
)
def test_script_raising_error_still_outputs(
    function_name: str,
    expected_error: type,
    expected_files: List[Dict[str, str]],
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    """Test that output files are still written even when the script function raises an error."""
    # GIVEN

    outputs_directory = str(tmp_path / "tmp/hera-outputs")

    monkeypatch.setattr(test_module, "ARTIFACT_PATH", str(tmp_path))
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # Force a reload of the test module, as the runner performs "importlib.import_module", which
    # may fetch a cached version which will not have the correct ARTIFACT_PATH
    import tests.script_runner.annotated_outputs as output_tests_module

    importlib.reload(output_tests_module)

    # WHEN: the error propagates out of the runner
    with pytest.raises(expected_error):
        _runner(f"{output_tests_module.__name__}:{function_name}", [])

    # THEN: the (empty) output files were created before the error was raised
    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize(
    "function_name,kwargs_list,exception",
    [
        (
            "script_two_params_one_output",
            [{"name": "a_number", "value": "3"}],
            "The number of outputs does not match the annotation",
        ),
        (
            "script_param_incorrect_basic_type",
            [{"name": "a_number", "value": "3"}],
            "The type of output `successor`, `<class 'str'>` does not match the annotated type `<class 'int'>`",
        ),
        (
            "script_param_incorrect_generic_type",
            [{"name": "a_number", "value": "3"}],
            "The type of output `successor`, `<class 'int'>` does not match the annotated type `typing.Dict[str, str]`",
        ),
        (
            "script_param_no_name",
            [{"name": "a_number", "value": "3"}],
            "The name was not provided for one of the outputs.",
        ),
    ],
)
def test_script_annotations_outputs_exceptions(
    function_name: str,
    kwargs_list: List[Dict[str, str]],
    exception: str,
):
    """Test that the output annotations throw the expected exceptions."""
    # WHEN
    with pytest.raises(ValueError) as e:
        _ = _runner(f"tests.script_runner.annotated_outputs:{function_name}", kwargs_list)

    # THEN
    assert exception in str(e.value)


@pytest.mark.parametrize(
    "function,file_contents,expected_output",
    [
        (
            "no_loader",
            "First test!",
            "First test!",
        ),
        (
            "no_loader_as_string",
            "Another test",
            "Another test",
        ),
        (
            "json_object_loader",
            """{"a": "Hello ", "b": "there!"}""",
            "Hello there!",
        ),
        (
            "file_loader",
            "This file had a path",
            "This file had a path",
        ),
        (
            "file_loader",
            "/this/file/contains/a/path",  # A file containing a path as a string (we should not do any further processing)
            "/this/file/contains/a/path",
        ),
    ],
)
def test_script_annotations_artifact_inputs(
    function: str,
    file_contents: str,
    expected_output: str,
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    """Test that the input artifact annotations are parsed correctly and the loaders behave as intended."""
    # GIVEN
    filepath = tmp_path / "my_file.txt"
    filepath.write_text(file_contents)

    monkeypatch.setattr(test_module, "ARTIFACT_PATH", str(filepath))

    # Force a reload of the test module, as the runner performs "importlib.import_module", which
    # may fetch a cached version which will not have the correct ARTIFACT_PATH
    import tests.script_runner.artifact_loaders as module

    importlib.reload(module)

    kwargs_list = []

    # WHEN
    output = _runner(f"{module.__name__}:{function}", kwargs_list)

    # THEN
    assert serialize(output) == expected_output


@pytest.mark.parametrize(
    "entrypoint,artifact_name,file_contents,expected_output",
    [
        (
            "tests.script_runner.artifact_loaders:file_loader_default_path",
            "an_artifact",
            "Hello there!",
            "Hello there!",
        ),
    ],
)
def test_script_annotations_artifacts_no_name_or_path(
    entrypoint,
    artifact_name,
    file_contents,
    expected_output,
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
):
    """Test that artifacts without an explicit name/path are loaded from the default input directory."""
    # GIVEN: the artifact file exists under the default input directory
    (tmp_path / artifact_name).write_text(file_contents)

    # Trailing slash required
    monkeypatch.setattr(artifact_module, "_DEFAULT_ARTIFACT_INPUT_DIRECTORY", f"{tmp_path}/")

    # WHEN: the runner is invoked with no parameters
    result = _runner(entrypoint, [])

    # THEN
    assert serialize(result) == expected_output


def test_script_annotations_artifacts_wrong_loader():
    """Test that the input artifact annotation with no loader throws an exception."""
    # WHEN: running a script whose artifact annotation names an invalid loader
    with pytest.raises(ValueError) as exc_info:
        _runner("tests.script_runner.artifact_with_invalid_loader:invalid_loader", [])

    # THEN: the enum validation error surfaces
    assert "value is not a valid enumeration member" in str(exc_info.value)


def test_script_annotations_unknown_type():
    """Test that unknown annotation metadata is ignored and the value passed through unchanged."""
    # GIVEN
    entrypoint = "tests.script_runner.unknown_annotation_types:unknown_annotations_ignored"
    value = "a string"

    # WHEN
    result = _runner(entrypoint, [{"name": "my_string", "value": value}])

    # THEN
    assert serialize(result) == value


@pytest.mark.parametrize(
    "kwargs_list",
    [
        [{"name": "a_string", "value": "123"}],
        [{"name": "a_number", "value": "123"}],
    ],
)
@patch("hera.workflows._runner.util._runner")
@patch("hera.workflows._runner.util._parse_args")
def test_run(mock_parse_args, mock_runner, kwargs_list: List[Dict[str, str]], tmp_path: Path):
    """Test that _run forwards the entrypoint and deserialized args file contents to _runner."""
    # GIVEN: an args file containing the serialized kwargs
    params_file = tmp_path / "test_params"
    params_file.write_text(serialize(kwargs_list))

    mock_parse_args.return_value = MagicMock(entrypoint="my_entrypoint", args_path=params_file)
    mock_runner.return_value = kwargs_list

    # WHEN
    _run()

    # THEN
    mock_parse_args.assert_called_once()
    mock_runner.assert_called_once_with("my_entrypoint", kwargs_list)


@patch("hera.workflows._runner.util._runner")
@patch("hera.workflows._runner.util._parse_args")
def test_run_empty_file(mock_parse_args, mock_runner, tmp_path: Path):
    """Test that an empty args file is treated as an empty kwargs list."""
    # GIVEN: an args file with no content
    params_file = tmp_path / "test_params"
    params_file.write_text("")

    mock_parse_args.return_value = MagicMock(entrypoint="my_entrypoint", args_path=params_file)
    mock_runner.return_value = None

    # WHEN
    _run()

    # THEN
    mock_parse_args.assert_called_once()
    mock_runner.assert_called_once_with("my_entrypoint", [])


@patch("hera.workflows._runner.util._runner")
@patch("hera.workflows._runner.util._parse_args")
def test_run_null_string(mock_parse_args, mock_runner, tmp_path: Path):
    """Test that an args file containing the JSON literal "null" is treated as an empty kwargs list."""
    # GIVEN: an args file whose content deserializes to None
    params_file = tmp_path / "test_params"
    params_file.write_text("null")

    mock_parse_args.return_value = MagicMock(entrypoint="my_entrypoint", args_path=params_file)
    mock_runner.return_value = None

    # WHEN
    _run()

    # THEN
    mock_parse_args.assert_called_once()
    mock_runner.assert_called_once_with("my_entrypoint", [])


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,kwargs_list,expected_output",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_input_parameters",
            [
                {"name": "my_required_int", "value": "4"},
                {"name": "my_int", "value": "3"},
                {"name": "my_annotated_int", "value": "2"},
                {"name": "multiple-ints", "value": "[1, 2, 3]"},
            ],
            "42",
            id="test parameter only input variations",
        ),
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_input_parameters_unrelated_annotation",
            [
                {"name": "my_required_int", "value": "4"},
                {"name": "my_int", "value": "3"},
                {"name": "my_annotated_int", "value": "2"},
                {"name": "multiple-ints", "value": "[1, 2, 3]"},
            ],
            "42",
            id="test with unrelated annotation",
        ),
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_io_in_generic",
            [
                {"name": "my_inputs", "value": '[{"my_required_int": 2, "my_annotated_int": 3}]'},
            ],
            "1",
            id="test generic usage (reverts to regular pydantic class implementation)",
        ),
    ],
)
def test_runner_pydantic_inputs_params(
    entrypoint: str,
    kwargs_list: List[Dict[str, str]],
    expected_output: str,
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
):
    """Test that pydantic Input parameters are loaded correctly under both pydantic modes."""
    # GIVEN: select the mode-specific test module and enable the pydantic IO experimental feature
    entrypoint = entrypoint.replace("pydantic_io_vX", f"pydantic_io_v{pydantic_mode}")
    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    # WHEN
    output = _runner(entrypoint, kwargs_list)

    # THEN
    assert serialize(output) == expected_output


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,expected_files",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_output_parameters",
            [
                {"subpath": "tmp/hera-outputs/parameters/my_output_str", "value": "a string!"},
                {"subpath": "tmp/hera-outputs/parameters/second-output", "value": "my-val"},
                {"subpath": "tmp/hera-outputs/parameters/annotated_output", "value": "test"},
            ],
            id="pydantic output parameter variations",
        ),
    ],
)
def test_runner_pydantic_output_params(
    entrypoint: str,
    expected_files: List[Dict[str, str]],
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Test that pydantic Output parameters are written to files in the outputs directory."""
    # GIVEN: select the mode-specific test module and enable the pydantic IO experimental feature
    entrypoint = entrypoint.replace("pydantic_io_vX", f"pydantic_io_v{pydantic_mode}")
    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    outputs_directory = str(tmp_path / "tmp/hera-outputs")
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # WHEN
    output = _runner(entrypoint, [])

    # THEN
    assert isinstance(output, (OutputV1, OutputV2))
    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,input_files,expected_output",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_input_artifact",
            {
                "json_artifact": '{"a": 3, "b": "bar"}',
                "path": "dummy",
                "str-path": "dummy",
                "file": "dummy",
            },
            '{"a": 3, "b": "bar"}',
            id="pydantic io artifact input variations",
        ),
    ],
)
def test_runner_pydantic_input_artifacts(
    entrypoint: str,
    input_files: Dict,
    expected_output: str,
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Test that pydantic Input artifacts are loaded from files correctly under both pydantic modes."""
    # GIVEN: the artifact files exist in the (patched) default input directory
    entrypoint = entrypoint.replace("pydantic_io_vX", f"pydantic_io_v{pydantic_mode}")

    for file, contents in input_files.items():
        filepath = tmp_path / file
        filepath.write_text(contents)

    monkeypatch.setattr(artifact_module, "_DEFAULT_ARTIFACT_INPUT_DIRECTORY", f"{tmp_path}/")
    monkeypatch.setattr(test_module, "ARTIFACT_PATH", str(tmp_path))

    # Reload the module so it picks up the patched paths (see module docstring note on caching)
    module = importlib.import_module(entrypoint.split(":")[0])
    importlib.reload(module)

    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    # WHEN
    output = _runner(entrypoint, [])

    # THEN
    assert serialize(output) == expected_output


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,input_files,expected_files",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_output_artifact",
            {
                "json": '{"a": 3, "b": "bar"}',
                "path": "dummy",
                "str-path": "dummy",
                "file": "dummy",
            },
            [
                {"subpath": "tmp/hera-outputs/artifacts/artifact-str-output", "value": "test"},
                {"subpath": "tmp/hera-outputs/artifacts/another_artifact", "value": "test2"},
            ],
            id="pydantic io artifact output variations",
        ),
    ],
)
def test_runner_pydantic_output_artifacts(
    entrypoint: str,
    input_files: Dict,
    expected_files: List[Dict[str, str]],
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Test that pydantic Output artifacts are written to files in the outputs directory."""
    # GIVEN: input artifact files exist and the experimental pydantic IO feature is enabled
    entrypoint = entrypoint.replace("pydantic_io_vX", f"pydantic_io_v{pydantic_mode}")

    for file, contents in input_files.items():
        filepath = tmp_path / file
        filepath.write_text(contents)

    monkeypatch.setattr(artifact_module, "_DEFAULT_ARTIFACT_INPUT_DIRECTORY", f"{tmp_path}/")
    monkeypatch.setattr(test_module, "ARTIFACT_PATH", str(tmp_path))
    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    outputs_directory = str(tmp_path / "tmp/hera-outputs")
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # WHEN
    output = _runner(entrypoint, [])

    # THEN
    assert isinstance(output, (OutputV1, OutputV2))
    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,expected_files",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_output_using_exit_code",
            [
                {"subpath": "tmp/hera-outputs/parameters/my_output_str", "value": "a string!"},
                {"subpath": "tmp/hera-outputs/parameters/second-output", "value": "my-val"},
            ],
            id="pydantic output with exit code",
        ),
    ],
)
def test_runner_pydantic_output_with_exit_code(
    entrypoint: str,
    expected_files: List[Dict[str, str]],
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Test that a pydantic Output's exit_code is returned by _runner while output files are still written."""
    # GIVEN
    entrypoint = entrypoint.replace("pydantic_io_vX", f"pydantic_io_v{pydantic_mode}")

    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    outputs_directory = str(tmp_path / "tmp/hera-outputs")
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # WHEN / THEN
    output = _runner(entrypoint, [])

    # _runner itself does not exit; it surfaces the exit code on the Output object
    assert output.exit_code == 42

    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,expected_files",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_vX:pydantic_output_using_exit_code",
            [
                {"subpath": "tmp/hera-outputs/parameters/my_output_str", "value": "a string!"},
                {"subpath": "tmp/hera-outputs/parameters/second-output", "value": "my-val"},
            ],
            id="use _run to check actual system exit",
        ),
    ],
)
@patch("hera.workflows._runner.util._parse_args")
def test_run_pydantic_output_with_exit_code(
    mock_parse_args,
    entrypoint: str,
    expected_files: List[Dict[str, str]],
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Test that _run raises SystemExit with the Output's exit_code while still writing output files."""
    # GIVEN: _run reads the entrypoint/args from (mocked) parsed CLI args
    entrypoint = entrypoint.replace("pydantic_io_vX", f"pydantic_io_v{pydantic_mode}")
    file_path = Path(tmp_path / "test_params")
    file_path.write_text("")
    args = MagicMock(entrypoint=entrypoint, args_path=file_path)
    mock_parse_args.return_value = args

    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    # Reload so the module reflects the patched environment (see module docstring note on caching)
    module = importlib.import_module(entrypoint.split(":")[0])
    importlib.reload(module)

    outputs_directory = str(tmp_path / "tmp/hera-outputs")
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # WHEN / THEN: unlike _runner, _run performs an actual sys.exit
    with pytest.raises(SystemExit) as e:
        _run()

    assert e.value.code == 42
    mock_parse_args.assert_called_once()

    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,expected_files,expected_result",
    [
        pytest.param(
            # NOTE: the entrypoint is pinned to the v1 module here (no vX substitution),
            # while pydantic_mode still toggles the runner's mode via the environment.
            "tests.script_runner.pydantic_io_v1:pydantic_output_using_result",
            [
                {"subpath": "tmp/hera-outputs/parameters/my_output_str", "value": "a string!"},
                {"subpath": "tmp/hera-outputs/parameters/second-output", "value": "my-val"},
            ],
            "42",
            id="pydantic output with result output",
        ),
    ],
)
def test_runner_pydantic_output_with_result(
    entrypoint: str,
    expected_files: List[Dict[str, str]],
    expected_result: str,
    pydantic_mode: int,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Test that a pydantic Output's `result` field is surfaced while output files are still written."""
    # GIVEN
    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")

    outputs_directory = str(tmp_path / "tmp/hera-outputs")
    monkeypatch.setenv("hera__outputs_directory", outputs_directory)

    # WHEN / THEN
    output = _runner(entrypoint, [])

    assert serialize(output.result) == serialize(expected_result)

    for file in expected_files:
        assert Path(tmp_path / file["subpath"]).is_file()
        assert Path(tmp_path / file["subpath"]).read_text() == file["value"]


@pytest.mark.parametrize("pydantic_mode", [1, _PYDANTIC_VERSION])
@pytest.mark.parametrize(
    "entrypoint,error_type,error_match",
    [
        pytest.param(
            "tests.script_runner.pydantic_io_v2_invalid:pydantic_input_invalid",
            ValueError,
            "Annotation metadata cannot contain more than one Artifact/Parameter.",
            id="invalid input annotation",
        ),
        pytest.param(
            "tests.script_runner.pydantic_io_v2_invalid:pydantic_output_invalid",
            ValueError,
            "Annotation metadata cannot contain more than one Artifact/Parameter.",
            id="invalid output annotation",
        ),
    ],
)
def test_runner_pydantic_with_invalid_annotations(
    entrypoint,
    error_type,
    error_match,
    pydantic_mode,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
):
    """Check that over-annotated pydantic IO fields are rejected before the script runs."""
    # GIVEN: experimental features enabled and an isolated outputs directory
    monkeypatch.setenv("hera__pydantic_mode", str(pydantic_mode))
    monkeypatch.setenv("hera__script_pydantic_io", "")
    monkeypatch.setenv("hera__outputs_directory", str(tmp_path / "tmp/hera-outputs"))

    # WHEN / THEN: the runner raises a descriptive error for the bad annotation
    with pytest.raises(error_type, match=error_match):
        _runner(entrypoint, [])


@pytest.mark.parametrize(
    "entrypoint",
    [
        "tests.script_runner.parameter_with_complex_types:optional_str_parameter",
        "tests.script_runner.parameter_with_complex_types:optional_str_parameter_using_union",
        "tests.script_runner.parameter_with_complex_types:optional_str_parameter_using_or",
        "tests.script_runner.parameter_with_complex_types:optional_str_parameter_using_multiple_or",
    ],
)
@pytest.mark.parametrize(
    "kwargs_list,expected_output",
    [
        pytest.param(
            [{"name": "my_string", "value": "a string"}],
            "a string",
        ),
        pytest.param(
            [{"name": "my_string", "value": None}],
            "null",
        ),
    ],
)
def test_script_optional_parameter(
    entrypoint,
    kwargs_list,
    expected_output,
):
    """Check that every spelling of an optional str parameter accepts a value and None."""
    # WHEN
    actual = _runner(entrypoint, kwargs_list)

    # THEN: the serialized return value matches the expected string
    assert serialize(actual) == expected_output


@pytest.mark.parametrize(
    "entrypoint,kwargs_list,expected_output",
    [
        # Use pytest.param with explicit ids for consistency with the rest of
        # the module (bare lists produce opaque auto-generated test ids).
        pytest.param(
            "tests.script_runner.parameter_with_complex_types:optional_int_parameter",
            [{"name": "my_int", "value": 123}],
            "123",
            id="optional-int-with-value",
        ),
        pytest.param(
            "tests.script_runner.parameter_with_complex_types:optional_int_parameter",
            [{"name": "my_int", "value": None}],
            "null",
            id="optional-int-with-none",
        ),
        pytest.param(
            "tests.script_runner.parameter_with_complex_types:union_parameter",
            [{"name": "my_param", "value": "a string"}],
            "a string",
            id="union-with-str",
        ),
        pytest.param(
            "tests.script_runner.parameter_with_complex_types:union_parameter",
            [{"name": "my_param", "value": 123}],
            "123",
            id="union-with-int",
        ),
        pytest.param(
            "tests.script_runner.parameter_with_complex_types:fn_with_output_tuple",
            [{"name": "my_string", "value": "123"}],
            '["123", "123"]',
            id="tuple-output",
        ),
    ],
)
def test_script_with_complex_types(
    entrypoint,
    kwargs_list,
    expected_output,
):
    """Check that Optional/Union/tuple-typed parameters and outputs serialize correctly."""
    # WHEN
    output = _runner(entrypoint, kwargs_list)

    # THEN: the serialized return value matches the expected string
    assert serialize(output) == expected_output


def test_script_partially_annotated_tuple_should_raise_an_error():
    """Check that a tuple return type with only some elements Annotated is rejected."""
    # GIVEN a function whose tuple return type mixes annotated and bare elements
    entrypoint = "tests.script_runner.parameter_with_complex_types:fn_with_output_tuple_partially_annotated"
    kwargs_list = [{"name": "my_string", "value": "123"}]
    expected_error = (
        "Function 'fn_with_output_tuple_partially_annotated' output has partially annotated tuple return type. "
        "Tuple elements must be all Annotated as Parameter/Artifact, or contain no Parameter/Artifact annotations for a raw tuple return type."
    )

    # WHEN / THEN: the runner refuses to run the function
    with pytest.raises(ValueError, match=expected_error):
        _runner(entrypoint, kwargs_list)


@pytest.mark.parametrize(
    "sys_path_relatives,file_rel_path,expected",
    [
        pytest.param(["project"], "project/wf_a.py", "wf_a", id="Exact direct match in sys.path"),
        pytest.param(["project"], "project/workflows/wf_a.py", "workflows.wf_a", id="Submodule match in sys.path"),
        pytest.param(
            ["project"],
            "project/workflows/subpackage/another/wf_a.py",
            "workflows.subpackage.another.wf_a",
            id="Deep submodule match in sys.path",
        ),
        pytest.param(
            ["project", "project/src"],
            "project/src/workflows/wf_b.py",
            "workflows.wf_b",
            id="More specific match (src dir) in sys.path",
        ),
        pytest.param([], "project/workflows/wf_c.py", "wf_c", id="No match, fallback to stem"),
        pytest.param(
            [""],
            "project/workflows/wf_d.py",
            "project.workflows.wf_d",
            id="sys.path contains root, nested module path is full path",
        ),
    ],
)
def test_create_module_string(
    tmp_path,
    monkeypatch,
    # Use typing.List rather than list[str]: the file imports List from typing,
    # and builtin generics in annotations require Python >= 3.9.
    sys_path_relatives: List[str],
    file_rel_path: str,
    expected: str,
):
    """Check that create_module_string derives the dotted module path from sys.path entries."""
    # GIVEN: the module file's directory structure under tmp_path (the file
    # itself is not created; only the path is resolved against sys.path)
    file_path = tmp_path / file_rel_path
    file_path.parent.mkdir(parents=True, exist_ok=True)

    # GIVEN: sys.path rewritten to contain only the given entries under tmp_path
    mock_sys_path = [str(tmp_path / rel) for rel in sys_path_relatives]
    monkeypatch.setattr(sys, "path", mock_sys_path)

    # THEN
    assert create_module_string(file_path) == expected


def test_symlinked_sys_path(tmp_path, monkeypatch):
    """Check that a symlinked sys.path entry still resolves to the real module location."""
    # GIVEN a real directory that will hold the module file
    project_dir = tmp_path / "real_project"
    project_dir.mkdir()
    module_file = project_dir / "wf.py"

    # GIVEN sys.path containing only a symlink to that directory
    link_dir = tmp_path / "link_project"
    link_dir.symlink_to(project_dir)
    monkeypatch.setattr(sys, "path", [str(link_dir)])

    # THEN the module string resolves through the symlink to the bare stem
    assert create_module_string(module_file) == "wf"
