# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import json
import re
import unittest.mock
from contextlib import nullcontext as does_not_raise
from datetime import UTC, datetime, timedelta, timezone
from typing import Optional

import pytest
from pandas import Timedelta, Timestamp

import mlrun.errors
import mlrun.utils.regex
import mlrun.utils.version
import mlrun_pipelines.client
import mlrun_pipelines.models
from mlrun.common.schemas.hub import HubSourceType
from mlrun.config import config
from mlrun.datastore.store_resources import parse_store_uri
from mlrun.utils import logger
from mlrun.utils.helpers import (
    StorePrefix,
    enrich_image_url,
    ensure_batch_job_suffix,
    extend_hub_uri_if_needed,
    get_data_from_path,
    get_parsed_docker_registry,
    get_pretty_types_names,
    get_regex_list_as_string,
    merge_requirements,
    parse_artifact_uri,
    remove_tag_from_artifact_uri,
    resolve_image_tag_suffix,
    set_data_by_path,
    split_path,
    str_to_timestamp,
    template_artifact_path,
    update_in,
    validate_artifact_key_name,
    validate_function_name,
    validate_tag_name,
    validate_v3io_stream_consumer_group,
    verify_field_regex,
    verify_list_items_type,
)

# Template for store:// URIs shared by the tests below; `kind` is one of the
# StorePrefix values (artifact / dataset / model).
STORE_PREFIX = "store://{kind}/dummy-project/dummy-db-key"
ARTIFACT_STORE_PREFIX = STORE_PREFIX.format(kind=StorePrefix.Artifact)
DATASET_STORE_PREFIX = STORE_PREFIX.format(kind=StorePrefix.Dataset)
MODEL_STORE_PREFIX = STORE_PREFIX.format(kind=StorePrefix.Model)


def test_retry_until_successful_fatal_failure():
    """A fatal-failure error must stop retrying and surface the wrapped exception."""
    underlying_error = Exception("original")

    def _fail_fatally():
        raise mlrun.errors.MLRunFatalFailureError(original_exception=underlying_error)

    # Even though one retry is allowed, the fatal failure aborts immediately
    # and the original exception is re-raised to the caller.
    with pytest.raises(Exception, match=str(underlying_error)):
        mlrun.utils.helpers.retry_until_successful(
            0, 1, logger, True, _fail_fatally
        )


@pytest.mark.parametrize(
    "d,expected",
    [
        (
            "2024-11-11 07:44:56.255000+0000",
            datetime(2024, 11, 11, 7, 44, 56, 255000, tzinfo=UTC),
        ),
        (
            "2024-11-11 07:44:56+0000",
            datetime(2024, 11, 11, 7, 44, 56, tzinfo=UTC),
        ),
    ],
)
def test_enrich_datetime_with_tz_info(d, expected: datetime):
    """Datetime strings carrying a numeric UTC offset parse into aware datetimes."""
    enriched = mlrun.utils.helpers.enrich_datetime_with_tz_info(d)
    assert enriched == expected


def test_retry_until_successful_sync():
    """Transient failures are retried until the callable stops raising."""
    attempts = 0

    def _flaky():
        nonlocal attempts
        attempts += 1
        # Fail on the first two attempts, succeed on the third.
        if attempts < 3:
            raise Exception("error")

    mlrun.utils.helpers.retry_until_successful(0, 3, logger, True, _flaky)


@pytest.mark.asyncio
async def test_retry_until_successful_async():
    """Async transient failures are retried until the coroutine stops raising."""
    attempts = 0

    async def _flaky():
        await asyncio.sleep(0.1)
        nonlocal attempts
        attempts += 1
        # Fail on the first two attempts, succeed on the third.
        if attempts < 3:
            raise Exception("error")

    await mlrun.utils.helpers.retry_until_successful_async(
        0, 3, logger, True, _flaky
    )


@pytest.mark.parametrize(
    "value, expected",
    [
        ("asd", does_not_raise()),
        ("Asd", does_not_raise()),
        ("AsA", does_not_raise()),
        ("As-123_2.8A", does_not_raise()),
        ("1As-123_2.8A5", does_not_raise()),
        (
            # Exactly 63 characters - the maximum allowed length
            "azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azs",
            does_not_raise(),
        ),
        (
            # Invalid because the first letter is -
            "-As-123_2.8A",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            # Invalid because the last letter is .
            "As-123_2.8A.",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            # Invalid because $ is not allowed
            "As-123_2.8A$a",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            # Invalid because it's more than 63 characters
            "azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsx",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
    ],
)
def test_run_name_regex(value, expected):
    """Validate run names against mlrun's run-name regex.

    From the cases above: letters, digits, '-', '_' and '.' are allowed,
    names must start and end with an alphanumeric character, and length
    is capped at 63 characters.
    """
    with expected:
        verify_field_regex("test_field", value, mlrun.utils.regex.run_name)


@pytest.mark.parametrize(
    "value, expected",
    [
        ("{{pipelineparam:op=;name=mem}}", does_not_raise()),
        ("{{pipelineparam:op=2;name=mem}}", does_not_raise()),
        ("{{pipelineparam:op=10Mb;name=mem}}", does_not_raise()),
    ],
)
def test_pipeline_param(value, expected):
    """Well-formed pipeline-parameter placeholders pass the regex check,
    including an empty, numeric, or unit-suffixed `op` value."""
    with expected:
        verify_field_regex("test_field", value, mlrun.utils.regex.pipeline_param)


@pytest.mark.parametrize(
    "value,expected",
    [
        ("asd", does_not_raise()),
        ("asdlnasd-123123-asd", does_not_raise()),
        # DNS-1035
        ("t012312-asdasd", does_not_raise()),
        (
            # Invalid because it starts with a digit (DNS-1035 requires a leading letter)
            "012312-asdasd",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        ("As-123_2.8A", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        ("1As-123_2.8A5", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        (
            # Invalid because the first letter is -
            "-As-123_2.8A",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            # Invalid because the last letter is .
            "As-123_2.8A.",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            # Invalid because $ is not allowed
            "As-123_2.8A$a",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        # sparkjob max length is 29: the 29-char name passes, the 30-char fails
        ("asdnoinasoidas-asdaskdlnaskdl", does_not_raise()),
        (
            "asdnoinasoidas-asdaskdlnaskdl2",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
    ],
)
def test_spark_job_name_regex(value, expected):
    """Validate spark job names: DNS-1035 style (lowercase alphanumerics and
    '-', must start with a letter) with a 29-character length limit."""
    with expected:
        verify_field_regex("test_field", value, mlrun.utils.regex.sparkjob_name)


@pytest.mark.parametrize(
    "case",
    [
        {
            # Non-hub URIs pass through unchanged
            "input_uri": "http://no-hub-prefix",
            "expected_output": "http://no-hub-prefix",
        },
        {
            # Missing version defaults to "latest"
            "input_uri": "hub://function_name",
            "expected_output": "function_name/latest/src/function.yaml",
        },
        {
            "input_uri": "hub://function_name:1.2.3",
            "expected_output": "function_name/1.2.3/src/function.yaml",
        },
        {
            # NOTE: dashes in the function name are normalized to underscores
            "input_uri": "hub://default/function-name",
            "expected_output": "function_name/latest/src/function.yaml",
        },
        {
            "input_uri": "hub://default/function-name:3.4.5",
            "expected_output": "function_name/3.4.5/src/function.yaml",
        },
    ],
)
def test_extend_hub_uri(rundb_mock, case):
    """hub:// URIs are expanded to a full function.yaml path under the default
    hub source URL prefix; non-hub URIs are returned untouched."""
    hub_url = mlrun.mlconf.get_default_hub_source_url_prefix(HubSourceType.functions)
    input_uri = case["input_uri"]
    expected_output = case["expected_output"]
    output, is_hub_url = extend_hub_uri_if_needed(input_uri)
    # Only hub URIs get the hub source prefix prepended
    if is_hub_url:
        expected_output = hub_url + expected_output
    assert expected_output == output


@pytest.mark.parametrize(
    "regex_list,value,expected_str,expected",
    [
        (
            [r"^.{0,9}$", r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"],
            "blabla123",
            "(?=^.{0,9}$)(?=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$).*$",
            True,
        ),
        (
            [r"^.{0,6}$", r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"],
            "blabla123",
            "(?=^.{0,6}$)(?=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$).*$",
            False,
        ),
        (
            [r"^.{0,6}$", r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"],
            "bla^%",
            "(?=^.{0,6}$)(?=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$).*$",
            False,
        ),
        (
            [r"^.{0,6}$", r"^a...e$", r"ab*"],
            "abcde",
            "(?=^.{0,6}$)(?=^a...e$)(?=ab*).*$",
            True,
        ),
        (
            [r"^.{0,6}$", r"^a...e$", r"ab*"],
            "abababe",
            "(?=^.{0,6}$)(?=^a...e$)(?=ab*).*$",
            False,
        ),
        (
            [r"^.{0,6}$", r"^a...e$", r"ab*"],
            "bcea",
            "(?=^.{0,6}$)(?=^a...e$)(?=ab*).*$",
            False,
        ),
    ],
)
def test_get_regex_list_as_string(regex_list, value, expected_str, expected):
    """Combining patterns with lookaheads yields a regex that matches only
    values satisfying every pattern in the list (logical AND)."""
    combined = get_regex_list_as_string(regex_list)
    assert combined == expected_str
    # re.match returns None on no-match, a match object otherwise
    matched = re.match(combined, value) is not None
    assert matched == expected


@pytest.mark.parametrize(
    "tag_name,expected",
    [
        (
            "tag_with_char!@#",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            "tag^name",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            "(tagname)",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            "tagname%",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        ("tagname2.0", does_not_raise()),
        ("tag-name", does_not_raise()),
        ("tag-NAME", does_not_raise()),
        ("tag_name", does_not_raise()),
    ],
)
def test_validate_tag_name(tag_name, expected):
    """Tag names allow alphanumerics (either case), '-', '_' and '.';
    special characters such as !, @, #, ^, %, parentheses are rejected."""
    with expected:
        validate_tag_name(
            tag_name,
            field_name="artifact.metadata,tag",
        )


@pytest.mark.parametrize(
    "artifact_name,expected",
    [
        # Invalid names
        (
            "artifact/name",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            "/artifact-name",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            "artifact-name/",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        (
            "artifact-name\\test",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        ("", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        (
            "artifact-name#",
            pytest.raises(mlrun.errors.MLRunInvalidArgumentError),
        ),
        ("artifact@name", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        ("artifact#name", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        ("artifact-name#", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        ("artifact:name", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        ("artifact_name$", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
        # Valid names
        ("artifact-name2.0", does_not_raise()),
        ("artifact-name3", does_not_raise()),
        ("artifact_name", does_not_raise()),
        ("artifact.name", does_not_raise()),
        ("artifactNAME", does_not_raise()),
    ],
)
def test_validate_artifact_name(artifact_name, expected):
    """Artifact key names must be non-empty and free of path separators and
    special characters (/, \\, #, @, :, $); alphanumerics, '-', '_' and '.'
    are allowed. Both the `key` and `db_key` fields use the same validation.
    """
    with expected:
        validate_artifact_key_name(
            artifact_name,
            field_name="artifact.key",
        )
    with expected:
        validate_artifact_key_name(
            artifact_name,
            field_name="artifact.db_key",
        )


@pytest.mark.parametrize(
    "uri,project,expected_project,expected_key,expected_iteration,expected_tag,expected_tree,expected_uid",
    [
        # Backward compatibility: URI without uid
        ("artifact_key", "default", "default", "artifact_key", 0, None, None, None),
        (
            "project_name/artifact_key",
            "",
            "project_name",
            "artifact_key",
            0,
            None,
            None,
            None,
        ),
        (
            "project_name/artifact_key#1",
            "",
            "project_name",
            "artifact_key",
            1,
            None,
            None,
            None,
        ),
        (
            "project_name/artifact_key:latest",
            "",
            "project_name",
            "artifact_key",
            0,
            "latest",
            None,
            None,
        ),
        (
            "project_name/artifact_key@a1b2c3",
            "",
            "project_name",
            "artifact_key",
            0,
            None,
            "a1b2c3",
            None,
        ),
        (
            "artifact_key#2:tag@us3jfdrkj",
            "default",
            "default",
            "artifact_key",
            2,
            "tag",
            "us3jfdrkj",
            None,
        ),
        # New functionality: URI with uid
        (
            "artifact_key^uid123",
            "default",
            "default",
            "artifact_key",
            0,
            None,
            None,
            "uid123",
        ),
        (
            "project_name/artifact_key^uid123",
            "",
            "project_name",
            "artifact_key",
            0,
            None,
            None,
            "uid123",
        ),
        (
            "project_name/artifact_key#1:latest@branch^uid123",
            "",
            "project_name",
            "artifact_key",
            1,
            "latest",
            "branch",
            "uid123",
        ),
        (
            "artifact_key@branch^uid123",
            "default",
            "default",
            "artifact_key",
            0,
            None,
            "branch",
            "uid123",
        ),
    ],
)
def test_parse_artifact_uri(
    uri,
    project,
    expected_project,
    expected_key,
    expected_iteration,
    expected_tag,
    expected_tree,
    expected_uid,
):
    """Artifact URIs of the form ``[project/]key[#iteration][:tag][@tree][^uid]``
    are parsed into a (project, key, iteration, tag, tree, uid) tuple.

    Omitted parts default to: the provided default project, iteration 0,
    and None for tag/tree/uid (see the parametrized cases above).
    """
    result = parse_artifact_uri(uri, project)
    assert result == (
        expected_project,
        expected_key,
        expected_iteration,
        expected_tag,
        expected_tree,
        expected_uid,
    ), f"Failed to parse artifact URI: {uri}"


@pytest.mark.parametrize(
    "value,expected",
    [
        ("a", does_not_raise()),
        ("a_b", does_not_raise()),
        ("_a_b", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)),
    ],
)
def test_validate_v3io_consumer_group(value, expected):
    """V3IO stream consumer-group names may contain underscores but must not
    begin with one."""
    with expected:
        validate_v3io_stream_consumer_group(value)


@pytest.mark.parametrize(
    "case",
    [
        {
            "image": "mlrun/mlrun",
            "expected_output": "ghcr.io/mlrun/mlrun:0.5.2-unstable-adsf76s",
        },
        {
            "image": "mlrun/mlrun:some_tag",
            "expected_output": "ghcr.io/mlrun/mlrun:some_tag",
        },
        {
            "image": "quay.io/mlrun/mlrun",
            "expected_output": "quay.io/mlrun/mlrun:0.5.2-unstable-adsf76s",
        },
        {
            "image": "quay.io/mlrun/mlrun:some_tag",
            "expected_output": "quay.io/mlrun/mlrun:some_tag",
        },
        {
            "image": "mlrun/ml-models",
            "expected_output": "ghcr.io/mlrun/ml-models:0.5.2-unstable-adsf76s",
        },
        {
            "image": "mlrun/ml-models:some_tag",
            "expected_output": "ghcr.io/mlrun/ml-models:some_tag",
        },
        {
            "image": "quay.io/mlrun/ml-models",
            "expected_output": "quay.io/mlrun/ml-models:0.5.2-unstable-adsf76s",
        },
        {
            "image": "quay.io/mlrun/ml-models:some_tag",
            "expected_output": "quay.io/mlrun/ml-models:some_tag",
        },
        {"image": "fake_mlrun/ml-models", "expected_output": "fake_mlrun/ml-models"},
        {"image": "some_repo/some_image", "expected_output": "some_repo/some_image"},
        {
            "image": "python:3.9",
            "expected_output": "dummy-repo/python:3.9",
        },
        {
            "image": "some-repo/some-image",
            "expected_output": "dummy-repo/some-repo/some-image",
            "images_to_enrich_registry": "some-repo/some-image",
        },
        {
            "image": "some-repo/some-image:some-tag",
            "expected_output": "dummy-repo/some-repo/some-image:some-tag",
            "images_to_enrich_registry": "some-repo/some-image",
        },
        {
            "image": "mlrun/mlrun",
            "expected_output": "mlrun/mlrun:0.5.2-unstable-adsf76s",
            "images_to_enrich_registry": "some-repo/some-image",
        },
        {
            "image": "mlrun/mlrun",
            "expected_output": "ghcr.io/mlrun/mlrun:0.5.2-unstable-adsf76s",
            "images_to_enrich_registry": "some-repo/some-image,mlrun/mlrun",
        },
        {
            "image": "mlrun/mlrun:some-tag",
            "expected_output": "ghcr.io/mlrun/mlrun:some-tag",
            "images_to_enrich_registry": "some-repo/some-image,mlrun/mlrun",
        },
        {
            "image": "mlrun/mlrun",
            "expected_output": "mlrun/mlrun:0.5.2-unstable-adsf76s",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun:bla",
            "expected_output": "ghcr.io/mlrun/mlrun:bla",
            "images_to_enrich_registry": "mlrun/mlrun",
            "images_registry": "ghcr.io",
        },
        {
            "image": "mlrun/mlrun:bla",
            "expected_output": "mlrun/mlrun:bla",
            "images_to_enrich_registry": "mlrun/mlrun",
            "images_registry": "",
        },
        {
            "image": "mlrun/mlrun:0.5.3",
            "expected_output": "mlrun/mlrun:0.5.3",
            "images_to_enrich_registry": "mlrun/mlrun:0.5.2",
        },
        {
            "image": "mlrun/mlrun",
            "expected_output": "ghcr.io/mlrun/mlrun:unstable",
            "images_tag": None,
            "version": "0.0.0+unstable",
        },
        {
            "image": "mlrun/mlrun",
            "expected_output": "ghcr.io/mlrun/mlrun:0.1.2-some-special-tag",
            "images_tag": None,
            "version": "0.1.2+some-special-tag",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "0.9.3-client-version",
            "images_tag": None,
            "expected_output": "mlrun/mlrun:0.9.3-client-version",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "0.9.3-client-version",
            "images_tag": "0.10.0-override-version",
            "expected_output": "mlrun/mlrun:0.10.0-override-version",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "0.9.3-client-version",
            "images_tag": "0.10.0-override-version",
            "version": "0.10.5-server-version",
            "expected_output": "mlrun/mlrun:0.10.0-override-version",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": None,
            "images_tag": None,
            "version": "0.10.5-server-version",
            "expected_output": "mlrun/mlrun:0.10.5-server-version",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "0.9.3-client-version",
            "images_tag": None,
            "version": "0.10.5-server-version",
            "expected_output": "mlrun/mlrun:0.9.3-client-version",
            "images_to_enrich_registry": "",
        },
        {
            "image": "some/image",
            "client_version": "0.9.3-client-version",
            "images_tag": None,
            "version": "0.10.5-server-version",
            "expected_output": "some/image",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "1.3.0",
            "client_python_version": "3.9.0",
            "images_tag": None,
            "version": None,
            "expected_output": "mlrun/mlrun:1.3.0",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "1.9.0",
            "client_python_version": "3.9.13",
            "images_tag": None,
            "version": None,
            "expected_output": "mlrun/mlrun:1.9.0-py39",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "1.11.0",
            "client_python_version": "3.7.13",
            "images_tag": None,
            "version": None,
            "expected_output": "mlrun/mlrun:1.11.0",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "1.9.0",
            "client_python_version": None,
            "images_tag": None,
            "version": None,
            "expected_output": "mlrun/mlrun:1.9.0",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun",
            "client_version": "1.9.0",
            "client_python_version": "3.11.13",
            "images_tag": None,
            "version": None,
            "expected_output": "mlrun/mlrun:1.9.0",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun:1.2.0",
            "client_version": "1.3.0",
            "client_python_version": None,
            "images_tag": None,
            "version": None,
            "expected_output": "mlrun/mlrun:1.2.0",
            "images_to_enrich_registry": "",
        },
        # image_url is "python", client_python_version is "3.9".
        {
            "image": "python",
            "client_python_version": "3.9",
            "expected_output": "dummy-repo/python:3.9",
        },
        # image_url is " python " (with spaces), client_python_version is "3.9".
        {
            "image": " python ",
            "client_python_version": "3.9.18",
            "expected_output": "dummy-repo/python:3.9",
        },
        {
            "image": " python ",
            "client_python_version": "3.9",
            "expected_output": "dummy-repo/python:3.9",
        },
        # image_url is "python:3.8" (tag already provided), and not in "images_to_enrich_registry".
        {
            "image": "python:3.8",
            "client_python_version": "3.9",
            "expected_output": "python:3.8",
        },
        # image_url is "python", client_python_version is None.
        {
            "image": "python",
            "client_python_version": None,
            "expected_output": "python",
        },
        # image_url is "python", client_python_version is "" (empty string).
        {
            "image": "python",
            "client_python_version": "",
            "expected_output": "python",
            "images_tag": None,
            "version": None,
            "client_version": None,
        },
        {
            "image": "myimage",
            "client_python_version": "3.9",
            "expected_output": "myimage",
        },
        {
            "image": "another/python",
            "client_python_version": "3.9",
            "expected_output": "another/python",
        },
        {
            "image": "python-something",
            "client_python_version": "3.9",
            "expected_output": "python-something",
        },
        # Test with an mlrun image like "mlrun/mlrun", client_python_version="3.9", client_version="1.6.0".
        # resolve_image_tag_suffix for 1.6.0 and py3.9 returns ""
        {
            "image": "mlrun/mlrun",
            "client_python_version": "3.9",
            "client_version": "1.6.0",
            "version": "1.6.0",  # Mock server version
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/mlrun:1.6.0",
        },
        {
            "image": "mlrun/mlrun:customtag",
            "client_python_version": "3.9",
            "images_registry": "",
            "expected_output": "mlrun/mlrun:customtag",
        },
        # version >= 1.10.0 — ml-base image is deprecated, image should be switched to mlrun/mlrun
        {
            "image": "mlrun/ml-base",
            "client_version": "1.10.0",
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/mlrun:1.10.0",
        },
        {
            "image": "mlrun/ml-base",
            "client_version": "1.10.0-rc8",
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/mlrun:1.10.0-rc8",
        },
        {
            "image": "mlrun/ml-base",
            "client_version": "1.11.0",
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/mlrun:1.11.0",
        },
        {
            "image": "mlrun/ml-base",
            "client_version": "1.10.0",
            "client_python_version": "3.9.13",
            "images_tag": None,
            "expected_output": "mlrun/mlrun:1.10.0-py39",
            "images_to_enrich_registry": "",
        },
        # version < 1.10.0 — ml-base image is still valid, image should remain unchanged
        {
            "image": "mlrun/ml-base",
            "client_version": "1.7.0",
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/ml-base:1.7.0",
        },
        {
            "image": "mlrun/ml-base",
            "client_version": "1.9.0",
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/ml-base:1.9.0",
        },
        {
            # explicit older tag in image should keep ml-base without replacement despite newer client version
            "image": "mlrun/ml-base:1.7.2",
            "client_version": "1.10.0",
            "images_tag": None,
            "images_registry": "",
            "expected_output": "mlrun/ml-base:1.7.2",
        },
        {
            # image tag > 1.10.0, the image should be switched to mlrun/mlrun
            "image": "mlrun/ml-base",
            "client_version": None,
            "images_tag": "1.10.0",
            "images_registry": "",
            "expected_output": "mlrun/mlrun:1.10.0",
        },
        {
            # images_tag takes precedence over client_version and triggers replacement even if client_version is older
            "image": "mlrun/ml-base",
            "client_version": "1.9.0",
            "images_tag": "1.10.0",
            "images_registry": "",
            "expected_output": "mlrun/mlrun:1.10.0",
        },
        {
            "image": "mlrun/mlrun-kfp",
            "client_version": "1.10.0",
            "client_python_version": "3.9.13",
            "images_tag": None,
            "expected_output": "mlrun/mlrun-kfp:1.10.0-py39",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun-kfp",
            "client_version": "1.10.0",
            "client_python_version": "3.11.13",
            "images_tag": None,
            "expected_output": "mlrun/mlrun-kfp:1.10.0",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun-kfp",
            "client_version": "1.10.0-rc1",
            "client_python_version": "3.11.13",
            "images_tag": None,
            "expected_output": "mlrun/mlrun-kfp:1.10.0-rc1",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun-kfp",
            "client_version": "1.9.0",
            "client_python_version": "3.9.10",
            "images_tag": None,
            # no -py suffix as 1.9 has no dual python support
            "expected_output": "mlrun/mlrun-kfp:1.9.0",
            "images_to_enrich_registry": "",
        },
        {
            "image": "mlrun/mlrun-kfp:1.10.0-rc37",
            "client_version": "1.10.0-rc37",
            "client_python_version": "3.9.13",
            "images_tag": None,
            "expected_output": "mlrun/mlrun-kfp:1.10.0-rc37-py39",
            "images_to_enrich_registry": "",
        },
    ],
)
def test_enrich_image(case):
    """enrich_image_url resolves the final image reference from config values
    (images_tag, registries, images_to_enrich_registry) combined with the
    client version, client python version, and the (mocked) server version.

    Each case overrides a subset of config values; the originals are restored
    in the finally block so cases do not leak into each other.
    """
    # Preserve original values
    original_images_tag = config.images_tag
    original_images_registry = config.images_registry
    original_vendor_images_registry = config.vendor_images_registry
    original_images_to_enrich_registry = config.images_to_enrich_registry
    # NOTE(review): patching `Version().get` on a fresh instance only works if
    # Version is effectively a singleton — assumed here; verify in mlrun.utils.version
    original_version_get = mlrun.utils.version.Version().get

    try:
        # Set values from case or use defaults
        config.images_tag = case.get("images_tag", "0.5.2-unstable-adsf76s")
        config.images_registry = case.get("images_registry", "ghcr.io/")
        config.vendor_images_registry = case.get(
            "vendor_images_registry", "dummy-repo/"
        )
        config.images_to_enrich_registry = case.get(
            "images_to_enrich_registry", original_images_to_enrich_registry
        )

        if "version" in case:  # Allows explicitly setting version to None for mock
            mlrun.utils.version.Version().get = unittest.mock.Mock(
                return_value={"version": case.get("version")}
            )
        elif (
            "client_version" not in case and "images_tag" not in case
        ):  # if no versions are set, ensure server is also None
            mlrun.utils.version.Version().get = unittest.mock.Mock(
                return_value={"version": None}
            )

        image = case["image"]
        expected_output = case["expected_output"]
        client_version = case.get("client_version")
        client_python_version = case.get("client_python_version")

        output = enrich_image_url(image, client_version, client_python_version)
        assert output == expected_output

    finally:
        # Restore original values
        config.images_tag = original_images_tag
        config.images_registry = original_images_registry
        config.vendor_images_registry = original_vendor_images_registry
        config.images_to_enrich_registry = original_images_to_enrich_registry
        mlrun.utils.version.Version().get = original_version_get


@pytest.mark.parametrize(
    "mlrun_version,python_version,expected",
    [
        ("1.9.0", "3.9.13", "-py39"),
        ("1.9.0", "3.11.13", ""),
        ("1.9.0", None, ""),
        ("1.9.0", "3.10.13", ""),
        ("1.9.0", "3.11.0", ""),
        ("1.8.0", "3.9.0", ""),
        ("1.8.0", "3.10.0", ""),
        ("1.9.0-rc12", "3.9.13", "-py39"),
        ("1.9.0-rc12", "3.11.13", ""),
        ("1.9.0-rc12", None, ""),
        ("1.9.0-rc12", "3.10.13", ""),
        ("1.9.1", "3.9.13", "-py39"),
        ("1.9.1", "3.11.13", ""),
        ("1.9.1", None, ""),
        ("1.9.1", "3.10.13", ""),
        ("1.9.1-rc12", "3.9.13", "-py39"),
        ("1.9.1-rc12", "3.11.13", ""),
        # an example of a version which contains a suffix of commit hash and not a rc suffix (our CI uses this format)
        ("1.9.0-zwqeiubz", "3.9.13", "-py39"),
        ("1.9.0-zwqeiubz", "3.11.13", ""),
        # an example of a dev version which contains `unstable` and not a rc suffix (When compiling from source without
        # defining a version)
        ("0.0.0-unstable", "3.9.13", "-py39"),
        ("0.0.0-unstable", "3.11.13", ""),
        # list of versions which are later than 1.9.0, if we decide to stop supporting python 3.9 in later versions
        # we can remove them
        ("1.10.0", "3.11.13", ""),
        ("1.10.0", "3.9.13", "-py39"),
        ("1.10.0-rc1", "3.9.13", "-py39"),
        ("1.10.0-rc1", "3.11.13", ""),
    ],
)
def test_resolve_image_tag_suffix(mlrun_version, python_version, expected):
    """The '-py39' image-tag suffix is produced only for python 3.9 clients on
    mlrun versions >= 1.9.0 (including rc, commit-hash, unstable, and 0.0.0
    dev variants); other python versions, None, and older mlrun versions get
    an empty suffix."""
    assert resolve_image_tag_suffix(mlrun_version, python_version) == expected


@pytest.mark.parametrize(
    "case",
    [
        {"docker_registry": "", "expected_registry": "", "expected_repository": None},
        {
            "docker_registry": "hedi/ingber",
            "expected_registry": None,
            "expected_repository": "hedi/ingber",
        },
        {
            "docker_registry": "localhost/hedingber",
            "expected_registry": "localhost",
            "expected_repository": "hedingber",
        },
        {
            "docker_registry": "gcr.io/hedingber",
            "expected_registry": "gcr.io",
            "expected_repository": "hedingber",
        },
        {
            "docker_registry": "local-registry:80/hedingber",
            "expected_registry": "local-registry:80",
            "expected_repository": "hedingber",
        },
        {
            "docker_registry": "docker-registry.default-tenant.app.hedingber-30-1.iguazio-cd1.com:80/hedingber",
            "expected_registry": "docker-registry.default-tenant.app.hedingber-30-1.iguazio-cd1.com:80",
            "expected_repository": "hedingber",
        },
        {
            "docker_registry": "docker-registry.default-tenant.app.hedingber-30-1.iguazio-cd1.com:80",
            "expected_registry": "docker-registry.default-tenant.app.hedingber-30-1.iguazio-cd1.com:80",
            "expected_repository": None,
        },
        {
            "docker_registry": "quay.io/",
            "expected_registry": "quay.io",
            "expected_repository": "",
        },
    ],
)
def test_get_parsed_docker_registry(case):
    """get_parsed_docker_registry splits the configured docker registry into (registry, repository).

    The original test mutated the global ``config.httpdb.builder.docker_registry``
    without restoring it, leaking state into any test that runs afterwards;
    save and restore the original value around the assertions.
    """
    original_docker_registry = config.httpdb.builder.docker_registry
    try:
        config.httpdb.builder.docker_registry = case["docker_registry"]
        registry, repository = get_parsed_docker_registry()
        assert case["expected_registry"] == registry
        assert case["expected_repository"] == repository
    finally:
        config.httpdb.builder.docker_registry = original_docker_registry


@pytest.mark.parametrize(
    "uri,expected_output",
    [
        ("store:///123", (StorePrefix.Artifact, "123")),
        ("store://xyz", (StorePrefix.Artifact, "xyz")),
        (
            "store://feature-sets/123",
            (StorePrefix.FeatureSet, "123"),
        ),
        (
            "store://feature-vectors/456",
            (StorePrefix.FeatureVector, "456"),
        ),
        (
            "store://artifacts/890",
            (StorePrefix.Artifact, "890"),
        ),
        ("xxx://xyz", (None, "")),
    ],
)
def test_parse_store_uri(uri, expected_output):
    """store:// URIs resolve to a (StorePrefix kind, suffix) pair; other schemes to (None, "")."""
    assert parse_store_uri(uri) == expected_output


@pytest.mark.parametrize(
    "case",
    [
        {
            "artifact_path": "v3io://just/regular/path",
            "expected_artifact_path": "v3io://just/regular/path",
        },
        {
            "artifact_path": "v3io://path-with-unrealted-template/{{run.uid}}",
            "expected_artifact_path": "v3io://path-with-unrealted-template/project",
        },
        {
            "artifact_path": "v3io://template-project-not-provided/{{project}}",
            "raise": True,
        },
        {
            "artifact_path": "v3io://template-project-provided/{{project}}",
            "project": "some-project",
            "expected_artifact_path": "v3io://template-project-provided/some-project",
        },
        {
            "artifact_path": "v3io://legacy-template-project-provided/{{run.project}}",
            "project": "some-project",
            "expected_artifact_path": "v3io://legacy-template-project-provided/some-project",
        },
    ],
)
def test_template_artifact_path(case):
    """{{project}} / {{run.project}} templates are filled in; missing project raises."""
    artifact_path = case["artifact_path"]
    project = case.get("project")
    if case.get("raise"):
        with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
            template_artifact_path(artifact_path, project)
        return
    assert template_artifact_path(artifact_path, project) == case["expected_artifact_path"]


def test_update_in():
    """update_in writes nested values by dotted path; append=True accumulates a list."""
    data = {}

    # Plain set, then overwrite at the same path.
    update_in(data, "a.b.c", 2)
    assert data["a"]["b"]["c"] == 2
    update_in(data, "a.b.c", 3)
    assert data["a"]["b"]["c"] == 3

    # append=True creates a list on first use and extends it afterwards.
    update_in(data, "a.b.d", 3, append=True)
    assert data["a"]["b"]["d"] == [3]
    update_in(data, "a.b.d", 4, append=True)
    assert data["a"]["b"]["d"] == [3, 4]


@pytest.mark.parametrize(
    "keys,val",
    [
        (
            ["meta", "label", "tags.data.com/env"],
            "value",
        ),
        (
            ["spec", "handler"],
            [1, 2, 3],
        ),
        (["metadata", "test", "labels", "test.data"], 1),
        (["metadata.test", "test.test", "labels", "test.data"], True),
        (["metadata", "test.middle.com", "labels", "test.data"], "data"),
    ],
)
def test_update_in_with_dotted_keys(keys, val):
    """Key segments containing dots are backslash-escaped so update_in treats them atomically."""
    obj = {}
    # Build the dotted path; a segment that itself contains a dot is wrapped in
    # backslashes (\segment\) so it is not split on its inner dots.
    escaped_segments = [
        segment if "." not in segment else f"\\{segment}\\" for segment in keys
    ]
    update_in(obj, ".".join(escaped_segments), val)
    # Walk back down the original (unescaped) segments to reach the stored value.
    node = obj
    for segment in keys:
        node = node.get(segment)
    assert node == val


@pytest.mark.parametrize("actual_list", [[1], [1, "asd"], [None], ["asd", 23]])
@pytest.mark.parametrize("expected_types", [[str]])
def test_verify_list_types_failure(actual_list, expected_types):
    """Any list containing a non-str item must be rejected when only str is allowed."""
    with pytest.raises(mlrun.errors.MLRunInvalidArgumentTypeError):
        verify_list_items_type(actual_list, expected_types)


@pytest.mark.parametrize(
    "actual_list", [[1.0, 8, "test"], ["test", 0.0], [None], [[["test"], 23]]]
)
@pytest.mark.parametrize("expected_types", [[str, int]])
def test_verify_list_multiple_types_failure(actual_list, expected_types):
    """Items outside the allowed {str, int} set (floats, None, nested lists) must be rejected."""
    with pytest.raises(mlrun.errors.MLRunInvalidArgumentTypeError):
        verify_list_items_type(actual_list, expected_types)


@pytest.mark.parametrize("actual_list", [[], ["test"], ["test", "test1"]])
@pytest.mark.parametrize("expected_types", [[str]])
def test_verify_list_types_success(actual_list, expected_types):
    """Empty lists and all-str lists pass validation without raising."""
    verify_list_items_type(actual_list, expected_types)


@pytest.mark.parametrize(
    "actual_list",
    [[1, 8, "test"], ["test", 0], [], ["test", 23, "test"], ["test"], [1], [123, 123]],
)
@pytest.mark.parametrize("expected_types", [[str, int]])
def test_verify_list_multiple_types_success(actual_list, expected_types):
    """Lists mixing only str and int items pass validation against [str, int]."""
    verify_list_items_type(actual_list, expected_types)


def test_get_pretty_types_names():
    """A single type renders as its name; multiple types render as Union[...] of names."""
    for type_list, expected_name in (
        ([], ""),
        ([str], "str"),
        ([str, int], "Union[str,int]"),
    ):
        assert get_pretty_types_names(type_list) == expected_name


@pytest.mark.parametrize(
    "value, expected, exception",
    [
        # True values
        ("y", True, does_not_raise()),
        ("yes", True, does_not_raise()),
        ("t", True, does_not_raise()),
        ("true", True, does_not_raise()),
        ("on", True, does_not_raise()),
        ("1", True, does_not_raise()),
        # False values
        ("n", False, does_not_raise()),
        ("no", False, does_not_raise()),
        ("f", False, does_not_raise()),
        ("false", False, does_not_raise()),
        ("off", False, does_not_raise()),
        ("0", False, does_not_raise()),
        # Invalid values
        ("maybe", None, pytest.raises(ValueError)),
        ("2", None, pytest.raises(ValueError)),
        ("", None, pytest.raises(ValueError)),
        (" ", None, pytest.raises(ValueError)),
        # Case insensitivity
        ("Y", True, does_not_raise()),
        ("nO", False, does_not_raise()),
        ("TrUe", True, does_not_raise()),
        ("FaLsE", False, does_not_raise()),
    ],
)
def test_str_to_bool(value, expected, exception):
    """str_to_bool accepts the usual truthy/falsy words case-insensitively; anything else raises ValueError."""
    with exception:
        assert mlrun.utils.str_to_bool(value) == expected


def test_str_to_timestamp():
    """str_to_timestamp parses literal dates, 'now' arithmetic (+/- offsets, floor),
    timezone-aware ISO strings, and passes Timestamp instances through unchanged.

    Removed a leftover debug ``print`` from the assertion loop.
    """
    now_time = Timestamp("2021-01-01 00:01:00")
    cases = [
        (None, None, None),
        ("1/1/2022", Timestamp("2022-01-01 00:00:00"), None),
        (Timestamp("1/1/2022"), Timestamp("1/1/2022"), None),
        ("not now", None, ValueError),
        (" now ", now_time, None),
        (" now floor 1H", now_time - Timedelta("1m"), None),
        ("now - 1d1h", now_time - Timedelta("1d1h"), None),
        ("now +1d1m", now_time + Timedelta("1d1m"), None),
        ("now +1d1m floor 1D", now_time + Timedelta("1d") - Timedelta("1m"), None),
        ("now * 1d1m", None, mlrun.errors.MLRunInvalidArgumentError),
        (
            "2022-01-11T18:28:00+00:00",
            Timestamp("2022-01-11 18:28:00+0000", tz="UTC"),
            None,
        ),
        (
            "2022-01-11T18:28:00-06:00",
            Timestamp("2022-01-11 18:28:00", tz="US/Central"),
            None,
        ),
    ]
    for time_str, expected, exception in cases:
        if exception is not None:
            with pytest.raises(exception):
                str_to_timestamp(time_str, now_time=now_time)
        else:
            assert str_to_timestamp(time_str, now_time=now_time) == expected


def test_create_linear_backoff():
    """The linear backoff grows by `coefficient` per step and is capped at stop_value.

    The original loop used ``assert expected_value, next(backoff)`` — the
    two-expression form of assert, which only checks the truthiness of
    ``expected_value`` and evaluates ``next(backoff)`` solely as the failure
    message (i.e. never), so the generator was never even advanced. Use a
    real comparison.
    """
    stop_value = 120
    base = 2
    coefficient = 4
    backoff = mlrun.utils.helpers.create_linear_backoff(base, coefficient, stop_value)
    for i in range(120):
        assert next(backoff) == min(base + i * coefficient, stop_value)


def test_create_linear_backoff_negative_coefficient():
    """A negative coefficient decreases the backoff linearly until floored at stop_value.

    Fixes three bugs in the original test: ``range(120, 0)`` is an empty range
    so the loop body never executed; ``assert expected_value, next(backoff)``
    asserted truthiness with a message instead of comparing; and for a
    decreasing sequence the bound is a floor, so the expectation must use
    ``max`` — ``min`` would clamp every early value down to stop_value.
    """
    stop_value = 2
    base = 120
    coefficient = -4
    backoff = mlrun.utils.helpers.create_linear_backoff(base, coefficient, stop_value)
    for i in range(120):
        assert next(backoff) == max(base + i * coefficient, stop_value)


def test_create_exponential_backoff():
    """The exponential backoff yields base**i per step, capped at max_value.

    The original ``assert expected_value, next(backoff)`` asserted truthiness
    with a message instead of comparing (and never advanced the generator);
    use a real equality check.
    """
    base = 2
    max_value = 120
    backoff = mlrun.utils.helpers.create_exponential_backoff(base, max_value)
    for i in range(1, 120):
        assert next(backoff) == min(base**i, max_value)


def test_create_step_backoff():
    """Each [value, occurrences] step is yielded `occurrences` times; the final
    step (occurrences=None) repeats forever.

    The original ``assert step_value, next(backoff)`` asserted truthiness with
    a message instead of comparing (and never advanced the generator); use a
    real equality check.
    """
    steps = [[2, 3], [10, 5], [120, None]]
    backoff = mlrun.utils.helpers.create_step_backoff(steps)
    for step_value, step_occurrences in steps:
        if step_occurrences is not None:
            for _ in range(step_occurrences):
                assert next(backoff) == step_value
        else:
            # Unbounded last step — sample another 10 iterations:
            for _ in range(10):
                assert next(backoff) == step_value


@pytest.mark.parametrize("fatal_exception", (False, True))
def test_retry_until_successful(fatal_exception):
    """retry_until_successful retries a failing callable until it succeeds, or
    re-raises immediately when the raised type is listed in fatal_exceptions.

    The original final checks used ``assert result, "Finished"`` (and similar),
    the two-expression assert form that only checks truthiness and treats the
    second expression as the failure message; they are now real equality
    assertions.
    """

    def test_run(backoff):
        call_count = {"count": 0}
        unsuccessful_mock = unittest.mock.Mock()
        successful_mock = unittest.mock.Mock()

        def some_func(count_dict, a, b, some_other_thing=None):
            logger.debug(
                "Some function called", a=a, b=b, some_other_thing=some_other_thing
            )
            # Fail on the first three calls, then succeed.
            if count_dict["count"] < 3:
                logger.debug("Some function is still running, raising exception")
                count_dict["count"] += 1
                unsuccessful_mock()
                raise Exception("I'm running,try again later")

            logger.debug("Some function finished successfully")
            successful_mock()
            return "Finished"

        with pytest.raises(Exception) if fatal_exception else does_not_raise():
            result = mlrun.utils.retry_until_successful(
                backoff,
                120,
                logger,
                True,
                some_func,
                call_count,
                5,
                [1, 8],
                fatal_exceptions=(Exception,) if fatal_exception else (),
                some_other_thing="Just",
            )
        if not fatal_exception:
            assert result == "Finished"
            assert unsuccessful_mock.call_count == 3
            assert successful_mock.call_count == 1

    # Exercise both a constant backoff and a generator-based one.
    test_run(0.02)

    test_run(mlrun.utils.create_linear_backoff(0.02, 0.02))


@pytest.mark.parametrize(
    "iterable_list, chunk_size, expected_chunked_list",
    [
        (["a", "b", "c"], 1, [["a"], ["b"], ["c"]]),
        (["a", "b", "c"], 2, [["a", "b"], ["c"]]),
        (["a", "b", "c"], 3, [["a", "b", "c"]]),
        (["a", "b", "c"], 4, [["a", "b", "c"]]),
        (["a", "b", "c"], 0, [["a", "b", "c"]]),
    ],
)
def test_iterate_list_by_chunks(iterable_list, chunk_size, expected_chunked_list):
    """Chunks come out at the requested size; per the table, chunk_size 0 yields one whole-list chunk."""
    chunks = mlrun.utils.iterate_list_by_chunks(iterable_list, chunk_size)
    assert list(chunks) == expected_chunked_list


@pytest.mark.parametrize(
    "username,expected_normalized_username",
    [
        # sanity, all good
        ("test", "test"),
        # ensure ends with alphanumeric
        ("test.", "test"),
        ("test-", "test"),
        # lowercase
        ("TestUser", "testuser"),
        # remove special characters
        ("UserName!@#$", "username"),
        # dasherize
        ("user_name", "user-name"),
        ("User-Name_123", "user-name-123"),
        # everything with @ (email-like username)
        ("User_Name@domain.com", "user-name"),
        ("user@domain.com", "user"),
        ("user.name@example.com", "username"),
        ("user_name@example.com", "user-name"),
    ],
)
def test_normalize_username(username, expected_normalized_username):
    """Per the table: lowercased, dasherized, specials and email domain stripped, trailing punctuation trimmed."""
    assert (
        mlrun.utils.helpers.normalize_project_username(username)
        == expected_normalized_username
    )


@pytest.mark.parametrize(
    "basedir,path,is_symlink, is_valid",
    [
        ("/base", "/base/valid", False, True),
        ("/base", "/base/valid", True, True),
        ("/base", "/../invalid", True, False),
        ("/base", "/../invalid", False, False),
    ],
)
def test_is_safe_path(basedir, path, is_symlink, is_valid):
    """Paths escaping the base directory are unsafe, whether or not they are symlinks."""
    assert mlrun.utils.is_safe_path(basedir, path, is_symlink) == is_valid


@pytest.mark.parametrize(
    "kind, tag, target_path, uid, expected",
    [
        (
            "artifact",
            "v1",
            "/path/to/artifact",
            None,
            f"{ARTIFACT_STORE_PREFIX}:v1@dummy-tree",
        ),
        (
            "artifact",
            None,
            "/path/to/artifact",
            "dummy-uid",
            f"{ARTIFACT_STORE_PREFIX}:latest@dummy-tree^dummy-uid",
        ),
        (
            "artifact",
            "latest",
            "/path/to/artifact",
            "dummy-uid",
            f"{ARTIFACT_STORE_PREFIX}:latest@dummy-tree^dummy-uid",
        ),
        (
            "dataset",
            "v1",
            "/path/to/artifact",
            None,
            f"{DATASET_STORE_PREFIX}:v1@dummy-tree",
        ),
        (
            "dataset",
            None,
            "/path/to/artifact",
            None,
            f"{DATASET_STORE_PREFIX}:latest@dummy-tree",
        ),
        (
            "dataset",
            None,
            "/path/to/artifact",
            "dummy-uid",
            f"{DATASET_STORE_PREFIX}:latest@dummy-tree^dummy-uid",
        ),
        (
            "dataset",
            "latest",
            "/path/to/artifact",
            None,
            f"{DATASET_STORE_PREFIX}:latest@dummy-tree",
        ),
        (
            "model",
            "v1",
            "/path/to/artifact",
            "dummy-uid",
            f"{MODEL_STORE_PREFIX}:v1@dummy-tree^dummy-uid",
        ),
        (
            "model",
            None,
            "/path/to/artifact",
            None,
            f"{MODEL_STORE_PREFIX}:latest@dummy-tree",
        ),
        (
            "model",
            "latest",
            "/path/to/artifact",
            "dummy-uid",
            f"{MODEL_STORE_PREFIX}:latest@dummy-tree^dummy-uid",
        ),
        ("dir", "v1", "/path/to/artifact", "dummy-uid", "/path/to/artifact"),
        ("table", "v1", "/path/to/artifact", "dummy-uid", "/path/to/artifact"),
        ("plot", "v1", "/path/to/artifact", "dummy-uid", "/path/to/artifact"),
    ],
)
def test_get_artifact_target(kind, tag, target_path, uid, expected):
    """Store kinds (artifact/dataset/model) resolve to a store:// URI — tag defaults
    to "latest", tree appended with @, uid with ^ — while other kinds resolve to
    their plain target path."""
    metadata = {"tree": "dummy-tree", "tag": tag, "uid": uid}
    spec = {"db_key": "dummy-db-key", "target_path": target_path}
    item = {"kind": kind, "spec": spec, "metadata": metadata}
    assert mlrun.utils.get_artifact_target(item, project="dummy-project") == expected


def test_validate_single_def_handler_invalid_handler():
    """Code defining `handler` twice at top level is rejected with the reserved-name error."""
    code = """
def handler():
    pass
def handler():
    pass
"""
    expected_message = (
        "The code file contains a function named “handler“, which is reserved. "
        "Use a different name for your function."
    )
    with pytest.raises(mlrun.errors.MLRunInvalidArgumentError) as exc:
        mlrun.utils.validate_single_def_handler("mlrun", code)
    assert str(exc.value) == expected_message


@pytest.mark.parametrize(
    "obj, expected",
    [
        ({"a": 1, "b": 2}, {"a": 1, "b": 2}),
        ('{"a": 1, "b": 2}', {"a": 1, "b": 2}),
        ({}, {}),
        ("{}", {}),
        (None, None),
    ],
)
def test_as_dict(obj, expected):
    """as_dict returns dicts unchanged, parses JSON strings, and passes None through."""
    assert mlrun.utils.helpers.as_dict(obj) == expected


@pytest.mark.parametrize(
    "code",
    [
        """
def dummy_handler():
    pass
def handler():
    pass
""",
        """
def handler():
    pass
""",
        """
def handler():
    pass
def dummy_handler():
    def handler():
        pass
    handler()
""",
        """
# def handler():
#     pass
def handler():
    pass
""",
    ],
)
def test_validate_single_def_handler_valid_handler(code):
    """A single top-level `handler` def is accepted — nested or commented-out duplicates don't count."""
    try:
        mlrun.utils.validate_single_def_handler("mlrun", code)
    except mlrun.errors.MLRunInvalidArgumentError:
        pytest.fail(
            "validate_single_def_handler raised MLRunInvalidArgumentError unexpectedly."
        )


@pytest.mark.parametrize(
    "base_url, path, expected_result",
    [
        # Base URL without trailing slash
        (
            "http://example.com",
            "path/to/resource",
            "http://example.com/path/to/resource",
        ),
        (
            "http://example.com",
            "/path/to/resource",
            "http://example.com/path/to/resource",
        ),
        ("http://example.com", "", "http://example.com"),
        ("http://example.com", None, "http://example.com"),
        # Base URL with trailing slash
        (
            "http://example.com/",
            "path/to/resource",
            "http://example.com/path/to/resource",
        ),
        (
            "http://example.com/",
            "/path/to/resource",
            "http://example.com/path/to/resource",
        ),
        ("http://example.com/", "", "http://example.com/"),
        ("http://example.com/", None, "http://example.com/"),
        # Path with or without leading slash
        ("http://example.com", "path", "http://example.com/path"),
        ("http://example.com/", "/path", "http://example.com/path"),
        ("http://example.com", "/path", "http://example.com/path"),
        # Complex cases
        (
            "http://example.com/base",
            "subpath/resource",
            "http://example.com/base/subpath/resource",
        ),
        (
            "http://example.com/base/",
            "/subpath/resource",
            "http://example.com/base/subpath/resource",
        ),
        # Empty base_url
        (
            "",
            "/path",
            "/path",
        ),
        (
            None,
            "/path",
            "/path",
        ),
    ],
)
def test_join_urls(base_url, path, expected_result):
    """join_urls joins base and path with exactly one slash, tolerating None/empty parts."""
    assert mlrun.utils.helpers.join_urls(base_url, path) == expected_result


@pytest.mark.parametrize(
    "input_time, expected_output",
    [
        (None, None),
        # no timezone
        ("2025-01-15T11:00:00", datetime(2025, 1, 15, 11, 0, 0, tzinfo=UTC)),
        # timezone-aware datetime (UTC+2), should convert to UTC
        (
            "2025-01-15T11:00:00+02:00",
            datetime(2025, 1, 15, 9, 0, 0, tzinfo=UTC),
        ),
        # already in UTC
        (
            "2025-01-15T11:00:00+00:00",
            datetime(2025, 1, 15, 11, 0, 0, tzinfo=UTC),
        ),
    ],
)
def test_datetime_from_iso(input_time, expected_output):
    """datetime_from_iso parses ISO strings, assuming UTC for naive input and converting aware input to UTC."""
    assert mlrun.utils.helpers.datetime_from_iso(input_time) == expected_output


@pytest.mark.parametrize(
    "dt, expected",
    [
        # Test for naive datetime (without tzinfo), should be set to UTC
        (datetime(2025, 3, 13, 12, 30, 45, 123456), "2025-03-13 12:30:45.123456+00:00"),
        # Test for datetime with UTC timezone info
        (
            datetime(2025, 3, 13, 12, 30, 45, 123456, tzinfo=UTC),
            "2025-03-13 12:30:45.123456+00:00",
        ),
        # Test for datetime with a non-UTC timezone offset (+05:00), should keep the original timezone
        (
            datetime(
                2025, 3, 13, 12, 30, 45, 123456, tzinfo=timezone(timedelta(hours=5))
            ),
            "2025-03-13 12:30:45.123456+05:00",
        ),
        # Test for datetime with a timezone offset (+02:00), should keep the original timezone
        (
            datetime(
                2025, 3, 13, 12, 30, 45, 123456, tzinfo=timezone(timedelta(hours=2))
            ),
            "2025-03-13 12:30:45.123456+02:00",
        ),
    ],
)
def test_format_datetime(dt, expected):
    """format_datetime renders naive datetimes as UTC and keeps explicit offsets unchanged."""
    assert mlrun.utils.helpers.format_datetime(dt) == expected


@pytest.mark.parametrize(
    "input_start_date,"
    "input_end_date,"
    "input_existing_filter_json,"
    "input_experiment_id,"
    "expected_filter_object",
    [
        # End date only, no existing filter
        (
            None,
            "2024-11-05T15:30:00Z",
            None,
            None,
            {
                "predicates": [
                    {
                        "key": "created_at",
                        "op": mlrun_pipelines.models.FilterOperations.LESS_THAN_EQUALS.value,
                        "timestamp_value": "2024-11-05T15:30:00Z",
                    }
                ]
            },
        ),
        # Start and end dates, no existing filter
        (
            "2024-10-01T00:00:00Z",
            "2024-11-05T15:30:00Z",
            None,
            None,
            {
                "predicates": [
                    {
                        "key": "created_at",
                        "op": mlrun_pipelines.models.FilterOperations.LESS_THAN_EQUALS.value,
                        "timestamp_value": "2024-11-05T15:30:00Z",
                    },
                    {
                        "key": "created_at",
                        "op": mlrun_pipelines.models.FilterOperations.GREATER_THAN_EQUALS.value,
                        "timestamp_value": "2024-10-01T00:00:00Z",
                    },
                ]
            },
        ),
        # Existing filter predicates are preserved and the end-date predicate is
        # appended to them (the expected object below shows nothing dropped)
        (
            None,
            "2024-11-05T15:30:00Z",
            json.dumps(
                {
                    "predicates": [
                        {
                            "key": "name",
                            "op": mlrun_pipelines.models.FilterOperations.IS_SUBSTRING.value,
                            "string_value": "test-project",
                        },
                        {
                            "key": "status",
                            "op": mlrun_pipelines.models.FilterOperations.EQUALS.value,
                            "string_value": "Succeeded",
                        },
                    ]
                }
            ),
            None,
            {
                "predicates": [
                    # 'name' preserved; op expected as its raw numeric value
                    # (9 — presumably IS_SUBSTRING's enum value, matching the input above)
                    {
                        "key": "name",
                        "op": 9,
                        "string_value": "test-project",
                    },
                    # 'status' preserved as-is
                    {
                        "key": "status",
                        "op": mlrun_pipelines.models.FilterOperations.EQUALS.value,
                        "string_value": "Succeeded",
                    },
                    # end_date added
                    {
                        "key": "created_at",
                        "op": mlrun_pipelines.models.FilterOperations.LESS_THAN_EQUALS.value,
                        "timestamp_value": "2024-11-05T15:30:00Z",
                    },
                ]
            },
        ),
        # Experiment ID filter added alongside dates
        (
            "2024-10-01T00:00:00Z",
            "2024-11-05T15:30:00Z",
            None,
            "721ff4f8-d465-455e-bdab-a79857a62136",
            {
                "predicates": [
                    {
                        "key": "created_at",
                        "op": mlrun_pipelines.models.FilterOperations.LESS_THAN_EQUALS.value,
                        "timestamp_value": "2024-11-05T15:30:00Z",
                    },
                    {
                        "key": "created_at",
                        "op": mlrun_pipelines.models.FilterOperations.GREATER_THAN_EQUALS.value,
                        "timestamp_value": "2024-10-01T00:00:00Z",
                    },
                    {
                        "key": "experiment_id",
                        "op": mlrun_pipelines.models.FilterOperations.IN.value,
                        "string_values": {
                            "values": ["721ff4f8-d465-455e-bdab-a79857a62136"]
                        },
                    },
                ]
            },
        ),
    ],
)
def test_get_kfp_list_runs_filter(
    input_start_date: Optional[str],
    input_end_date: Optional[str],
    input_existing_filter_json: Optional[str],
    input_experiment_id: Optional[str],
    expected_filter_object: dict,
):
    """create_list_runs_filter merges the date range, experiment ids, and any
    pre-existing filter predicates into a single KFP list-runs filter JSON."""
    experiment_ids = []
    if input_experiment_id:
        experiment_ids.append(input_experiment_id)
    generated_filter_json: str = mlrun_pipelines.client.create_list_runs_filter(
        start_date=input_start_date,
        end_date=input_end_date,
        filter_=input_existing_filter_json,
        experiment_ids=experiment_ids,
    )
    generated_filter_object = json.loads(generated_filter_json)
    assert generated_filter_object == expected_filter_object


@pytest.mark.parametrize(
    "date_input, expected_output, expectation",
    [
        # Valid date without timezone, assume UTC
        ("2024-11-05T15:30:00", "2024-11-05T15:30:00Z", does_not_raise()),
        # Valid date with UTC timezone
        ("2024-11-05T15:30:00Z", "2024-11-05T15:30:00Z", does_not_raise()),
        # Valid date with different timezone (convert to UTC)
        ("2024-11-05T15:30:00+02:00", "2024-11-05T13:30:00Z", does_not_raise()),
        # Valid date with timezone-aware string
        ("2024-11-05T15:30:00-05:00", "2024-11-05T20:30:00Z", does_not_raise()),
        # Date with timezone info but no time
        ("2024-11-05", "2024-11-05T00:00:00Z", does_not_raise()),
        ("2024/11/05T09:00", "2024-11-05T09:00:00Z", does_not_raise()),
        # Invalid date format
        ("invalid-date", "", pytest.raises(ValueError)),
        # Overflow date (not a realistic timestamp)
        ("9999-99-99T99:99:99Z", "", pytest.raises(ValueError)),
    ],
)
def test_validate_and_convert_date(date_input, expected_output, expectation):
    """Dates normalize to the `...Z` UTC ISO form; unparseable input raises ValueError."""
    with expectation:
        converted = mlrun.utils.helpers.validate_and_convert_date(date_input)
        assert converted == expected_output


@pytest.mark.parametrize(
    "input_uri,expected_output",
    [
        ("store://proj/key:latest", "store://proj/key"),
        ("key#1:dev@tree^uid", "key#1@tree^uid"),
        ("store://key:tag", "store://key"),
        (
            "store://models/remote-model-project/my_model#0@tree",
            "store://models/remote-model-project/my_model#0@tree",
        ),
        (
            "store://llm-prompts/test-nuclio-runtime/my_llm#0:v1@0eb15a5a-b093-4ca3-9e7d-c22482a6c990^c4f4dcc412acd61460adf9b4a4e799567c4793c8",
            "store://llm-prompts/test-nuclio-runtime/my_llm#0@0eb15a5a-b093-4ca3-9e7d-c22482a6c990^c4f4dcc412acd61460adf9b4a4e799567c4793c8",
        ),
        ("key:tag", "key"),
        ("key#1:tag", "key#1"),
        ("key#1@tree", "key#1@tree"),
        ("key#1@tree^uid", "key#1@tree^uid"),
        ("store://key#1:tag@tree", "store://key#1@tree"),
    ],
)
def test_remove_tag_from_artifact_uri(input_uri, expected_output):
    """The `:tag` segment is stripped while the #iteration, @tree and ^uid markers are kept."""
    assert remove_tag_from_artifact_uri(input_uri) == expected_output


@pytest.mark.parametrize(
    "path, data, expected",
    [
        ("b", {"a": {"x": 1}, "b": 2}, 2),  # simple key with int
        ("missing", {"x": 1}, None),  # missing key
        (
            "a.b.c",
            {"a": {"b": {"c": {"value": 42}}}},
            {"value": 42},
        ),  # nested dict
        ("a.missing", {"a": {"b": 1}}, {}),  # partially missing nested path
        (None, {"x": 1, "y": 2}, {"x": 1, "y": 2}),  # path is None
    ],
)
def test_get_data_from_path_parametrized(path, data, expected):
    """get_data_from_path walks a dotted path split into segments; see the table for miss behavior."""
    segments = split_path(path)
    assert get_data_from_path(segments, data) == expected


@pytest.mark.parametrize(
    "path, initial_data, value, expected_data",
    [
        ("a", {}, 42, {"a": 42}),
        ("a.b.c", {}, 99, {"a": {"b": {"c": 99}}}),
        ("a.b.c", {"a": {"b": {"c": 1}}}, 2, {"a": {"b": {"c": 2}}}),
        ("x.y", {}, "value", {"x": {"y": "value"}}),
        ("single", {}, "only", {"single": "only"}),
        (
            None,
            {"existing": "data"},
            {"new_key": 123},
            {"existing": "data", "new_key": 123},
        ),
    ],
)
def test_set_data_by_path_success(path, initial_data, value, expected_data):
    """set_data_by_path writes a value at a dotted path, creating intermediate dicts;
    a None path merges a dict value into the root."""
    segments = split_path(path)
    set_data_by_path(segments, initial_data, value)
    assert initial_data == expected_data


@pytest.mark.parametrize(
    "path, value, exc_type, exc_msg",
    [
        # For path=None, test that non-dict value raises ValueError
        (None, "not a dict", ValueError, "value must be a dictionary"),
        # For path=None with dict value, no exception expected, so not included here
        # For invalid path types, test MLRunInvalidArgumentError is raised
        (123, "some_value", mlrun.errors.MLRunInvalidArgumentError, "Expected path"),
        (3.14, "some_value", mlrun.errors.MLRunInvalidArgumentError, "Expected path"),
        (
            {"not": "a path"},
            "some_value",
            mlrun.errors.MLRunInvalidArgumentError,
            "Expected path",
        ),
    ],
)
def test_set_data_by_path_invalid_path(path, value, exc_type, exc_msg):
    """Invalid path types (or a non-dict value with a None path) raise with a matching message."""
    target = {}
    with pytest.raises(exc_type, match=exc_msg):
        set_data_by_path(path, target, value)


@pytest.mark.parametrize(
    "priority_reqs, reqs, expected_result",
    [
        (None, None, []),
        ([], ["requests"], ["requests"]),
        (["requests"], [], ["requests"]),
        (
            ["requests>=1.0", "pydantic==1.0"],
            ["requests==2.0", "pandas"],
            ["requests>=1.0", "pydantic==1.0", "pandas"],
        ),
    ],
)
def test_merge_requirements(priority_reqs, reqs, expected_result):
    """Priority requirements win on package collisions; non-conflicting secondary ones are kept."""
    merged = merge_requirements(reqs_priority=priority_reqs, reqs_secondary=reqs)
    # Order is not part of the contract here — compare as sets.
    assert set(merged) == set(expected_result)


# Test ensure_batch_job_suffix
@pytest.mark.parametrize(
    "function_name,expected_name,expected_renamed",
    [
        # Normal case - suffix should be added
        ("my-function", "my-function-batch", True),
        # Already has suffix - should not be renamed
        ("my-function-batch", "my-function-batch", False),
        # Edge cases
        (None, None, False),
        ("", "", False),
        # Name contains "batch" but doesn't end with "-batch"
        ("batch-processor", "batch-processor-batch", True),
    ],
)
def test_ensure_batch_job_suffix(function_name, expected_name, expected_renamed):
    """The "-batch" suffix is appended unless already present; None/empty names pass through."""
    name, renamed, suffix = ensure_batch_job_suffix(function_name)
    assert name == expected_name
    assert renamed == expected_renamed
    assert suffix == "-batch"


def _invalid_name():
    """Expectation for names that violate the DNS-1123 label requirements."""
    return pytest.raises(mlrun.errors.MLRunInvalidArgumentError)


@pytest.mark.parametrize(
    "function_name,expected",
    [
        # Uppercase letters are rejected
        ("MyFunction", _invalid_name()),
        ("FUNCTION", _invalid_name()),
        ("myFunction", _invalid_name()),
        # Special characters are rejected
        ("my_function", _invalid_name()),
        ("my.function", _invalid_name()),
        ("my function", _invalid_name()),
        ("my@function", _invalid_name()),
        ("my#function", _invalid_name()),
        # Leading/trailing dashes are rejected
        ("-myfunction", _invalid_name()),
        ("myfunction-", _invalid_name()),
        # Empty name is allowed (the function returns early without validating)
        ("", does_not_raise()),
        # Names longer than 63 characters are rejected
        ("a" * 64, _invalid_name()),
        (
            "my-very-long-function-name-that-exceeds-kubernetes-limit-of-sixtythree",
            _invalid_name(),
        ),
        # Conforming names are accepted
        ("myfunction", does_not_raise()),
        ("my-function", does_not_raise()),
        ("my-function-2", does_not_raise()),
        ("function123", does_not_raise()),
        ("123function", does_not_raise()),
        ("a", does_not_raise()),
        ("a1", does_not_raise()),
        ("1a", does_not_raise()),
        # Exactly 63 characters is still valid
        ("a" * 63, does_not_raise()),
        ("my-function-" + "a" * 50, does_not_raise()),
    ],
)
def test_validate_function_name(function_name, expected):
    """validate_function_name enforces DNS-1123 label requirements."""
    with expected:
        validate_function_name(function_name)
