# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import string
import tempfile
from contextlib import nullcontext as does_not_raise
from datetime import datetime
from unittest.mock import Mock, patch

import pandas as pd
import pytest
import pytz

import mlrun.datastore
from mlrun import MLRunInvalidArgumentError, new_function
from mlrun.datastore import KafkaSource
from mlrun.datastore.azure_blob import AzureBlobStore
from mlrun.datastore.base import HttpStore
from mlrun.datastore.datastore import schema_to_model_provider, schema_to_store
from mlrun.datastore.datastore_profile import DatastoreProfileKafkaStream
from mlrun.datastore.dbfs_store import DBFSStore
from mlrun.datastore.filestore import FileStore
from mlrun.datastore.google_cloud_storage import GoogleCloudStorageStore
from mlrun.datastore.model_provider.huggingface_provider import HuggingFaceProvider
from mlrun.datastore.model_provider.openai_provider import OpenAIProvider
from mlrun.datastore.redis import RedisStore
from mlrun.datastore.s3 import S3Store
from mlrun.datastore.v3io import V3ioStore


def test_http_fs_parquet_as_df():
    """Reading a parquet file over plain HTTPS should succeed via as_df()."""
    url = "https://s3.wasabisys.com/iguazio/data/market-palce/aggregate/metrics.pq"
    item = mlrun.datastore.store_manager.object(url)
    item.as_df()


def test_http_fs_parquet_with_params_as_df():
    """A URL carrying query parameters should still load as a dataframe."""
    url = "https://s3.wasabisys.com/iguazio/data/market-palce/aggregate/metrics.pq?param1=1&param2=2"
    item = mlrun.datastore.store_manager.object(url)
    item.as_df()


# ML-10075
# TODO: find another dataset and re-enable this test
@pytest.mark.skip(
    reason="Starting with PyArrow 17, this test causes a conflict between partition data and parquet data"
)
def test_s3_fs_parquet_as_df():
    """Read a parquet object from a public S3 bucket (currently skipped)."""
    url = (
        "s3://aws-public-blockchain/v1.0/btc/blocks/date=2023-02-27/"
        "part-00000-7de4c87e-242f-4568-b5d7-aae4cc75e9ad-c000.snappy.parquet"
    )
    mlrun.datastore.store_manager.object(url).as_df()


def test_kafka_source_with_attributes():
    """Extra sasl attributes should be merged with the explicit sasl_user /
    sasl_pass arguments when building the nuclio kafka trigger."""
    extra_attributes = {
        "sasl": {
            "handshake": True,
        },
    }
    source = KafkaSource(
        brokers="broker_host:9092",
        topics="mytopic",
        group="mygroup",
        sasl_user="myuser",
        sasl_pass="mypassword",
        attributes=extra_attributes,
    )
    function = new_function(kind="remote")
    source.add_nuclio_trigger(function)
    trigger_attributes = function.spec.config["spec.triggers.kafka"]["attributes"]
    assert trigger_attributes["brokers"] == ["broker_host:9092"]
    assert trigger_attributes["topics"] == ["mytopic"]
    assert trigger_attributes["consumerGroup"] == "mygroup"
    expected_sasl = {
        "enable": True,
        "user": "myuser",
        "password": "mypassword",
        "mechanism": "PLAIN",
        "handshake": True,
    }
    assert trigger_attributes["sasl"] == expected_sasl


def test_kafka_source_with_attributes_as_ds_profile():
    """A KafkaSource resolved through a datastore profile builds the nuclio
    trigger from the profile's fields: public kwargs (sasl.handshake) are
    merged in, and the explicit sasl_pass takes precedence over the
    conflicting password supplied in kwargs_private.
    """
    ds = DatastoreProfileKafkaStream(
        name="dskafkasrc",
        brokers="broker_host:9092",
        topics="mytopic",
        group="mygroup",
        sasl_user="myuser",
        sasl_pass="mypassword",
        kwargs_public={
            "sasl": {
                "handshake": True,
            },
        },
        kwargs_private={
            "sasl": {
                # must NOT override the explicit sasl_pass above
                "password": "wrong_password",
            },
        },
    )
    source = KafkaSource(path="ds://dskafkasrc")
    function = new_function(kind="remote")
    # patch.object (instead of assigning a Mock to the module attribute) so
    # the real datastore_profile_read is restored afterwards and the mock
    # cannot leak into other tests in the session
    with patch.object(
        mlrun.datastore.sources, "datastore_profile_read", return_value=ds
    ):
        source.add_nuclio_trigger(function)
    attributes = function.spec.config["spec.triggers.kafka"]["attributes"]
    assert attributes["brokers"] == ["broker_host:9092"]
    assert attributes["topics"] == ["mytopic"]
    assert attributes["consumerGroup"] == "mygroup"
    assert attributes["sasl"] == {
        "enable": True,
        "user": "myuser",
        "password": "mypassword",
        "mechanism": "PLAIN",
        "handshake": True,
    }


def test_kafka_source_with_attributes_as_ds_profile_brokers_list():
    """Profile brokers/topics given as lists should be passed through to the
    trigger as-is, and private + public sasl kwargs should be merged into a
    single sasl attribute dict.
    """
    ds = DatastoreProfileKafkaStream(
        name="dskafkasrc",
        brokers=["broker_host:9092", "broker_host2:9093"],
        topics=["mytopic", "mytopic2"],
        group="mygroup",
        kwargs_public={
            "sasl": {
                "handshake": True,
                "enabled": True,
            },
        },
        kwargs_private={
            "sasl": {
                "password": "mypassword",
                "user": "myuser",
            },
        },
    )
    source = KafkaSource(path="ds://dskafkasrc")
    function = new_function(kind="remote")
    # patch.object (instead of assigning a Mock to the module attribute) so
    # the real datastore_profile_read is restored afterwards and the mock
    # cannot leak into other tests in the session
    with patch.object(
        mlrun.datastore.sources, "datastore_profile_read", return_value=ds
    ):
        source.add_nuclio_trigger(function)
    attributes = function.spec.config["spec.triggers.kafka"]["attributes"]
    assert attributes["brokers"] == ["broker_host:9092", "broker_host2:9093"]
    assert attributes["topics"] == ["mytopic", "mytopic2"]
    assert attributes["consumerGroup"] == "mygroup"
    assert attributes["sasl"] == {
        "enabled": True,
        "user": "myuser",
        "password": "mypassword",
        "handshake": True,
    }


def test_kafka_source_without_attributes():
    """Without an attributes dict, the sasl trigger config is derived solely
    from sasl_user/sasl_pass (handshake/mechanism filled in by the trigger
    builder, as asserted below)."""
    source = KafkaSource(
        brokers="broker_host:9092",
        topics="mytopic",
        group="mygroup",
        sasl_user="myuser",
        sasl_pass="mypassword",
    )
    function = new_function(kind="remote")
    source.add_nuclio_trigger(function)
    trigger_attributes = function.spec.config["spec.triggers.kafka"]["attributes"]
    assert trigger_attributes["brokers"] == ["broker_host:9092"]
    assert trigger_attributes["topics"] == ["mytopic"]
    assert trigger_attributes["consumerGroup"] == "mygroup"
    expected_sasl = {
        "enable": True,
        "handshake": True,
        "user": "myuser",
        "password": "mypassword",
        "mechanism": "PLAIN",
    }
    assert trigger_attributes["sasl"] == expected_sasl


@pytest.mark.parametrize(
    "schemas,expected_class,expected",
    [
        (["file"] + list(string.ascii_lowercase), FileStore, does_not_raise()),
        (["s3"], S3Store, does_not_raise()),
        (["az", "wasbs", "wasb"], AzureBlobStore, does_not_raise()),
        (["v3io", "v3ios"], V3ioStore, does_not_raise()),
        (["redis", "rediss"], RedisStore, does_not_raise()),
        (["http", "https"], HttpStore, does_not_raise()),
        (["gcs", "gs"], GoogleCloudStorageStore, does_not_raise()),
        (["dbfs"], DBFSStore, does_not_raise()),
        (["random"], None, pytest.raises(ValueError)),
    ],
)
def test_schema_to_store(schemas, expected_class, expected):
    # Each schema in a group must resolve to the same store class; an
    # unrecognized schema raises ValueError.
    with expected:
        for schema in schemas:
            assert schema_to_store(schema) == expected_class


@pytest.mark.parametrize(
    "schemas,expected_class,expected",
    [
        (["openai"], OpenAIProvider, does_not_raise()),
        (["huggingface"], HuggingFaceProvider, does_not_raise()),
        (["random"], None, pytest.raises(ValueError)),
    ],
)
def test_schema_to_model_provider(schemas, expected_class, expected):
    # Known schemas resolve to their provider class; unknown ones raise
    # ValueError.
    with expected:
        for schema in schemas:
            assert schema_to_model_provider(schema) == expected_class


# ML-6308
@pytest.mark.parametrize("start_time_tz", [None, "naive", "with_tz"])
@pytest.mark.parametrize("end_time_tz", [None, "naive", "with_tz"])
@pytest.mark.parametrize("df_tz", [False, True])
def test_as_df_time_filters(start_time_tz, end_time_tz, df_tz):
    """as_df() should filter rows by time_column and reject a mix of naive
    and tz-aware start/end bounds with MLRunInvalidArgumentError."""
    time_column = "timestamp"

    asset_path = os.path.join(
        os.path.join(os.path.dirname(__file__), "assets"), "testdata.parquet"
    )
    full_df = pd.read_parquet(asset_path)

    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".parquet", delete=True
    ) as temp_parquet:
        if df_tz:
            full_df[time_column] = full_df[time_column].dt.tz_localize("UTC")
        full_df.to_parquet(temp_parquet.name)

        data_item = mlrun.datastore.store_manager.object(f"file://{temp_parquet.name}")

        def _bound(tz_mode, *dt_args):
            # Build a naive or UTC-aware datetime, or None when no bound is set.
            if not tz_mode:
                return None
            tzinfo = pytz.UTC if tz_mode == "with_tz" else None
            return datetime(*dt_args, tzinfo=tzinfo)

        start_time = _bound(start_time_tz, 2020, 12, 1, 17, 28, 15)
        end_time = _bound(end_time_tz, 2020, 12, 1, 17, 29, 15)

        # Mixing one naive and one tz-aware bound is rejected.
        if {start_time_tz, end_time_tz} == {"naive", "with_tz"}:
            expectation = pytest.raises(
                MLRunInvalidArgumentError,
                match="start_time and end_time must have the same time zone",
            )
        else:
            expectation = does_not_raise()

        with expectation:
            resp = data_item.as_df(
                start_time=start_time, end_time=end_time, time_column=time_column
            )
            # 190 rows total; the start bound trims 80 and the end bound 90.
            expected_rows = (
                190 - (80 if start_time_tz else 0) - (90 if end_time_tz else 0)
            )
            assert len(resp) == expected_rows


def test_partition_filtering_year_month():
    """
    Simple test for partition filtering with year/month structure.
    Tests filtering data from 2020-2022 with year=YYYY/month=MM partitions.
    """
    time_column = "timestamp"

    # 3 records per month (days 5, 15, 25) across 2020-2022, all months.
    records = [
        {
            "timestamp": datetime(year, month, day, 12, 0, 0),
            "year": year,
            "month": month,
            "value": year * 100 + month * 10 + day,
        }
        for year in range(2020, 2023)
        for month in range(1, 13)
        for day in (5, 15, 25)
    ]
    df = pd.DataFrame(records)

    with tempfile.TemporaryDirectory() as temp_dir:
        # Lay out the data as year=YYYY/month=MM/ partitioned parquet.
        df.to_parquet(temp_dir, partition_cols=["year", "month"], engine="pyarrow")

        # Trailing slash marks the URL as a directory.
        data_item = mlrun.datastore.store_manager.object(f"file://{temp_dir}/")

        # Filter window: June 10 to August 20, 2021.
        start_time = datetime(2021, 6, 10, 0, 0, 0)
        end_time = datetime(2021, 8, 20, 0, 0, 0)

        result = data_item.as_df(
            start_time=start_time,
            end_time=end_time,
            time_column=time_column,
            format="parquet",
            optimize_discovery=False,
        )

        # Expected: start-exclusive / end-inclusive window over the source df
        # (June 15 & 25, July 5, 15 & 25, August 5 & 15 = 7 records).
        in_range = (df[time_column] > start_time) & (df[time_column] <= end_time)
        expected_df = df[in_range].reset_index(drop=True)
        pd.testing.assert_frame_equal(
            result,
            expected_df,
            check_like=True,
            check_dtype=False,
            check_categorical=False,
        )


@pytest.mark.parametrize(
    "with_time_zone",
    [True, False],
)
@pytest.mark.parametrize(
    "description, start_time_args, end_time_args, partition_args, expected",
    [
        # ----------------------------
        # YEAR ONLY PARTITIONS
        # ----------------------------
        (
            "Year fully inside range",
            (2024, 1, 1, 0, 0),
            (2024, 12, 31, 23, 59),
            dict(year=2024),
            True,
        ),
        (
            "Year fully before range",
            (2024, 1, 1, 0, 0),
            (2024, 12, 31, 23, 59),
            dict(year=2023),
            False,
        ),
        (
            "Year fully after range",
            (2024, 1, 1, 0, 0),
            (2024, 12, 31, 23, 59),
            dict(year=2025),
            False,
        ),
        (
            "Year boundary overlap",
            (2023, 12, 31, 23, 59),
            (2024, 1, 1, 0, 0),
            dict(year=2024),
            True,
        ),
        # ----------------------------
        # YEAR + MONTH PARTITIONS
        # ----------------------------
        (
            "Month inside full-year range",
            (2024, 1, 1, 0, 0),
            (2024, 12, 31, 23, 59),
            dict(year=2024, month=2),
            True,
        ),
        (
            "Month outside (previous year)",
            (2024, 1, 1, 0, 0),
            (2024, 12, 31, 23, 59),
            dict(year=2023, month=12),
            False,
        ),
        (
            "Month crossing year boundary (Dec → Jan)",
            (2023, 12, 1, 0, 0),
            (2024, 1, 1, 0, 0),
            dict(year=2023, month=12),
            True,
        ),
        (
            "Exact boundary: range start == partition_end",
            (2024, 1, 1, 0, 0),
            (2024, 1, 1, 0, 0),
            dict(year=2024, month=1),
            True,
        ),
        # ----------------------------
        # YEAR + MONTH + DAY PARTITIONS
        # ----------------------------
        (
            "Day inside range",
            (2024, 5, 1, 0, 0),
            (2024, 12, 31, 0, 0),
            dict(year=2024, month=5, day=15),
            True,
        ),
        (
            "Day outside range",
            (2024, 5, 16, 0, 0),
            (2024, 12, 31, 0, 0),
            dict(year=2023, month=5, day=15),
            False,
        ),
        (
            "Day boundary exact-touch",
            (2024, 5, 15, 0, 0),
            (2024, 5, 16, 0, 0),
            dict(year=2024, month=5, day=15),
            True,
        ),
        # ----------------------------
        # YEAR + MONTH + DAY + HOUR PARTITIONS
        # ----------------------------
        (
            "Hour inside range",
            (2024, 6, 10, 4, 0),
            (2024, 6, 30, 0, 0),
            dict(year=2024, month=6, day=10, hour=5),
            True,
        ),
        (
            "Hour outside range",
            (2024, 6, 10, 6, 0),
            (2024, 6, 30, 0, 0),
            dict(year=2024, month=7, day=10, hour=5),
            False,
        ),
        (
            "Hour partition touches range start",
            (2024, 6, 10, 6, 0),
            (2024, 6, 10, 8, 0),
            dict(year=2024, month=6, day=10, hour=6),
            True,
        ),
    ],
)
def test_is_directory_in_range(
    description,
    start_time_args,
    end_time_args,
    partition_args,
    expected,
    with_time_zone,
):
    """_is_directory_in_range should report whether a partition directory
    (identified by year/month/day/hour kwargs) overlaps the requested time
    window, for both naive and UTC-aware bounds."""
    tzinfo = pytz.UTC if with_time_zone else None
    window = (
        datetime(*start_time_args, tzinfo=tzinfo),
        datetime(*end_time_args, tzinfo=tzinfo),
    )

    in_range = mlrun.datastore.base.DataStore._is_directory_in_range(
        *window, **partition_args
    )
    assert (
        in_range == expected
    ), f"Failed case: {description} (tz-aware={with_time_zone})"
