# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import csv
import json
import os
from pathlib import Path
from tempfile import TemporaryDirectory

import numpy as np
import pandas as pd
import plotly.graph_objects as go
import pytest
import yaml

import mlrun
import mlrun.projects.pipelines
import mlrun_pipelines.common.ops
from mlrun import mlconf, new_function, new_task
from mlrun.artifacts import PlotlyArtifact
from mlrun.utils import logger

# Expected artifact bodies logged by my_job and asserted on by the
# output-directory checks below.
model_body = "abc is 123"
results_body = "<b> Some HTML <b>"
# Directory of this test file and its bundled test assets.
tests_dir = Path(__file__).absolute().parent
assets_dir = tests_dir / "assets"


def my_job(context, p1=1, p2="a-string"):
    """Handler exercised by the kfp tests.

    Logs scalar results, file/html artifacts, a plotly chart and a dataset so
    the surrounding tests can assert on the files the run writes to disk.
    """
    # access input metadata, values, files, and secrets (passwords)
    print(f"Run: {context.name} (uid={context.uid})")
    print(f"Params: p1={p1}, p2={p2}")
    print(f"accesskey = {context.get_secret('ACCESS_KEY')}")
    input_path = f"{tests_dir}/assets/test_kfp_input_file.txt"
    input_file = context.get_input(input_path).get()
    print(f"file\n{input_file}\n")

    # RUN some useful code e.g. ML training, data prep, etc.

    # scalar result metrics derived from p1 (asserted by the tests)
    context.log_result("accuracy", p1 * 2)
    context.log_result("loss", p1 * 3)

    # file and web-page artifacts; versioned and visible in the UI
    context.log_artifact("model", body=model_body, local_path="model.txt")
    context.log_artifact("results", local_path="results.html", body=results_body)

    # a chart output (will show in the pipelines UI)
    points = np.arange(10)
    figure = go.Figure(data=go.Scatter(x=points, y=points**2))
    context.log_artifact(PlotlyArtifact(figure=figure, key="plotly"))

    # a small tabular dataset artifact
    people = {
        "first_name": ["Jason", "Molly", "Tina", "Jake", "Amy"],
        "last_name": ["Miller", "Jacobson", "Ali", "Milner", "Cooze"],
        "age": [42, 52, 36, 24, 73],
        "postTestScore": [25, 94, 57, 62, 70],
    }
    columns = ["first_name", "last_name", "age", "postTestScore"]
    context.log_dataset("mydf", df=pd.DataFrame(people, columns=columns))


@pytest.fixture
def kfp_dirs(monkeypatch):
    """Provide temporary meta/artifacts/output dirs for a kfp run and patch
    the mlrun_pipelines module-level paths to point at them."""
    with TemporaryDirectory() as temp_root:
        root = Path(temp_root)
        meta_dir = root / "meta"
        artifacts_dir = root / "artifacts"
        output_dir = root / "output"
        for directory in (meta_dir, artifacts_dir, output_dir):
            directory.mkdir()
        logger.info(
            "Created temp paths for kfp test",
            meta_dir=meta_dir,
            artifacts_dir=artifacts_dir,
            output_dir=output_dir,
        )
        monkeypatch.setattr(mlrun_pipelines.common.ops, "KFPMETA_DIR", str(meta_dir))
        monkeypatch.setattr(
            mlrun_pipelines.common.ops, "KFP_ARTIFACTS_DIR", str(artifacts_dir)
        )
        yield str(meta_dir), str(artifacts_dir), str(output_dir)


def test_kfp_function_run(kfp_dirs):
    """A single run with kfp=True writes meta, artifact and output files."""
    meta_dir, artifacts_dir, output_dir = kfp_dirs
    p1 = 5
    # my_job computes accuracy = 2 * p1 and loss = 3 * p1
    accuracy, loss = 2 * p1, 3 * p1
    task = _generate_task(p1, output_dir)
    result = new_function(kfp=True).run(task, handler=my_job)
    _assert_meta_dir(meta_dir, accuracy, loss)
    _assert_artifacts_dir(artifacts_dir, accuracy, loss)
    _assert_output_dir(output_dir, result.metadata.name)
    assert result.status.state == "completed"
    assert result.output("accuracy") == accuracy
    assert result.output("loss") == loss


def test_kfp_function_run_with_hyper_params(rundb_mock, kfp_dirs):
    """A hyper-param run with kfp=True reports the best iteration's outputs."""
    meta_dir, artifacts_dir, output_dir = kfp_dirs
    p1 = [1, 2, 3]
    # loss is 3 * p1, so the min.loss selector picks p1=1, i.e. iteration 1
    best_iteration = 1
    best_p1 = p1[best_iteration - 1]
    accuracy, loss = 2 * best_p1, 3 * best_p1
    task = _generate_task(p1, output_dir)
    task.with_hyper_params({"p1": p1}, selector="min.loss")
    result = new_function(kfp=True).run(task, handler=my_job)
    _assert_meta_dir(meta_dir, accuracy, loss, best_iteration)
    _assert_artifacts_dir(artifacts_dir, accuracy, loss)
    _assert_output_dir(output_dir, result.metadata.name, iterations=len(p1))
    assert result.status.state == "completed"
    assert result.output("accuracy") == accuracy
    assert result.output("loss") == loss

def test_run_function_with_retry_validation():
    """Passing a retry spec to run_function must be rejected for the kfp engine."""
    project = mlrun.new_project("test-retry-validation")
    project.set_workflow("test-workflow", str(assets_dir / "localpipe.py"))
    workflow_spec = mlrun.projects.pipelines.WorkflowSpec(engine="kfp")
    workflow_spec.merge_args(project.workflows[0])
    mlrun.projects.pipeline_context.set(project, workflow_spec)
    job_function = new_function("test-function", kind="job")
    expected_error = (
        "Retrying jobs is not supported when running a workflow with the kfp engine. "
        "Use KFP set_retry instead."
    )
    with pytest.raises(mlrun.errors.MLRunInvalidArgumentError, match=expected_error):
        project.run_function(job_function, retry={"count": 3})


def _assert_output_dir(output_dir, name, iterations=1):
    output_prefix = f"{output_dir}/{name}/"
    for iteration in range(1, iterations):
        _assert_iteration_output_dir_files(output_prefix, iteration)
    if iterations > 1:
        iteration_results_file = output_prefix + "0/iteration_results.csv"
        with open(iteration_results_file) as file:
            count = 0
            for row in csv.DictReader(file):
                print(yaml.safe_dump(row))
                count += 1
        assert count == 3, "didnt see expected iterations file output"


def _assert_iteration_output_dir_files(output_dir, iteration):
    """Check one iteration directory for all artifact files my_job logs."""
    iteration_prefix = output_dir + f"{iteration}/"

    # text artifacts must match the exact bodies logged by my_job
    with open(iteration_prefix + "model.txt") as model_file:
        assert model_file.read() == model_body
    with open(iteration_prefix + "results.html") as results_file:
        assert results_file.read() == results_body
    # binary/html artifacts only need to exist
    for generated_file in ("plotly.html", "mydf.parquet"):
        assert os.path.exists(iteration_prefix + generated_file)


def _assert_artifacts_dir(artifacts_dir, expected_accuracy, expected_loss):
    with open(artifacts_dir + "/accuracy") as accuracy_file:
        accuracy = accuracy_file.read()
        assert str(expected_accuracy) == accuracy
    with open(artifacts_dir + "/loss") as loss_file:
        loss = loss_file.read()
        assert str(expected_loss) == loss


def _assert_meta_dir(meta_dir, expected_accuracy, expected_loss, best_iteration=None):
    """Validate both KFP metadata files written into the meta dir."""
    _assert_ui_metadata_file_existence(meta_dir)
    _assert_metrics_file(meta_dir, expected_accuracy, expected_loss, best_iteration)


def _assert_ui_metadata_file_existence(meta_dir):
    assert os.path.exists(meta_dir + "/mlpipeline-ui-metadata.json")


def _assert_metrics_file(
    meta_dir, expected_accuracy, expected_loss, best_iteration=None
):
    expected_data = {
        "metrics": [
            {"name": "accuracy", "numberValue": expected_accuracy},
            {"name": "loss", "numberValue": expected_loss},
        ]
    }
    if best_iteration is not None:
        expected_data["metrics"].insert(
            0, {"name": "best_iteration", "numberValue": best_iteration}
        )
    with open(meta_dir + "/mlpipeline-metrics.json") as metrics_file:
        data = json.load(metrics_file)
        assert data == expected_data


def _generate_task(p1, out_path):
    """Build a task for the kfp tests with the given p1 param and output path."""
    task = new_task(
        params={"p1": p1},
        out_path=out_path,
        outputs=["accuracy", "loss"],
    )
    return task.set_label("tests", "kfp")


def test_merge_node_selectors_from_function_and_project_on_kfp_pod(
    ensure_project,
):
    """Function, project and config level node selectors all merge onto the pod."""
    function = new_function(
        kfp=True, kind="job", project=ensure_project.metadata.name
    )

    # one distinct selector per configuration level
    function.spec.node_selector = {"ns1": "val1"}
    ensure_project.spec.default_function_node_selector = {"ns2": "val2"}
    mlconf.default_function_node_selector = base64.b64encode(
        json.dumps({"ns3": "val3"}).encode("utf-8")
    )

    cop = function.as_step()
    assert cop.node_selector == {
        "ns1": "val1",
        "ns2": "val2",
        "ns3": "val3",
    }


def test_kfp_pod_sets_gpu_resources_to_zero_when_gpu_requested(
    ensure_project,
):
    """A requested GPU limit must be present but zeroed on the generated pod."""
    function = new_function(
        kfp=True, kind="job", project=ensure_project.metadata.name
    )
    gpu_type = "nvidia.com/gpu"
    function.with_limits(gpus=1, gpu_type=gpu_type)
    limits = function.as_step().container.resources.limits
    assert gpu_type in limits
    assert limits[gpu_type] == 0


def test_enrich_node_selector_with_preemption_mode_prevent_on_kfp_pod(
    ensure_project,
):
    """With preemption mode 'prevent', preemptible selectors are stripped from
    the pod, tolerations are pruned, and anti-affinity for the preemptible
    nodes is added."""
    function = new_function(kfp=True, kind="job", project=ensure_project.metadata.name)
    function.with_preemption_mode("prevent")

    # selectors configured at every level, plus the preemptible one that the
    # prevent mode must strip
    function_selector = {"ns1": "val1"}
    project_selector = {"ns2": "val2"}
    config_selector = {"ns3": "val3"}
    preemptible_selector = {"spot": "true"}

    ensure_project.spec.default_function_node_selector = dict(project_selector)
    mlconf.default_function_node_selector = base64.b64encode(
        json.dumps(config_selector).encode("utf-8")
    )
    mlconf.preemptible_nodes.node_selector = base64.b64encode(
        json.dumps(preemptible_selector).encode("utf-8")
    )
    function.spec.node_selector = {**function_selector, **preemptible_selector}

    # converting the function to a step triggers the enrichment
    cop = function.as_step()

    # only the non-preemptible selectors survive the merge
    assert cop.node_selector == {
        **function_selector,
        **project_selector,
        **config_selector,
    }

    # tolerations are pruned
    assert cop.tolerations == []

    # anti-affinity for the preemptible nodes was added
    affinity = cop.affinity
    assert affinity is not None
    assert affinity.node_affinity is not None
    required = (
        affinity.node_affinity.required_during_scheduling_ignored_during_execution
    )
    assert required is not None
    assert len(required.node_selector_terms) > 0
    match_expressions = required.node_selector_terms[0].match_expressions
    assert any(
        expr.key == "spot" and expr.operator == "NotIn" and "true" in expr.values
        for expr in match_expressions
    )
