# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.

import argparse
import ast
import sys
import tempfile

import os
from openmind.utils import is_vision_available, is_ms_available
from openmind.utils.arguments_utils import (
    _trans_args_list_to_dict,
    safe_load_yaml,
    validate_directory,
    validate_input_or_path,
)
from openmind import pipeline
from openmind.archived.pipelines.pipeline_utils import SUPPORTED_TASK_MAPPING, get_task_from_readme
from openmind.archived.pipelines.builder import _parse_native_json


def parse_args():
    """Parse CLI arguments for the pipeline command.

    Merging precedence (highest first): positional repo_id/task token,
    explicit ``--key value`` flags, then values loaded via ``--yaml_path``.
    Unknown ``--key value`` pairs are accepted and forwarded as extra kwargs.

    Note: mutates ``sys.argv`` — it drops the subcommand token and the
    optional positional repo_id/task before argparse sees them.

    Returns:
        argparse.Namespace: the merged arguments.
    """

    parser = argparse.ArgumentParser(description="openMind Arguments")

    parser = _add_pipeline_arguments(parser)

    # Drop the subcommand token (pull/push/rm/list/...) so that sys.argv[1]
    # is the first real argument when invoked through `openmind-cli`.
    if sys.argv[0].endswith("openmind-cli"):
        sys.argv.pop(1)

    if len(sys.argv) >= 2 and not sys.argv[1].startswith("--"):
        # A bare positional token is either a task name or a model repo_id;
        # pop it before argparse runs so it is not rejected as unexpected.
        repo_or_task = sys.argv.pop(1)
        known_args, unknown_args = parser.parse_known_args()

        if repo_or_task in SUPPORTED_TASK_MAPPING:
            vars(known_args)["task"] = repo_or_task
        else:
            vars(known_args)["model"] = repo_or_task
    else:
        known_args, unknown_args = parser.parse_known_args()

    # Extra --key value pairs not declared on the parser are kept and passed
    # through as free-form kwargs.
    unknown_args = _trans_args_list_to_dict(unknown_args)
    vars(known_args).update(unknown_args)

    # Snapshot of the --key value pairs actually typed on the command line;
    # used below so explicit CLI flags win over yaml-file values.
    cli_args_dict = {
        sys.argv[i].lstrip("--"): sys.argv[i + 1]
        for i in range(1, len(sys.argv) - 1)
        if sys.argv[i].startswith("--") and sys.argv[i] != "--yaml_path"
    }

    if known_args.yaml_path is not None:
        yaml_path = known_args.yaml_path

        yaml_all_args = safe_load_yaml(yaml_path)

        # Only yaml keys that match declared parser options are honored.
        defined_params = {action.dest: action for action in parser._actions}
        yaml_args = {}

        for key, value in yaml_all_args.items():
            if key in defined_params:
                # Skip yaml entries that were also given on the CLI.
                if key in cli_args_dict:
                    continue
                yaml_args[key] = value

        vars(known_args).update(yaml_args)

    return known_args


def _add_pipeline_arguments(parser):
    """Add arguments to the parser."""
    group = parser.add_argument_group(title="cli pipeline")
    group.add_argument(
        "--model",
        type=str,
        default=None,
        help="the repo_id of model or local path",
    )
    group.add_argument(
        "--input",
        type=str,
        default=None,
        required=True,
        help="input content or file path",
    )
    group.add_argument(
        "--task",
        type=str,
        default=None,
        help="task type",
    )
    group.add_argument(
        "--framework",
        type=str,
        default=None,
        help="framework type, choosing from pt or ms",
    )
    group.add_argument(
        "--backend",
        type=str,
        default=None,
        help="backend type, choosing the corresponding backend based on the framework",
    )
    group.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="cache directory of downloaded models",
    )
    group.add_argument(
        "--yaml_path",
        type=str,
        default=None,
        help="path of yaml",
    )
    return parser


def try_to_trans_to_dict(input_or_path):
    """Best-effort conversion of a CLI string into a dict.

    Returns the parsed dict when *input_or_path* is a Python dict literal;
    otherwise returns the argument unchanged (on parse errors, or when the
    literal evaluates to a non-dict value such as a number or list).
    """
    try:
        parsed = ast.literal_eval(input_or_path)
    except Exception:
        # Plain prompt text / file path — not a literal at all.
        return input_or_path
    if isinstance(parsed, dict):
        return parsed
    return input_or_path


def _init_pipeline(**kwargs):
    """Build and return the pipeline, validating local model paths first."""
    model = kwargs.get("model")
    if model and os.path.exists(model):
        # The model argument points at a local checkout — validate the
        # directory before letting the pipeline load from it.
        validate_directory(model)
    return pipeline(**kwargs)


def _extract_params(args):
    """Convert the parsed argparse namespace into the kwargs dict for the run.

    Drops the bookkeeping-only ``yaml_path`` entry and literal-evals
    ``model_kwargs`` / ``input`` so dict-shaped CLI strings become dicts.

    Args:
        args: argparse.Namespace produced by ``parse_args``.

    Returns:
        dict: pipeline parameters, with ``input`` always present.
    """
    # yaml_path only steers argument parsing; it is not a pipeline kwarg.
    # Pop with a default so a namespace built without it does not raise.
    vars(args).pop("yaml_path", None)
    config_data = vars(args).copy()

    # model_kwargs may arrive as a dict literal string from the CLI/yaml.
    model_kwargs = config_data.get("model_kwargs", None)
    if model_kwargs:
        config_data["model_kwargs"] = try_to_trans_to_dict(model_kwargs)

    # input may be a plain prompt/path or a dict literal of call kwargs.
    config_data["input"] = try_to_trans_to_dict(config_data.pop("input"))

    return config_data


def _run_cmd_without_docker(params) -> None:
    """Run the pipeline described by *params* in the current process.

    Fills in device defaults per framework, resolves task/framework/backend
    when missing, builds the pipeline, feeds it the input, then prints the
    result (or saves it to disk when it is an image).

    Args:
        params: dict of pipeline kwargs; must contain an "input" entry
            (str prompt/path, or dict of call kwargs).
    """

    input_or_path = params.pop("input")
    validate_input_or_path(input_or_path)
    # Device defaults: NPU card 0 unless the caller pinned a device.
    # Absent framework is treated as PyTorch.
    is_pt_framework = params.get("framework", None) is None or params.get("framework") == "pt"
    if is_pt_framework and params.get("device", None) is None and params.get("device_map", None) is None:
        params["device"] = "npu:0"
    elif (
        params.get("framework") == "ms"
        and params.get("device_id", None) is None
        and params.get("device_target", None) is None
    ):
        params["device_id"] = 0

    model = params.get("model", None)
    task = params.get("task", None)
    backend = params.get("backend", None)
    framework = params.get("framework", None)
    if backend is None:
        if task is None and model is not None:
            # No explicit task: try to infer it from the model repo's README.
            if isinstance(model, str):
                task = get_task_from_readme(model)
            else:
                raise RuntimeError("task must be provided when the type of model is a model instance")

        # Resolve the (framework, backend) pair for the task from the
        # native pipeline registry.
        _, framework, backend = _parse_native_json(task, framework, backend)
        params["framework"] = framework
        params["backend"] = backend
    # if framework is mindspore, set_context() is required.
    # However, set_context() will raise error when backend is mindone.
    if is_ms_available() and backend != "mindone":
        import mindspore as ms

        ms.set_context(
            mode=0, device_id=int(params.get("device_id", 0)), jit_config={"jit_level": "O0", "infer_boost": "on"}
        )

    pipe = _init_pipeline(**params)
    # input_or_path can be str or dict
    if isinstance(input_or_path, dict):
        output = pipe(**input_or_path)
    else:
        output = pipe(input_or_path)
    if is_vision_available():
        from PIL import Image

        if isinstance(output, Image.Image):
            _save_img(output)
        elif _is_mindone_output(output):
            # mindone pipelines return a tuple whose first element is a list
            # of images; save the first one.
            _save_img(output[0][0])
        else:
            print(output)
    else:
        print(output)


def _is_mindone_output(output):
    """Return True iff *output* looks like a mindone pipeline result.

    A mindone result is a non-empty tuple whose first element is a
    non-empty list whose first element is a PIL image.
    """
    from PIL import Image

    if not isinstance(output, tuple) or len(output) == 0:
        return False
    first = output[0]
    if not isinstance(first, list) or len(first) == 0:
        return False
    return isinstance(first[0], Image.Image)


def _save_img(output):
    with tempfile.NamedTemporaryFile(suffix=".jpg", dir="./", delete=False) as f:
        output.save(f.name)
        print(f"Image has been saved to {f.name}")


def run_pipeline():
    """Entry point for the pipeline CLI: parse args, then run in-process."""
    _run_cmd_without_docker(_extract_params(parse_args()))
