# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# This code is inspired by the LLaMA-Factory.
# https://github.com/hiyouga/LLaMA-Factory/blob/main/src/llamafactory/chat/hf_engine.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import glob

from mindformers.core.context import build_context
from mindformers.tools.register.config import MindFormerConfig
from mindformers import LlamaConfig, LlamaForCausalLM

from openmind.utils.hub import OpenMindHub
from openmind.utils import logging

# Module-level logger from openmind's logging wrapper; INFO verbosity is forced
# so the interactive chat banner and status messages below are actually emitted.
logger = logging.get_logger(allow_line_separator=True)
logging.set_verbosity_info()


def download_from_repo(repo_id: str, cache_dir: str) -> str:
    """Resolve *repo_id* to a local directory path.

    If *repo_id* already exists on the local filesystem it is returned
    as-is; otherwise the repository is fetched from the hub into
    *cache_dir* and the snapshot path is returned.
    """
    if os.path.exists(repo_id):
        return repo_id
    return OpenMindHub.snapshot_download(repo_id=repo_id, cache_dir=cache_dir)


class MsEngine:
    """Interactive chat engine backed by a MindSpore Llama model.

    Resolves the model repository to a local path, loads the repo-shipped
    tokenizer and chat helper, builds the mindformers runtime context, and
    runs a read-eval-print chat loop that keeps a conversation history.
    """

    def __init__(self, args) -> None:
        """Load tokenizer, config and checkpoint from ``args.model_name_or_path``.

        Args:
            args: parsed CLI namespace; reads ``model_name_or_path``,
                ``cache_dir``, ``default_system``, ``device``, ``max_length``
                and ``repetition_penalty``.

        Raises:
            FileNotFoundError: if the repo lacks ``chat_model_config.yaml``
                or contains no ``*.ckpt`` checkpoint file.
        """
        self.local_path = download_from_repo(args.model_name_or_path, args.cache_dir)
        # The snapshot ships its own chat helper and tokenizer modules;
        # extend sys.path so they become importable.
        sys.path.append(self.local_path)

        from chat_utils import chat
        from tokenizer import Tokenizer

        self.chat = chat

        # Fall back to a generic system prompt when the caller supplies none.
        if args.default_system is not None:
            self.default_system = args.default_system
        else:
            self.default_system = "You are a helpful assistant."

        yaml_path = os.path.join(self.local_path, "chat_model_config.yaml")
        if not os.path.exists(yaml_path):
            raise FileNotFoundError(yaml_path)

        config = MindFormerConfig(yaml_path)
        # Point the tokenizer at the vocab/merges files inside the snapshot.
        config.processor.tokenizer.vocab_file = os.path.join(self.local_path, "vocab.json")
        config.processor.tokenizer.merges_file = os.path.join(self.local_path, "merges.txt")

        self.tokenizer = Tokenizer(**config.processor.tokenizer)

        # Single-device interactive inference only.
        config.use_parallel = False
        if args.device is not None:
            config.context.device_id = args.device

        build_context(config)

        self.model_config = LlamaConfig.from_pretrained(yaml_path)

        if args.max_length is not None:
            self.model_config.max_decode_length = args.max_length
        if args.repetition_penalty is not None:
            self.model_config.repetition_penalty = args.repetition_penalty

        # Greedy decoding for reproducible chat output.
        self.model_config.do_sample = False
        # Fix: indexing glob(...)[0] directly raised a bare IndexError when the
        # snapshot contains no checkpoint; fail with an explicit error instead
        # (consistent with the yaml check above).
        ckpt_files = glob.glob(os.path.join(self.local_path, "*.ckpt"))
        if not ckpt_files:
            raise FileNotFoundError(f"No *.ckpt checkpoint file found under {self.local_path}")
        self.model_config.checkpoint_name_or_path = ckpt_files[0]

        self.model = LlamaForCausalLM(self.model_config)
        # Conversation history threaded through successive chat() calls.
        self.history = []

    def start_chat(self) -> None:
        """Run the interactive chat loop until the user types ``exit``.

        Special inputs: ``clear`` empties the history, ``exit`` leaves the
        loop, blank input is ignored with a hint message.

        Raises:
            RuntimeError: if reading user input fails for any reason other
                than a decoding error (which is reported and retried).
        """
        logger.info("Welcome to use openMind chat, use `clear` to remove chat history, use `exit` to stop the chat.")
        while True:
            try:
                user_query = input("\n[USER]>>>")
            except UnicodeDecodeError:
                # Fix: adjacent string literals "...coding" "to utf-8." rendered
                # as "codingto utf-8."; message now contains the missing space.
                logger.info(
                    "Decoding error occurred when processing user input content, please set terminal coding to utf-8."
                )
                continue
            except Exception as ex:
                raise RuntimeError(
                    f"Exception occurred when processing user input content, detail error message: {str(ex)}"
                ) from ex

            # Strip once instead of re-stripping for every comparison.
            query = user_query.strip()
            if query == "":
                logger.info("No valid input detected, please confirm your input.")
                continue

            if query == "clear":
                self.history.clear()
                continue

            if query == "exit":
                break

            # Pass the raw (unstripped) query through, as the original did.
            self.history = self.chat(
                self.model,
                self.model_config,
                self.tokenizer,
                user_query,
                self.history,
                system=self.default_system,
                verbose=False,
                chat_format="chatml",
                append_history=True,
            )
