# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# This code is inspired by the LLaMA-Factory.
# https://github.com/hiyouga/LLaMA-Factory/blob/main/src/llamafactory/chat/chat_model.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import gc
from threading import Thread
from typing import List, Dict, Generator, AsyncGenerator

from openmind.utils import is_torch_available, logging

# Compatible with MindSpore
if is_torch_available():
    from .hf_engine import HfEngine
else:
    from .ms_engine import MsEngine

logger = logging.get_logger(allow_line_separator=True)
logging.set_verbosity_info()


class ChatModel:
    """Synchronous facade over an asynchronous chat engine.

    A private asyncio event loop is run forever on a daemon thread so that
    the engine's async token stream can be consumed from ordinary
    synchronous code via :meth:`stream_chat`.
    """

    def __init__(self, args) -> None:
        # Only the transformers backend is handled here; an unset backend
        # defaults to transformers. Fail fast on anything else instead of
        # leaving ``self.engine`` unassigned and surfacing a confusing
        # AttributeError later inside ``astream_chat``.
        if not args.backend or args.backend == "transformers":
            self.engine = HfEngine(args)
        else:
            raise ValueError(f"ChatModel only supports the transformers backend, got: {args.backend}")

        # Dedicated background event loop; daemon=True lets the interpreter
        # exit without an explicit loop shutdown.
        self._loop = asyncio.new_event_loop()
        self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True)
        self._thread.start()

    def stream_chat(self, messages_context: List[Dict[str, str]]) -> Generator[str, None, None]:
        """Synchronously yield response tokens for the given chat history.

        Each ``__anext__`` step of the async generator is scheduled on the
        background loop and awaited via the returned concurrent future,
        blocking the caller until the next token arrives.
        """
        generator = self.astream_chat(messages_context)
        while True:
            try:
                task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop)
                yield task.result()
            except StopAsyncIteration:
                # The async generator is exhausted; end the sync stream.
                break

    async def astream_chat(self, messages_context: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
        """Asynchronously yield tokens from the engine's streaming chat."""
        async for token in self.engine.stream_chat(messages_context):
            yield token


def _start_background_loop(loop: "asyncio.AbstractEventLoop") -> None:
    asyncio.set_event_loop(loop)
    loop.run_forever()


def _start_chat_without_docker(args) -> None:
    """Run an interactive terminal chat session against the selected backend.

    Special user commands:
      - ``exit``:  leave the chat loop.
      - ``clear``: drop the accumulated chat history and free cached memory.

    Args:
        args: parsed CLI arguments; ``args.backend`` selects the engine
            ("mindformers", "transformers", or None, which defaults to
            transformers).

    Raises:
        ValueError: if ``args.backend`` is not transformers/mindformers/None.
        RuntimeError: if reading user input fails for a reason other than a
            decoding error.
    """

    # NOTE(review): MsEngine is only imported at module level when torch is
    # NOT available, and HfEngine only when it is (see the conditional import
    # at the top of the file) -- selecting a backend that does not match the
    # installed framework would raise NameError here. Confirm the intended
    # deployment environments.
    if args.backend == "mindformers":
        chat_model = MsEngine(args)
        chat_model.start_chat()
    elif args.backend is None or args.backend == "transformers":
        chat_model = ChatModel(args)

        # Running conversation history, re-sent to the model on every turn.
        message_context = []
        logger.info("Welcome to use openMind chat, use `clear` to remove chat history, use `exit` to stop the chat.")

        while True:
            try:
                user_query = input("\n[USER]>>>")
            except UnicodeDecodeError:
                # Terminal encoding mismatch: keep the session alive and ask
                # the user to switch to UTF-8.
                logger.info(
                    "A decoding error occurred while processing the user input. Please set your terminal encoding to UTF-8."
                )
                continue
            except Exception as ex:
                raise RuntimeError(
                    f"Exception occurred when processing user input content, detail error message: {str(ex)}"
                ) from ex

            if user_query.strip() == "":
                logger.info("No valid input detected, please confirm your input.")
                continue

            if user_query.strip() == "exit":
                break

            if user_query.strip() == "clear":
                # Reset the conversation and release as much memory as
                # possible before the next exchange.
                message_context.clear()
                gc.collect()
                if is_torch_available():
                    import torch

                    # assumes an Ascend build where the torch_npu plugin
                    # provides ``torch.npu`` -- on stock CPU/CUDA torch this
                    # would raise AttributeError; TODO confirm.
                    torch.npu.empty_cache()
                logger.info("Chat history has been cleared.")
                continue

            # Note: the raw (unstripped) query is what gets recorded in the
            # history and sent to the model.
            message_context.append({"role": "user", "content": user_query})

            print("[MODEL]>>>", end="", flush=True)

            # Stream tokens as they arrive, echoing each one immediately,
            # then store the full response so follow-up turns have context.
            model_response = ""
            for rsp in chat_model.stream_chat(message_context):
                print(rsp, end="", flush=True)
                model_response += rsp
            print()
            message_context.append({"role": "assistant", "content": model_response})
    else:
        raise ValueError("backend only supports transformers and mindformers.")
