import os
import flask
from flask_cors import CORS
from typing import List, Tuple
from flask import render_template
from llama_index import ServiceContext
from llama_index.llms import ChatMessage, LlamaCPP
from llama_index.chat_engine import SimpleChatEngine
from llama_index.chat_engine.types import BaseChatEngine

# ---------------------------------------------------------------------------- #
#                               Llm Engine Server                              #
# ---------------------------------------------------------------------------- #
# Flask app that serves both the API routes below and the static frontend;
# "templates" doubles as the static and template folder. CORS is enabled so
# a browser frontend served from a different origin can call the API.
app = flask.Flask(__name__, static_folder="templates",
                  template_folder="templates")
CORS(app)

def __respone_to_delta_gen(gen):
    """Lazily convert a stream of chat-response chunks into their delta
    strings, one per chunk (used as the body of a streaming HTTP response)."""
    return (str(chunk.delta) for chunk in gen)


def parse_request_json(messages):
    """Split a JSON chat payload into (latest message content, history).

    Args:
        messages: Non-empty list/tuple of dicts, each unpackable into a
            ChatMessage (e.g. {"role": ..., "content": ...}).

    Returns:
        Tuple of (content of the last message, list of the preceding
        ChatMessage objects).

    Raises:
        TypeError: if *messages* is not a list or tuple.
        ValueError: if *messages* is empty.

    Note: the original used `assert` for validation, which is silently
    stripped under `python -O`, and `isinstance` against `typing.List`/
    `typing.Tuple` (deprecated) — both replaced with explicit checks
    against the builtin types.
    """
    if not isinstance(messages, (list, tuple)):
        raise TypeError("messages must be a list or tuple")
    if not messages:
        raise ValueError("messages must contain at least one entry")
    chat_messages = [ChatMessage(**m) for m in messages]
    history, last = chat_messages[:-1], chat_messages[-1]
    return last.content, history


@app.route("/", methods=["GET"])
def index():
    return render_template("index.html")


@app.route("/chat", methods=["POST"])
def chat():
    chat_engine: BaseChatEngine = app.config.get("chat_engine", None)
    if chat_engine is None:
        return "chat_engine not initialized", 500
    messages = flask.request.json
    message, history = parse_request_json(messages)
    chat_respone = chat_engine.chat(message, history)  # 强制清空Memory
    return flask.Response(str(chat_respone.response), content_type="text/plain")


@app.route("/stream_chat", methods=["POST"])
def stream_chat():
    chat_engine: BaseChatEngine = app.config.get("chat_engine", None)
    if chat_engine is None:
        return "chat_engine not initialized", 500
    messages = flask.request.json
    message, history = parse_request_json(messages)
    chat_respone = chat_engine.stream_chat(message, history)
    return flask.Response(__respone_to_delta_gen(chat_respone), content_type="text/event-stream")


class LocalHttpChatEngineServer:
    """Thin wrapper binding a chat engine to the module-level Flask app and
    running the (blocking) development server."""

    def __init__(self, chat_engine: BaseChatEngine, host, port=5000, debug=False) -> None:
        """Store server settings and expose *chat_engine* to the route
        handlers, which look it up via app.config["chat_engine"]."""
        self.app = app
        self.chat_engine = chat_engine
        self.host, self.port, self.debug = host, port, debug
        self.app.config["chat_engine"] = chat_engine

    def run(self):
        """Start the Flask development server; blocks until shutdown."""
        self.app.run(host=self.host, port=self.port, debug=self.debug)


# ---------------------------------------------------------------------------- #
#                                  Llm Engine                                  #
# ---------------------------------------------------------------------------- #
def stream_chat(
    self, message: str, chat_history=None
):
    """Streaming-chat override bound onto a SimpleChatEngine instance via
    `stream_chat.__get__(...)` in build_simple_chat_engine().

    Seeds the engine memory from the client-supplied history, appends the
    new user message, assembles prefix + memory messages, and returns the
    raw token stream from the underlying LLM (each chunk exposes `.delta`,
    as consumed by __respone_to_delta_gen). NOTE(review): unlike the stock
    SimpleChatEngine.stream_chat, this appears to return the bare LLM
    generator rather than a StreamingAgentChatResponse — confirm callers
    only ever iterate it.

    Args:
        message: Latest user utterance.
        chat_history: Optional list of ChatMessage; when provided it
            replaces the engine's memory so each request carries its own
            full context.

    Returns:
        The generator produced by self._llm.stream_chat(all_messages).
    """
    if chat_history is not None:
        # Replace (not extend) memory with the caller's conversation.
        self._memory.set(chat_history)
    self._memory.put(ChatMessage(content=message, role="user"))
    # Token count already consumed by the fixed prefix (system prompt);
    # handed to memory.get so it can trim history to fit the context window.
    initial_token_count = len(
        self._memory.tokenizer_fn(
            " ".join([(m.content or "") for m in self._prefix_messages])
        )
    )
    all_messages = self._prefix_messages + self._memory.get(
        initial_token_count=initial_token_count
    )
    return self._llm.stream_chat(all_messages)


def build_simple_chat_engine(llm):
    """Create a SimpleChatEngine around *llm* with a fixed system prompt,
    then swap its stream_chat method for the module-level override."""
    ctx = ServiceContext.from_defaults(llm=llm, embed_model=None)
    engine = SimpleChatEngine.from_defaults(
        service_context=ctx,
        system_prompt="You are a helpful assistant."
    )
    # Bind the module-level stream_chat function as an instance method.
    engine.stream_chat = stream_chat.__get__(engine)
    return engine


# ---------------------------------------------------------------------------- #
#                                      Llm                                     #
# ---------------------------------------------------------------------------- #
def build_llm():
    """Instantiate a LlamaCPP model from the first *.gguf file found under
    the current directory.

    The sampling temperature is read from the TEMPERATURE environment
    variable (default "0.2").
    """
    temp = os.environ.get("TEMPERATURE", "0.2")
    model_kwargs = {
        "max_new_tokens": 16384,
        "n_gpu_layers": 1000,
        "flash_attn": True,
    }
    return LlamaCPP(
        model_path=find_gguf_file(),
        temperature=float(temp),
        model_kwargs=model_kwargs,
        verbose=True,
    )


# ---------------------------------------------------------------------------- #
#                                    config                                    #
# ---------------------------------------------------------------------------- #
def init_web_config():
    """Write templates/webConfig.js so the frontend knows the API base URL.

    Reads HOST (default "127.0.0.1") and PORT (default "7500") from the
    environment and emits a small JS snippet assigning window.webConfig.

    Fix: creates the templates/ directory if missing — previously open()
    raised FileNotFoundError on a fresh checkout without templates/.
    """
    host = os.environ.get("HOST", "127.0.0.1")
    port = os.environ.get("PORT", "7500")
    config_content = (
        "window.webConfig = {"
        f'baseUrl: "http://{host}:{port}"'
        "}"
    )
    os.makedirs("templates", exist_ok=True)
    with open("templates/webConfig.js", "w", encoding="utf-8") as f:
        f.write(config_content)

def find_gguf_file():
    """Return the path of the first *.gguf file found below the current
    directory (os.walk order).

    Raises:
        FileNotFoundError: when no .gguf file exists anywhere under ".".
    """
    for dirpath, _dirnames, filenames in os.walk("."):
        hits = [name for name in filenames if name.endswith(".gguf")]
        if hits:
            return os.path.join(dirpath, hits[0])
    raise FileNotFoundError("gguf file not found")

if __name__ == "__main__":
    init_web_config()
    llm = build_llm()
    chat_engine = build_simple_chat_engine(llm)
    LocalHttpChatEngineServer(chat_engine, host="0.0.0.0", port=7500).run()
