# Copyright (c) 2024-present AI-Labs

import json
import re

from comps import Gateway, MicroService, ServiceOrchestrator, ServiceType, CustomLogger
from comps.cores.proto.api_protocol import (
    ChatCompletionRequest,
    ChatCompletionResponse
)
from comps.cores.proto.docarray import LLMParams
from fastapi import Request
from fastapi.responses import StreamingResponse
from langchain_core.prompts import PromptTemplate

from configs import config

logger = CustomLogger("Chat Service")
logflag = config.opea_gateway.chatservice.logs

class ChatTemplate:
    """Builds RAG prompts, choosing a Chinese or an English template based on
    the language of the retrieved context."""

    # Matcher for CJK Unified Ideographs, pre-compiled once instead of
    # recompiling the pattern on every call.
    _CJK_PATTERN = re.compile("[\u4E00-\u9FFF]")

    @staticmethod
    def generate_rag_prompt(question, documents):
        """Format *question* and retrieved *documents* into a RAG prompt.

        Args:
            question: The user's query string.
            documents: Iterable of document text snippets used as context.

        Returns:
            The formatted prompt string. The Chinese template is selected when
            at least 30% of the (non-empty) context characters are CJK
            ideographs; otherwise the English template is used.
        """
        context_str = "\n".join(documents)
        # The `context_str and` guard both selects the English template for an
        # empty context and protects the ratio against division by zero.
        if context_str and len(ChatTemplate._CJK_PATTERN.findall(context_str)) / len(context_str) >= 0.3:
            # chinese context
            template = """
### 你将扮演一个乐于助人、尊重他人并诚实的助手，你的目标是帮助用户解答问题。有效地利用来自本地知识库的搜索结果。确保你的回答中只包含相关信息。如果你不确定问题的答案，请避免分享不准确的信息。
### 搜索结果：{context}
### 问题：{question}
### 回答：
"""
        else:
            template = """
### You are a helpful, respectful and honest assistant to help the user with questions. \
Please refer to the search results obtained from the local knowledge base. \
But be careful to not incorporate the information that you think is not relevant to the question. \
If you don't know the answer to a question, please don't share false information. \n
### Search results: {context} \n
### Question: {question} \n
### Answer:
"""
        return template.format(context=context_str, question=question)

def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
    """Align OPEA-format inputs to the request format the backend service of
    *cur_node* expects (embedding, retriever, or LLM)."""
    service_type = self.services[cur_node].service_type
    if service_type == ServiceType.EMBEDDING:
        # Embedding backend expects the text under "inputs".
        inputs["inputs"] = inputs.pop("text")
    elif service_type == ServiceType.RETRIEVER:
        # Merge any caller-supplied retriever parameters into the request.
        retriever_parameters = kwargs.get("retriever_parameters")
        if retriever_parameters:
            inputs.update(retriever_parameters.dict())
    elif service_type == ServiceType.LLM:
        # Map TGI/vLLM-style fields onto the unified OpenAI
        # /v1/chat/completions request schema.
        converted = {
            "model": "chat",
            "messages": inputs["messages"],
            "max_tokens": inputs["max_tokens"],
            "top_p": inputs["top_p"],
            "stream": inputs["streaming"],
            "frequency_penalty": inputs["frequency_penalty"],
            "presence_penalty": inputs["presence_penalty"],
            "repetition_penalty": inputs["repetition_penalty"],
            "temperature": inputs["temperature"],
        }
        tools_parameters = kwargs.get("tools_parameters")
        if tools_parameters:
            converted["tools"] = tools_parameters
        inputs = converted
    return inputs


def _resolve_llm_prompt(query, docs, chat_template):
    """Build the final LLM prompt for *query* given retrieved *docs*.

    If the user supplied *chat_template*, it is honored when it declares a
    supported set of input variables (['question'] or ['context', 'question']);
    otherwise the default RAG template is used.
    """
    if chat_template:
        prompt_template = PromptTemplate.from_template(chat_template)
        input_variables = prompt_template.input_variables
        if sorted(input_variables) == ["context", "question"]:
            return prompt_template.format(question=query, context="\n".join(docs))
        if input_variables == ["question"]:
            return prompt_template.format(question=query)
        # Unsupported variable set: report via the module logger (was a bare
        # print) and fall back to the default template.
        logger.info(f"{prompt_template} not used, we only support 2 input variables ['question', 'context']")
    return ChatTemplate.generate_rag_prompt(query, docs)


def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_dict, **kwargs):
    """Align the output of *cur_node* into the format the downstream node (or
    the final response) can consume."""
    next_data = {}
    service_type = self.services[cur_node].service_type
    if service_type == ServiceType.EMBEDDING:
        assert isinstance(data, list)
        next_data = {"text": inputs["inputs"], "embedding": data[0]}
    elif service_type == ServiceType.RETRIEVER:
        docs = [doc["text"] for doc in data["retrieved_docs"]]

        with_rerank = runtime_graph.downstream(cur_node)[0].startswith("rerank")
        if with_rerank and docs:
            # Forward the retrieved documents to the rerank service.
            next_data["query"] = data["initial_query"]
            # Reuse the docs list computed above (previously recomputed).
            next_data["texts"] = docs
        else:
            # Forward straight to the LLM.
            if not docs and with_rerank:
                # Nothing to rerank: splice the rerank node out of
                # retriever -> rerank -> llm so the graph stays connected.
                for ds in reversed(runtime_graph.downstream(cur_node)):
                    for nds in runtime_graph.downstream(ds):
                        runtime_graph.add_edge(cur_node, nds)
                    runtime_graph.delete_node_if_exists(ds)

            # Format the prompt with the user template if provided, else the
            # default RAG template.
            next_data["inputs"] = _resolve_llm_prompt(
                data["initial_query"], docs, llm_parameters_dict["chat_template"]
            )

    elif service_type == ServiceType.RERANK:
        # Keep only the top_n documents by rerank score.
        reranker_parameters = kwargs.get("reranker_parameters")
        top_n = reranker_parameters.top_n if reranker_parameters else 1
        docs = inputs["texts"]
        reranked_docs = [docs[hit["index"]] for hit in data[:top_n]]

        # Format the prompt with the user template if provided, else the
        # default RAG template.
        next_data["inputs"] = _resolve_llm_prompt(
            inputs["query"], reranked_docs, llm_parameters_dict["chat_template"]
        )

    elif service_type == ServiceType.LLM and not llm_parameters_dict["streaming"]:
        # Non-streaming completion: surface the message content under "text".
        next_data.update(data)
        next_data["text"] = data["choices"][0]["message"]["content"]
    else:
        # Streaming LLM output (or unknown node): pass through unchanged.
        next_data = data

    return next_data


def align_generator(self, gen, **kwargs):
    """Translate an OpenAI-style SSE byte stream into the chunk format OPEA
    clients expect.

    Each raw line is expected to embed a JSON payload such as:
      data:{"id":"","object":"text_completion",...,"choices":[{"index":0,
      "delta":{"role":"assistant","content":"?"},...,"finish_reason":null}]}
    The delta content is re-emitted as ``data: <repr(content)>``; malformed or
    empty chunks fall back to echoing the extracted JSON substring.
    """
    for raw in gen:
        text = raw.decode("utf-8")
        # Extract the JSON object embedded in the SSE line: everything from
        # the first '{' to the last '}'.
        payload = text[text.find("{"): text.rfind("}") + 1]
        try:
            chunk = json.loads(payload)
            choice = chunk["choices"][0]
            if choice["finish_reason"] != "eos_token" and "content" in choice["delta"]:
                yield f"data: {repr(choice['delta']['content'])}\n\n"
        except Exception:
            # Empty or malformed chunk: pass the raw payload through as-is.
            yield f"data: {repr(payload)}\n\n"
    yield "data: [DONE]\n\n"


"""
定义网关服务
"""
class ChatService(Gateway):
    """Gateway that fronts a single backend chat (LLM) microservice and exposes
    an OpenAI-compatible ``/v1/chat/completions`` endpoint."""

    def __init__(self, host="0.0.0.0", port=9082, endpoint="/v1/chat/completions"):
        self.host = host
        self.port = port
        self.endpoint = endpoint
        # NOTE: patching the ServiceOrchestrator *class* affects every
        # orchestrator instance in the process, not just this gateway's;
        # preserved from the original design.
        ServiceOrchestrator.align_inputs = align_inputs
        ServiceOrchestrator.align_outputs = align_outputs
        ServiceOrchestrator.align_generator = align_generator

        self.megaservice = ServiceOrchestrator()

    def add_remote_service(self):
        """Register the remote backend services.

        ChatService only needs a single backend chat (LLM) service, used to
        respond to user input and generate the answer.
        """
        chat = MicroService(
            name="chat",
            host=config.opea_gateway.chatservice.backend_host,
            port=config.opea_gateway.chatservice.backend_port,
            endpoint="/v1/chat/completions",
            use_remote_service=True,
            service_type=ServiceType.LLM,
        )
        self.megaservice.add(chat)

    async def handle_request(self, request: Request):
        """Handle an incoming chat completion request and return the response."""
        data = await request.json()
        chat_request = ChatCompletionRequest.parse_obj(data)
        if logflag:
            logger.info(f"接收请求：{chat_request}")
        # Convert the OpenAI request into OPEA LLM parameters. Use explicit
        # `is not None` checks so legitimate zero values (e.g. temperature=0,
        # top_p or penalties of 0) are honored instead of being silently
        # replaced by the defaults, as the previous truthiness checks did.
        parameters = LLMParams(
            max_tokens=chat_request.max_tokens if chat_request.max_tokens is not None else 8192,
            max_new_tokens=chat_request.max_tokens if chat_request.max_tokens is not None else 8192,
            top_k=chat_request.top_k if chat_request.top_k is not None else 10,
            top_p=chat_request.top_p if chat_request.top_p is not None else 0.95,
            temperature=chat_request.temperature if chat_request.temperature is not None else 0.01,
            frequency_penalty=chat_request.frequency_penalty if chat_request.frequency_penalty is not None else 0.0,
            presence_penalty=chat_request.presence_penalty if chat_request.presence_penalty is not None else 0.0,
            repetition_penalty=chat_request.repetition_penalty if chat_request.repetition_penalty is not None else 1.03,
            streaming=False,
            chat_template=chat_request.chat_template if chat_request.chat_template else None,
        )

        if logflag:
            logger.info(f"Tools参数：{chat_request.tools}")
        # Build and schedule the runtime graph through the MegaService; the
        # initial node of the graph is the "chat" backend service.
        result_dict, runtime_graph = await self.megaservice.schedule(
            initial_inputs={"messages": chat_request.messages},
            llm_parameters=parameters,
            tools_parameters=[tools.dict() for tools in chat_request.tools] if chat_request.tools else None
        )

        # A streaming backend response is forwarded to the client unchanged.
        for node, response in result_dict.items():
            if isinstance(response, StreamingResponse):
                return response

        last_node = runtime_graph.all_leaves()[-1]

        # Return the final leaf node's result to the client.
        response = result_dict[last_node]
        if logflag:
            logger.info(f"模型生成：{response['text']}")
        if logflag:
            logger.info(f"返回响应：{response}")

        return response

    def start(self):
        """Bind this service to the Gateway base class and start serving."""
        super().__init__(
            megaservice=self.megaservice,
            host=self.host,
            port=self.port,
            endpoint=self.endpoint,
            input_datatype=ChatCompletionRequest,
            output_datatype=ChatCompletionResponse,
        )

"""
启动网关服务
"""
def start():
    """Create, wire up, and launch the ChatService gateway."""
    # Instantiate the gateway with the configured host/port.
    gateway = ChatService(
        host=config.opea_gateway.chatservice.host,
        port=config.opea_gateway.chatservice.port,
    )
    # Register the backend services the gateway depends on.
    gateway.add_remote_service()
    # Launch the service.
    gateway.start()
    if logflag:
        logger.info("ChatService 启动完成")

# Script entry point: launch the gateway when executed directly.
if __name__ == "__main__":
    start()
