# Copyright (c) 2024-present AI-Labs

from comps import Gateway, MicroService, ServiceOrchestrator, ServiceType, CustomLogger
from comps.cores.proto.api_protocol import (
    ChatCompletionRequest,
    ChatCompletionResponse
)
from comps.cores.proto.docarray import LLMParams
from fastapi import Request
from fastapi.responses import StreamingResponse

from configs import config

# Module-level logger shared by the adapter functions and the gateway class.
logger = CustomLogger("Text2Image Service")
# Verbose request/response logging toggle, read from the project config.
logflag = config.opea_gateway.text2imageservice.logs


def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
    """Align the OPEA-platform input payload to the format the backend
    service behind the current node understands.

    For a TEXT2IMAGE node the extra text2image parameters are merged in and
    the text is moved under the "prompt" key; for an LLM node the TGI/vLLM
    style fields are repacked into an OpenAI /v1/chat/completions request.
    """
    service_type = self.services[cur_node].service_type
    if service_type == ServiceType.TEXT2IMAGE:
        extra_params = kwargs.get("text2image_parameters")
        if extra_params:
            inputs.update(extra_params)
        # The image backend expects the text under "prompt", not "text".
        inputs["prompt"] = inputs.pop("text")
        if logflag:
            logger.info(f"请求文生图内容：{inputs}")
    elif service_type == ServiceType.LLM:
        # Repack TGI/vLLM-style fields into the unified OpenAI
        # /v1/chat/completions request format.
        inputs = {
            "model": "chat",
            "messages": inputs["messages"],
            "max_tokens": inputs["max_tokens"],
            "top_p": inputs["top_p"],
            "stream": inputs["streaming"],
            "frequency_penalty": inputs["frequency_penalty"],
            "presence_penalty": inputs["presence_penalty"],
            "repetition_penalty": inputs["repetition_penalty"],
            "temperature": inputs["temperature"],
        }
    return inputs


def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_dict, **kwargs):
    """Align the current node's output to the format the next node (or the
    final response) can consume.

    Only a non-streaming LLM node is rewritten: its OpenAI chat-completion
    envelope is unwrapped to ``{"text": ...}``. Every other payload passes
    through unchanged.
    """
    is_blocking_llm = (
        self.services[cur_node].service_type == ServiceType.LLM
        and not llm_parameters_dict["streaming"]
    )
    if not is_blocking_llm:
        return data

    next_data = {"text": data["choices"][0]["message"]["content"]}
    if logflag:
        logger.info(f"大语言模型生成：{next_data['text']}")
    return next_data


"""
定义网关服务
"""
class Text2ImageService(Gateway):
    """Gateway that turns a text prompt into an image: the prompt is first
    translated to English by an LLM (chat) service, then fed to a
    text-to-image backend.
    """

    def __init__(self, host="0.0.0.0", port=9092, endpoint="/v1/text2image"):
        # Network location and route this gateway serves on.
        self.host = host
        self.port = port
        self.endpoint = endpoint
        # Monkey-patch the orchestrator class so scheduling uses the
        # module-level input/output adapters defined above.
        ServiceOrchestrator.align_inputs = align_inputs
        ServiceOrchestrator.align_outputs = align_outputs

        self.megaservice = ServiceOrchestrator()

    def add_remote_service(self):
        """
        Register the remote backend services; Text2ImageService needs two:
        a chat (LLM) service and a text2image service.
        """
        # The chat service processes (translates) the user's prompt text.
        chat = MicroService(
            name="chat",
            host=config.opea_gateway.chatservice.backend_host,
            port=config.opea_gateway.chatservice.backend_port,
            endpoint="/v1/chat/completions",
            use_remote_service=True,
            service_type=ServiceType.LLM,
        )
        # The text2image service generates an image from the text.
        text2image = MicroService(
            name="text2image",
            host=config.opea_service.text2image.host,
            port=config.opea_service.text2image.port,
            endpoint="/v1/text2image",
            use_remote_service=True,
            service_type=ServiceType.TEXT2IMAGE,
        )
        # Register both services with the orchestrator.
        self.megaservice.add(chat).add(text2image)
        # Wire the pipeline: chat output feeds the text2image service.
        self.megaservice.flow_to(chat, text2image)

    async def handle_request(self, request: Request):
        """
        Handle an incoming request and return the generated images.
        """
        data = await request.json()
        if logflag:
            logger.info(f"接收用户请求：{data}")
        # The user's drawing request text.
        prompt = data["prompt"]
        # Build a chat exchange that translates a Chinese prompt to English
        # (English prompts are returned verbatim by the system instruction).
        messages = [{
                "role": "system",
                "content": "你是一个文生图 Prompt 翻译器，将中文翻译为英文，翻译简洁精准，如果已经是英文就直接原文返回。你只需要直接给出英文答案, 不要废话"
            },
            {
                "role": "user",
                "content": prompt
            }]
        # Build the OpenAI-style chat request.
        chat_request = ChatCompletionRequest.parse_obj({"messages": messages})
        # Convert the OpenAI-style request into OPEA LLM parameters,
        # falling back to fixed defaults for every unset field.
        # Streaming is forced off because the translated text must be
        # collected in full before it is sent to the image backend.
        parameters = LLMParams(
            max_tokens=chat_request.max_tokens if chat_request.max_tokens else 8192,
            max_new_tokens=chat_request.max_tokens if chat_request.max_tokens else 8192,
            top_k=chat_request.top_k if chat_request.top_k else 10,
            top_p=chat_request.top_p if chat_request.top_p else 0.95,
            temperature=chat_request.temperature if chat_request.temperature else 0.01,
            frequency_penalty=chat_request.frequency_penalty if chat_request.frequency_penalty else 0.0,
            presence_penalty=chat_request.presence_penalty if chat_request.presence_penalty else 0.0,
            repetition_penalty=chat_request.repetition_penalty if chat_request.repetition_penalty else 1.03,
            streaming=False,
            chat_template=chat_request.chat_template if chat_request.chat_template else None,
        )

        # Schedule the pipeline through the MegaService; the initial node is
        # the chat service. The raw request dict is forwarded as the
        # text2image parameters (merged in by align_inputs).
        result_dict, runtime_graph = await self.megaservice.schedule(
            initial_inputs={"messages": chat_request.messages},
            llm_parameters=parameters,
            text2image_parameters=data
        )

        # If any node produced a streaming response, return it directly.
        for node, response in result_dict.items():
            if isinstance(response, StreamingResponse):
                return response

        last_node = runtime_graph.all_leaves()[-1]

        # Extract the generated images from the final node's output.
        images = result_dict[last_node]["images"]
        if logflag:
            logger.info("返回给用户")

        # Return the response payload.
        return images

    def start(self):
        # NOTE(review): Gateway.__init__ is deliberately deferred to here —
        # presumably it binds the endpoint and starts serving once the
        # orchestrator is fully wired; confirm against the Gateway base class.
        super().__init__(
            megaservice=self.megaservice,
            host=self.host,
            port=self.port,
            endpoint=self.endpoint,
            input_datatype=ChatCompletionRequest,
            output_datatype=ChatCompletionResponse,
        )

"""
启动网关服务
"""
def start():
    """Create, wire up, and launch the Text2Image gateway service."""
    gateway = Text2ImageService(
        host=config.opea_gateway.text2imageservice.host,
        port=config.opea_gateway.text2imageservice.port,
    )
    # Attach the remote backend services the gateway depends on.
    gateway.add_remote_service()
    # Bring the gateway online.
    gateway.start()
    if logflag:
        logger.info("Text2ImageService 启动完成")

if __name__ == "__main__":
    start()
