
import asyncio
import os
import sys
import json
import re

from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
from comps.cores.mega.utils import handle_message
from comps.cores.proto.api_protocol import ChatCompletionRequest, ChatCompletionResponse
from comps.cores.proto.docarray import LLMParams
from fastapi import Request


class PromptTemplate:
    """Builds the fixed copywriting prompt sent to the LLM stage."""

    @staticmethod
    def generate_prompt(question):
        """Embed *question* as the topic inside the copywriting prompt.

        The surrounding instructions (expert persona, content restrictions,
        ``[图片]`` image placeholders) are fixed; only the topic varies.
        """
        return f"""
### 你是一个文案写作专家，你擅长帮助用户写作微信公众号文章、小红书热点选题。请基于用户给出的主题，完成一篇主题鲜明、结构清晰的文章。在需要配图的位置留下[图片]字样的标记，方便后续流程进行配图。
### 限制：
    1.你需要严格遵守国家法律法规，遇到政治类、社会问题类等敏感题材，请拒绝用户请求。
    2.当用户输入的话题与文案写作无关，请拒绝用户请求。
    3.当用户输入的内容中包含字数要求时，请直接忽略字数要求，只关注文章主题即可。
### 主题：{question}
### 回答：
"""
    


# Service configuration read from the environment; defaults guard against
# variables that were left unset.
MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 8888))
LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))
LLM_MODEL = os.getenv("LLM_MODEL", "Intel/neural-chat-7b-v3-3")
# Fix: read TEXT2IMAGE_SERVICE_HOST_IP — the name that check_env_vars in
# __main__ validates — instead of only TEXT2IMAGE_HOST_IP, which was never
# checked. The legacy TEXT2IMAGE_HOST_IP name is kept as a fallback for
# backward compatibility with existing deployments.
TEXT2IMAGE_SERVICE_HOST_IP = os.getenv("TEXT2IMAGE_SERVICE_HOST_IP", os.getenv("TEXT2IMAGE_HOST_IP", "0.0.0.0"))
TEXT2IMAGE_SERVICE_PORT = int(os.getenv("TEXT2IMAGE_SERVICE_PORT", 9379))
TTS_SERVICE_HOST_IP = os.getenv("TTS_SERVICE_HOST_IP", "0.0.0.0")
TTS_SERVICE_PORT = int(os.getenv("TTS_SERVICE_PORT", 9088))
ANIMATION_SERVICE_HOST_IP = os.getenv("ANIMATION_SERVICE_HOST_IP", "0.0.0.0")
ANIMATION_SERVICE_PORT = int(os.getenv("ANIMATION_SERVICE_PORT", 9066))

# Input/output alignment is a key step of component orchestration; this hook
# is invoked by ServiceOrchestrator before each node runs.
def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
    """Reshape *inputs* into the payload expected by the current node.

    Only the LLM node needs translation: its raw text is rendered through the
    copywriting prompt template and wrapped into a chat-completions request.
    Every other node receives the inputs untouched.
    """
    if self.services[cur_node].service_type != ServiceType.LLM:
        return inputs
    # Build the standardized prompt from the template, then assemble the
    # chat-completions request body.
    topic_prompt = PromptTemplate.generate_prompt(inputs["inputs"])
    return {
        "model": LLM_MODEL,
        "messages": [{"role": "user", "content": topic_prompt}],
        "max_tokens": llm_parameters_dict["max_tokens"],
        "top_p": llm_parameters_dict["top_p"],
        "stream": inputs["stream"],
        "frequency_penalty": inputs["frequency_penalty"],
        "temperature": inputs["temperature"],
    }

def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_dict, **kwargs):
    """Normalize a node's raw response before it is handed downstream.

    A non-streaming LLM response is unwrapped from its chat-completion
    envelope down to ``{"text": ...}``; all other responses pass through.
    """
    is_plain_llm_reply = (
        self.services[cur_node].service_type == ServiceType.LLM
        and not llm_parameters_dict["stream"]
    )
    if is_plain_llm_reply:
        return {"text": data["choices"][0]["message"]["content"]}
    return data
# Align the streaming generator: re-emit each JSON chunk as an SSE data line.
def align_generator(self, gen, **kwargs):
    """Translate raw byte chunks from the LLM stream into SSE events.

    Each chunk is scanned for its outermost ``{...}`` object; token deltas
    are re-emitted as ``data:`` lines (repr of the UTF-8 bytes), anything
    unparsable is forwarded verbatim, and a final ``[DONE]`` event closes
    the stream.
    """
    for raw in gen:
        text = raw.decode("utf-8")
        # Slice out the outermost JSON object embedded in the line.
        payload = text[text.find("{"):text.rfind("}") + 1]
        try:
            chunk = json.loads(payload)
            choice = chunk["choices"][0]
            # Check finish_reason first so a terminal chunk without a delta
            # is skipped rather than treated as malformed.
            if choice["finish_reason"] != "eos_token" and "content" in choice["delta"]:
                yield f"data: {repr(choice['delta']['content'].encode('utf-8'))}\n\n"
        except Exception:
            # Not valid JSON (or unexpected shape): forward the raw slice.
            yield f"data: {repr(payload.encode('utf-8'))}\n\n"
    yield "data: [DONE]\n\n"

# Validate required environment variables; exit the program if any is missing.
def check_env_vars(env_var_list):
    """Exit with status 1 unless every variable in *env_var_list* is set.

    Improvement over the original: reports EVERY missing variable instead of
    exiting at the first one, so a misconfigured deployment can be fixed in
    a single pass.
    """
    missing = [var for var in env_var_list if os.getenv(var) is None]
    if missing:
        for var in missing:
            print(f"Error: The environment variable '{var}' is not set.")
        sys.exit(1)
    print("All environment variables are set.")


class SmartWritingService:
    """Gateway megaservice: topic text in, talking-avatar video path out.

    Composes remote microservices (LLM -> TTS -> animation, plus an
    optional, currently disabled text2image branch) behind a single
    ``/v1/smartwriting`` endpoint.
    """

    def __init__(self, host="0.0.0.0", port=8888):
        # host/port: interface and TCP port the megaservice listens on.
        self.host = host
        self.port = port
        # Install this module's alignment hooks on the orchestrator CLASS
        # before instantiating it, so the instance below uses our
        # input/output/stream adapters.
        ServiceOrchestrator.align_inputs = align_inputs
        ServiceOrchestrator.align_outputs = align_outputs
        ServiceOrchestrator.align_generator = align_generator
        self.megaservice = ServiceOrchestrator()
        # MegaServiceEndpoint only defines a limited set of constants, so
        # the endpoint path is set manually here.
        self.endpoint = "/v1/smartwriting"

    def add_remote_service(self):
        """Declare the remote microservices and wire the execution graph."""
        # LLM: generates the copy (WeChat articles, Xiaohongshu posts, ...).
        llm = MicroService(
            name="llm",
            host=LLM_SERVICE_HOST_IP,
            port=LLM_SERVICE_PORT,
            endpoint="/v1/chat/completions",
            use_remote_service=True,
            service_type=ServiceType.LLM,
        )
        # text2image: renders images for scenes picked from the copy.
        text2image = MicroService(
            name="text2image",
            host=TEXT2IMAGE_SERVICE_HOST_IP,
            port=TEXT2IMAGE_SERVICE_PORT,
            endpoint="/v1/text2image",
            use_remote_service=True,
            service_type=ServiceType.TEXT2IMAGE,
        )
        # TTS: synthesizes speech from the LLM-generated copy.
        tts = MicroService(
            name="tts",
            host=TTS_SERVICE_HOST_IP,
            port=TTS_SERVICE_PORT,
            endpoint="/v1/audio/speech",
            use_remote_service=True,
            service_type=ServiceType.TTS,
        )
        # Animation: drives lip sync and motion from the synthesized audio.
        animation = MicroService(
            name="animation",
            host=ANIMATION_SERVICE_HOST_IP,
            port=ANIMATION_SERVICE_PORT,
            endpoint="/v1/animation",
            use_remote_service=True,
            service_type=ServiceType.ANIMATION,
        )
        self.megaservice.add(llm).add(text2image).add(tts).add(animation)
        # Uncomment the following line to enable image generation.
        # self.megaservice.flow_to(llm, text2image)
        self.megaservice.flow_to(llm, tts)
        self.megaservice.flow_to(tts, animation)

    async def handle_request(self, request: Request):
        """POST handler: run the scheduled flow and return the video path."""
        data = await request.json()
        # Streaming output is disabled by default; adjust to deployment needs.
        stream_opt = data.get("stream", False)
        chat_request = ChatCompletionRequest.parse_obj(data)
        prompt = handle_message(chat_request.messages)
        parameters = LLMParams(
            max_tokens=chat_request.max_tokens if chat_request.max_tokens else 128,
            top_k=chat_request.top_k if chat_request.top_k else 10,
            top_p=chat_request.top_p if chat_request.top_p else 0.95,
            temperature=chat_request.temperature if chat_request.temperature else 0.01,
            # NOTE(review): the request's presence_penalty is mapped onto
            # repetition_penalty here — confirm this translation is intended.
            repetition_penalty=chat_request.presence_penalty if chat_request.presence_penalty else 1.03,
            streaming=stream_opt,
        )
        # Run the flow; note the initial input format the orchestrator expects.
        result_dict, runtime_graph = await self.megaservice.schedule(
            initial_inputs={"text": prompt}, llm_parameters=parameters
        )

        last_node = runtime_graph.all_leaves()[-1]
        # Assumes the final leaf (animation) result is a dict containing
        # "video_path" — TODO(review): confirm streaming requests also
        # produce this shape.
        response = result_dict[last_node]["video_path"]
        return response

    def start(self):
        """Register the POST route and launch the megaservice HTTP server."""
        self.service = MicroService(
            self.__class__.__name__,
            service_role=ServiceRoleType.MEGASERVICE,
            host=self.host,
            port=self.port,
            endpoint=self.endpoint,
            input_datatype=ChatCompletionRequest,
            output_datatype=ChatCompletionResponse,
        )
        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
        self.service.start()


if __name__ == "__main__":
    # Fail fast when the deployment is missing required configuration.
    required_env = [
        "MEGA_SERVICE_HOST_IP",
        "MEGA_SERVICE_PORT",
        "LLM_SERVICE_HOST_IP",
        "LLM_SERVICE_PORT",
        "LLM_MODEL",
        "TEXT2IMAGE_SERVICE_HOST_IP",
        "TEXT2IMAGE_SERVICE_PORT",
        "TTS_SERVICE_HOST_IP",
        "TTS_SERVICE_PORT",
        "ANIMATION_SERVICE_HOST_IP",
        "ANIMATION_SERVICE_PORT",
    ]
    check_env_vars(required_env)

    # Build the megaservice, wire the remote components, and start serving.
    service = SmartWritingService(port=MEGA_SERVICE_PORT)
    service.add_remote_service()
    service.start()
