# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse
import json
import os
import re
import uvicorn
from comps import ChatQnAGateway, MicroService, ServiceOrchestrator, ServiceType
from langchain_core.prompts import PromptTemplate
from fastapi import FastAPI, Body

# Image-upload support for user-sketched framework/draft diagrams
from backend.pic_model import upload_file_to_oss, get_response

class ChatTemplate:
    """Builds the RAG prompt sent to the LLM.

    Holds per-request image-upload state as class variables (set by
    ``align_inputs`` when the user message carries the "@Image" marker) and
    selects one of two Chinese prompt templates depending on the question.
    """

    # Whether the current request included an uploaded sketch image.
    upload_image_status = False
    # Local path where the frontend is expected to have saved the uploaded
    # image — NOTE(review): hard-coded; confirm against the upload handler.
    upload_image_local_path = "/home/user/uploads/test.png"

    @classmethod
    def set_upload_status(cls, status: bool):
        """Latch whether the next generated prompt should include the image analysis."""
        cls.upload_image_status = status

    @staticmethod
    def generate_rag_prompt(question, documents):
        """Return the final LLM prompt for *question* given retrieved *documents*.

        The retrieved context and the finished prompt are appended to
        ``log.txt`` for debugging.
        """
        context_str = "\n".join(documents)
        with open('log.txt', 'a', encoding='utf-8') as file:
            file.write(context_str)
        # Template selection: when the question embeds a project directory
        # tree, use the software-architect analysis template; otherwise use
        # the general documentation/mermaid assistant template.
        if "项目的目录树如下所示" in question:
            template = """
** 回答语言： ** 中文
** 回答规范： **  ！！请严格注意换行！！！
**角色设定：**

您将扮演一位资深的软件架构师，负责分析和解释项目的整体架构。

**任务描述：**

请根据提供的项目文件，生成以下内容：

1. **项目概述：** 简要描述项目的目的、功能和主要特点。\n

2. **项目框架：** 列出项目的主要模块或组件，并简要说明每个模块的功能和相互关系。\n

3. **文件作用：** 对于项目中的每个文件，提供其名称、位置和主要功能描述。\n

**输入要求：**

请提供项目的源代码文件或相关文档，以便进行详细分析。

**输出格式：**

请以清晰、结构化的方式输出上述信息，使用适当的标题和列表，以便用户易于理解和参考。

**示例：**

假设您提供了一个名为"InventoryManagement"的项目文件夹，包含以下文件：

```
InventoryManagement/
├── main.py
├── models/
│   ├── product.py
│   └── category.py
├── views/
│   ├── product_view.py
│   └── category_view.py
└── controllers/
    ├── product_controller.py
    └── category_controller.py
```

一个可能的示例输出如下：

**项目概述：** \n

"InventoryManagement"是一个用于管理产品和类别信息的库存管理系统。<br />

**项目框架：** \n

- **models/**：包含与产品和类别相关的数据模型。 \n
- **views/**：负责呈现产品和类别的用户界面。 \n
- **controllers/**：处理产品和类别的业务逻辑。 \n

**文件作用：** \n

- **main.py**：项目的入口点，初始化应用程序并启动主循环。 \n
- **models/product.py**：定义产品数据模型，包括产品的属性和方法。 \n
- **models/category.py**：定义类别数据模型，包括类别的属性和方法。 \n
- **views/product_view.py**：负责显示产品信息的用户界面。 \n
- **views/category_view.py**：负责显示类别信息的用户界面。 \n
- **controllers/product_controller.py**：处理与产品相关的业务逻辑，如添加、删除和更新产品。 \n
- **controllers/category_controller.py**：处理与类别相关的业务逻辑，如添加、删除和更新类别。 \n
### 搜索结果：{context} \n
### 问题：{question} \n
### 回答：
"""
        else:
            template = """
### 回答语言：中文
### 你是一个文档书写助手，擅长根据用户的问题生成对应文档所需图片的mermaid代码，帮助用户快速绘制出所需要的图片。同时也擅长帮助用户解析项目结构。 \n
### 如果包含"生成mermaid代码"的要求，请结合知识库查找对应图表类型的mermaid代码规范，并根据规范生成准确无误的mermaid代码。如果上传了知识库且问题中不包含"生成Mermaid代码"的要求，请不要直接生成mermaid代码，而是根据用户具体的问题结合本地知识库进行分析。 \n
### 格式要求：若生成内容包含mermaid代码，请以```mermaid ```将生成的代码包含在内；若生成内容不包含mermaid代码，则无需遵循该要求。 \n
### 示例1：
- 问题 ：请生成需求文档中的流程图的Mermaid代码，图片中要表示的内容如下:系统从开始进入一个判断节点，询问某个条件是否满足。如果答案是"是"，则继续执行下一步并进入重新思考的阶段，随后再次回到判断节点进行检查，形成一个循环。如果答案是"否"，流程直接结束。
- 回答：
```mermaid \n
flowchart TD \n
    A[Start] --> B[Is it?] \n
    B -- Yes --> C[OK] \n
    C --> D[Rethink] \n
    D --> B \n
    B -- No ----> E[End] \n
``` \n
### 搜索结果：{context} \n
### 问题：{question} \n
### 回答：
"""

        # If an image was uploaded, prepend a requirement summary derived
        # from it (image is pushed to OSS, then described by a vision model).
        if ChatTemplate.upload_image_status:
            url = upload_file_to_oss(ChatTemplate.upload_image_local_path)
            response = get_response(url)
            template = f"### 基于草绘框架的简要需求描述：{response}\n" + template

        # Format exactly once (previously formatted twice: once for the log
        # write and once for the return value).
        prompt = template.format(context=context_str, question=question)
        with open('log.txt', 'a', encoding='utf-8') as file:
            file.write(prompt)
        return prompt


def _env(name, default):
    """Read configuration value *name* from the environment, with a fallback."""
    return os.getenv(name, default)


# Network endpoints of all microservices; every value can be overridden via
# the environment, defaults target local deployments.
MEGA_SERVICE_HOST_IP = _env("MEGA_SERVICE_HOST_IP", "0.0.0.0")
MEGA_SERVICE_PORT = int(_env("MEGA_SERVICE_PORT", 8888))
GUARDRAIL_SERVICE_HOST_IP = _env("GUARDRAIL_SERVICE_HOST_IP", "0.0.0.0")
GUARDRAIL_SERVICE_PORT = int(_env("GUARDRAIL_SERVICE_PORT", 80))
EMBEDDING_SERVER_HOST_IP = _env("EMBEDDING_SERVER_HOST_IP", "0.0.0.0")
EMBEDDING_SERVER_PORT = int(_env("EMBEDDING_SERVER_PORT", 80))
RETRIEVER_SERVICE_HOST_IP = _env("RETRIEVER_SERVICE_HOST_IP", "0.0.0.0")
RETRIEVER_SERVICE_PORT = int(_env("RETRIEVER_SERVICE_PORT", 7000))
RERANK_SERVER_HOST_IP = _env("RERANK_SERVER_HOST_IP", "0.0.0.0")
RERANK_SERVER_PORT = int(_env("RERANK_SERVER_PORT", 80))
LLM_SERVER_HOST_IP = _env("LLM_SERVER_HOST_IP", "0.0.0.0")
LLM_SERVER_PORT = int(_env("LLM_SERVER_PORT", 80))
# Model identifier forwarded in the OpenAI-style LLM request.
LLM_MODEL = _env("LLM_MODEL", "Intel/neural-chat-7b-v3-3")


def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
    """Normalize the request payload for the next microservice node.

    Installed on ServiceOrchestrator and invoked before each node call; the
    transformation depends on the node's service type:

    * EMBEDDING: rename the "text" field to "inputs" and latch the
      image-upload flag from the "@Image" marker in the user message.
    * RETRIEVER: merge optional caller-supplied retriever parameters.
    * LLM: convert TGI/vLLM fields into the unified OpenAI
      /v1/chat/completions request shape.

    Returns the (possibly replaced) ``inputs`` dict for the node.
    """
    print("inputstest:", inputs)
    if self.services[cur_node].service_type == ServiceType.EMBEDDING:
        # The embedding server expects {"inputs": ...} instead of {"text": ...}.
        inputs["inputs"] = inputs["text"]
        del inputs["text"]
        print("EMBEDDINGinput", inputs)
        # "@Image" in the message signals a freshly uploaded sketch image;
        # the flag makes ChatTemplate prepend an image-derived summary.
        ChatTemplate.set_upload_status("@Image" in inputs["inputs"])
    elif self.services[cur_node].service_type == ServiceType.RETRIEVER:
        # Merge optional retriever parameters supplied by the gateway.
        retriever_parameters = kwargs.get("retriever_parameters", None)
        if retriever_parameters:
            inputs.update(retriever_parameters.dict())
        print("RETRIEVERinput", inputs)
    elif self.services[cur_node].service_type == ServiceType.LLM:
        # Convert TGI/vLLM payload to the unified OpenAI /v1/chat/completions format.
        next_inputs = {
            "model": LLM_MODEL,
            "messages": [{"role": "user", "content": inputs["inputs"]}],
            "max_tokens": llm_parameters_dict["max_tokens"],
            "top_p": llm_parameters_dict["top_p"],
            "stream": inputs["streaming"],
            "frequency_penalty": inputs["frequency_penalty"],
            # presence_penalty / repetition_penalty deliberately not forwarded.
            "temperature": inputs["temperature"],
        }
        inputs = next_inputs
        print("LLMinput", inputs)
    return inputs


def _format_llm_prompt(query, docs, chat_template):
    """Build the final LLM prompt from a user *query* and supporting *docs*.

    If the caller supplied *chat_template*, honor it as long as it uses only
    the supported input variables (["context", "question"] or ["question"]);
    otherwise fall back to ChatTemplate.generate_rag_prompt.
    """
    if not chat_template:
        return ChatTemplate.generate_rag_prompt(query, docs)
    prompt_template = PromptTemplate.from_template(chat_template)
    input_variables = prompt_template.input_variables
    if sorted(input_variables) == ["context", "question"]:
        return prompt_template.format(question=query, context="\n".join(docs))
    if input_variables == ["question"]:
        return prompt_template.format(question=query)
    print(f"{prompt_template} not used, we only support 2 input variables ['question', 'context']")
    return ChatTemplate.generate_rag_prompt(query, docs)


def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_dict, **kwargs):
    """Adapt each node's response into the input of its downstream node.

    Installed on ServiceOrchestrator and invoked after each node returns:

    * EMBEDDING: pair the original text with the embedding vector.
    * RETRIEVER: either forward query + texts to the rerank stage, or (when
      rerank is absent or no documents matched) build the LLM prompt
      directly, splicing the rerank node out of the graph if needed.
    * RERANK: keep the top_n documents and build the LLM prompt.
    * LLM (non-streaming): unwrap the OpenAI-style completion into text.
    """
    next_data = {}
    if self.services[cur_node].service_type == ServiceType.EMBEDDING:
        assert isinstance(data, list)
        next_data = {"text": inputs["inputs"], "embedding": data[0]}
    elif self.services[cur_node].service_type == ServiceType.RETRIEVER:
        docs = [doc["text"] for doc in data["retrieved_docs"]]
        with_rerank = runtime_graph.downstream(cur_node)[0].startswith("rerank")
        if with_rerank and docs:
            # Forward the query and candidate texts to the rerank service.
            next_data["query"] = data["initial_query"]
            next_data["texts"] = docs
        else:
            if not docs and with_rerank:
                # Nothing retrieved: remove the rerank node so the flow goes
                # retriever -> llm instead of retriever -> rerank -> llm.
                for ds in reversed(runtime_graph.downstream(cur_node)):
                    for nds in runtime_graph.downstream(ds):
                        runtime_graph.add_edge(cur_node, nds)
                    runtime_graph.delete_node_if_exists(ds)
            next_data["inputs"] = _format_llm_prompt(
                data["initial_query"], docs, llm_parameters_dict["chat_template"]
            )
    elif self.services[cur_node].service_type == ServiceType.RERANK:
        reranker_parameters = kwargs.get("reranker_parameters", None)
        top_n = reranker_parameters.top_n if reranker_parameters else 1
        docs = inputs["texts"]
        # Keep the top_n documents in the reranker's score order.
        reranked_docs = [docs[item["index"]] for item in data[:top_n]]
        next_data["inputs"] = _format_llm_prompt(
            inputs["query"], reranked_docs, llm_parameters_dict["chat_template"]
        )
    elif self.services[cur_node].service_type == ServiceType.LLM and not llm_parameters_dict["streaming"]:
        # Non-streaming completion: unwrap to plain text.
        next_data["text"] = data["choices"][0]["message"]["content"]
    else:
        next_data = data
    print("final:", next_data)
    return next_data

# Streaming output adapter.
def align_generator(self, gen, **kwargs):
    """Re-emit raw TGI/vLLM SSE chunks as simple ``data: ...`` lines.

    Each upstream chunk is an OpenAI-style JSON payload, e.g.
    b'data:{"id":"","object":"text_completion",...,"choices":[{"index":0,
    "delta":{"role":"assistant","content":"?"},...,"finish_reason":null}]}'
    """
    for raw in gen:
        text = raw.decode("utf-8")
        # Isolate the JSON object embedded in the SSE line.
        payload = text[text.find("{"):text.rfind("}") + 1]
        try:
            parsed = json.loads(payload)
            choice = parsed["choices"][0]
            if choice["finish_reason"] != "eos_token" and "content" in choice["delta"]:
                yield f"data: {repr(choice['delta']['content'].encode('utf-8'))}\n\n"
        except Exception:
            # Empty or malformed chunk: forward the raw payload as a fallback.
            yield f"data: {repr(payload.encode('utf-8'))}\n\n"
    yield "data: [DONE]\n\n"

    
class ChatQnAService:
    """Wires the ChatQnA mega-service pipeline out of remote microservices.

    Each ``add_remote_service*`` variant assembles a different DAG
    (default with rerank, without rerank, or with an input guardrail) and
    exposes it through a ChatQnAGateway listening on ``self.port``.
    """

    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
        self.port = port
        # Install our request/response adapters on the orchestrator class so
        # every pipeline built below uses them.
        ServiceOrchestrator.align_inputs = align_inputs
        ServiceOrchestrator.align_outputs = align_outputs
        ServiceOrchestrator.align_generator = align_generator
        self.megaservice = ServiceOrchestrator()

    # --- node factories (deduplicate the repeated MicroService declarations) ---

    @staticmethod
    def _remote_node(name, host, port, endpoint, service_type):
        """Declare one remote microservice node of the pipeline."""
        return MicroService(
            name=name,
            host=host,
            port=port,
            endpoint=endpoint,
            use_remote_service=True,
            service_type=service_type,
        )

    def _embedding_node(self):
        return self._remote_node(
            "embedding", EMBEDDING_SERVER_HOST_IP, EMBEDDING_SERVER_PORT, "/embed", ServiceType.EMBEDDING
        )

    def _retriever_node(self):
        return self._remote_node(
            "retriever", RETRIEVER_SERVICE_HOST_IP, RETRIEVER_SERVICE_PORT, "/v1/retrieval", ServiceType.RETRIEVER
        )

    def _rerank_node(self):
        return self._remote_node(
            "rerank", RERANK_SERVER_HOST_IP, RERANK_SERVER_PORT, "/rerank", ServiceType.RERANK
        )

    def _llm_node(self):
        return self._remote_node(
            "llm", LLM_SERVER_HOST_IP, LLM_SERVER_PORT, "/v1/chat/completions", ServiceType.LLM
        )

    def _guardrail_in_node(self):
        return self._remote_node(
            "guardrail_in", GUARDRAIL_SERVICE_HOST_IP, GUARDRAIL_SERVICE_PORT, "/v1/guardrails", ServiceType.GUARDRAIL
        )

    def _start_gateway(self):
        """Expose the assembled mega-service through the ChatQnA gateway."""
        self.gateway = ChatQnAGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port)

    def add_remote_service(self):
        """Default pipeline: embedding -> retriever -> rerank -> llm."""
        embedding = self._embedding_node()
        retriever = self._retriever_node()
        rerank = self._rerank_node()
        llm = self._llm_node()
        self.megaservice.add(embedding).add(retriever).add(rerank).add(llm)
        self.megaservice.flow_to(embedding, retriever)
        self.megaservice.flow_to(retriever, rerank)
        self.megaservice.flow_to(rerank, llm)
        self._start_gateway()

    def add_remote_service_without_rerank(self):
        """Pipeline without a rerank stage: embedding -> retriever -> llm."""
        embedding = self._embedding_node()
        retriever = self._retriever_node()
        llm = self._llm_node()
        self.megaservice.add(embedding).add(retriever).add(llm)
        self.megaservice.flow_to(embedding, retriever)
        self.megaservice.flow_to(retriever, llm)
        self._start_gateway()

    def add_remote_service_with_guardrails(self):
        """Pipeline with an input guardrail:
        guardrail_in -> embedding -> retriever -> rerank -> llm.

        Only the input side is guarded; an output guardrail stage was
        sketched previously but is currently disabled.
        """
        guardrail_in = self._guardrail_in_node()
        embedding = self._embedding_node()
        retriever = self._retriever_node()
        rerank = self._rerank_node()
        llm = self._llm_node()
        self.megaservice.add(guardrail_in).add(embedding).add(retriever).add(rerank).add(llm)
        self.megaservice.flow_to(guardrail_in, embedding)
        self.megaservice.flow_to(embedding, retriever)
        self.megaservice.flow_to(retriever, rerank)
        self.megaservice.flow_to(rerank, llm)
        self._start_gateway()


if __name__ == "__main__":
    # Best-effort debug dump of any previously uploaded user message.
    # Guarded: the unconditional open() crashed with FileNotFoundError on a
    # fresh deployment where the uploads directory is still empty.
    message_path = "/home/user/uploads/message.txt"
    if os.path.exists(message_path):
        with open(message_path, "r") as file:
            print(file.read())

    parser = argparse.ArgumentParser()
    parser.add_argument("--without-rerank", action="store_true")
    parser.add_argument("--with-guardrails", action="store_true")
    args = parser.parse_args()

    chatqna = ChatQnAService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT)

    # Assemble the requested pipeline variant; each variant also starts the
    # gateway on MEGA_SERVICE_PORT.
    if args.without_rerank:
        chatqna.add_remote_service_without_rerank()
    elif args.with_guardrails:
        chatqna.add_remote_service_with_guardrails()
    else:
        chatqna.add_remote_service()
