import os
import json
from langchain_community.chat_models import ChatZhipuAI
from langchain_core.callbacks.manager import CallbackManager
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import BaseMessage, HumanMessage, SystemMessage, AIMessage
from langchain_core.prompts import PromptTemplate

# NOTE(security): a hardcoded API key was committed here — rotate this key and
# supply it via the deployment environment instead. setdefault() keeps the old
# behaviour when ZHIPUAI_API_KEY is unset, but no longer clobbers a key that
# the environment already provides.
os.environ.setdefault("ZHIPUAI_API_KEY", "544d7293332c56b92739130d933f115b.x4GONk9O2pkdJFDh")

# Initialise the ChatZhipuAI chat model; streaming=True plus the stdout
# callback handler prints tokens to the console as they arrive.
zhipuai = ChatZhipuAI(
    model="glm-4",
    temperature=0.5,
    streaming=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

# Static system prompt: instruct the model to extract only the dialogue that
# is strongly related to the company's cloud-clipping/business services, and
# to answer in the same format as the input, in Chinese.
_FILTER_PROMPT = (
    "1.只读取输入的数据进行提取和整理，不要再多说和扩展输入内容外的信息，"
    "2.提取和整理有关云剪和服务相关性强的业务服务对话整理"
    "3.只返回同样输入的格式要求对话形式输出，说中文"
)


def process_conversation(conversation: dict) -> str:
    """Filter one conversation record through the LLM.

    Uses the module-level ``zhipuai`` model to keep only the parts of the
    dialogue relevant to business/product/support topics.

    Args:
        conversation: A record with optional ``instruction`` and ``output``
            string fields; missing fields default to "".

    Returns:
        The model's filtered dialogue text (``response.content``).
    """
    instruction = conversation.get("instruction", "")
    output = conversation.get("output", "")

    # The prompt is static, so the per-call PromptTemplate construction and
    # .format() round-trip of the original code added nothing — pass the
    # constant string straight to SystemMessage.
    messages = [
        SystemMessage(content=_FILTER_PROMPT),
        HumanMessage(content=f"Instruction: {instruction}\nOutput: {output}"),
    ]

    # Network call; tokens also stream to stdout via the callback handler.
    response = zhipuai.invoke(messages)
    return response.content


if __name__ == "__main__":
    # Load the exported chat log, then print the filtered version of each
    # record. json.load consumes the whole file, so the handle can be closed
    # before the (slow) model calls begin.
    source_path = "./save/47966487100@chatroom_0_67.json"
    with open(source_path, "r", encoding="utf-8") as fp:
        conversations = json.load(fp)
    for record in conversations:
        print(process_conversation(record))
