import model
import uuid
import prompts
import os
import json
from datetime import datetime

# chat_history_List = [
#     {
#         "chat_id": "xxx",
#         "embedding_id": "xxx",
#         "chat_type": "rag",
#         "saved_time": "2024-04-05 19:53",
#         "chat_history": [
#             {
#                 "role": "system",
#                 "content": "你是小红薯文案专家。。。。"
#             },        
#             {
#                 "role": "user",
#                 "content": "你是xxx，我想生成哈尔滨的介绍"
#             },
#             {
#                 "role": "assistant",
#                 "content": "xxx"
#             },        
#             {
#                 "role": "user",
#                 "content": "帮我修改其中xxx片段"
#             },
#             {
#                 "role": "assistant",
#                 "content": "xxx"
#             }
#         ]
#     }
# ]
# In-memory store of every chat session created during this process lifetime.
# Each entry is a dict shaped like the example above: chat_id, embedding_id,
# chat_type, chat_history (list of {role, content} messages) and saved_time.
chat_history_List = []


def get_chat_history_by_id(id):
    """Return the stored chat record whose chat_id equals *id*, or None."""
    return next(
        (record for record in chat_history_List if record.get("chat_id") == id),
        None,
    )


def add_to_chat_history(chat_history):
    """Register a chat record in the in-memory session store."""
    chat_history_List.append(chat_history)


def save_chat(unique_chat_id):
    """Persist the in-memory chat record for *unique_chat_id* to disk.

    Writes files_cache/chat_history_<id>.json as a single JSON document.
    Uses write mode ('w') so re-saving the same chat overwrites the file:
    the previous append mode ('a') produced concatenated JSON documents
    that json.load() in get_all_saved_chats could no longer parse.

    Returns a status-message dict for the API response.
    """
    history = get_chat_history_by_id(unique_chat_id)
    # Create the cache directory on first save instead of crashing on open().
    os.makedirs("files_cache", exist_ok=True)
    history_path = os.path.join("files_cache", 'chat_history_' + unique_chat_id + '.json')
    with open(history_path, 'w', encoding='utf-8') as file:
        json.dump(history, file, ensure_ascii=False, indent=4)

    return {
        "message": "对话保存成功"
    }

def get_all_saved_chats():
    """Scan files_cache/ for saved chat JSON files and return their summaries.

    Returns a dict with a status message and 'saved_chats': a list of
    {unique_chat_id, unique_embedding_id, chat_type, abstract, saved_time}
    entries. The abstract is the first user message (index 1 of the
    chat_history; index 0 is the system message).
    """
    history_path = "files_cache"
    ret = []
    # A missing cache directory simply means nothing has been saved yet.
    if not os.path.isdir(history_path):
        return {
            "message": "返回所有对话摘要",
            'saved_chats': ret
        }
    for filename in os.listdir(history_path):
        if not filename.endswith('.json'):
            continue
        filepath = os.path.join(history_path, filename)
        with open(filepath, 'r', encoding='utf-8') as f:
            try:
                data = json.load(f)
            except json.JSONDecodeError as e:
                # Skip corrupt files instead of aborting the whole listing.
                # (Previously printed the literal "(unknown)" instead of the
                # actual filename.)
                print(f"文件 {filename} 不是一个有效的JSON文件：{e}")
                continue
        # Guard against records with a missing/short chat_history or a None
        # content, which previously raised instead of listing the chat.
        chat_history = data.get("chat_history") or []
        abstract = ""
        if len(chat_history) > 1:
            abstract = (chat_history[1].get("content") or "").strip()
        ret.append({
            "unique_chat_id": data.get("chat_id"),
            "unique_embedding_id": data.get("embedding_id"),
            "chat_type": data.get("chat_type"),
            "abstract": abstract,
            "saved_time": data.get("saved_time")
        })
    return {
        "message": "返回所有对话摘要",
        'saved_chats': ret
    }

def generate_response(query, chat_type, unique_chat_id, unique_embedding_id):
    """Generate a model response for *query* and track it in the chat history.

    A chat_type of "image_gen-<style>" routes to the image model; anything
    else goes to the text model, optionally augmented with vector-search
    context when *unique_embedding_id* is set. A falsy *unique_chat_id*
    starts a new chat; otherwise the existing history is extended.

    Returns {url, unique_chat_id} for image generation, or
    {message, unique_embedding_id, unique_chat_id} for text generation.
    """
    print(f"""generate_response -> query: {query}\nchat_type: {chat_type}\nunique_chat_id: {unique_chat_id}\nunique_embedding_id: {unique_embedding_id}\n""")
    # The Zhipu image model cannot do multi-turn edits, so each image request
    # is generated standalone (only the prompt history is recorded).
    if chat_type.startswith("image_gen"):
        if not unique_chat_id:
            unique_chat_id = str(uuid.uuid4())
        # chat_type is "image_gen-<style>"; the suffix selects the prompt style.
        image_gen_style = chat_type.split("-")[1]
        prompt = prompts.compressImagePrompts(image_gen_style, query)
        url = model.getImageLLMResponse(prompt)

        now = datetime.now()
        add_to_chat_history({
            "chat_id": unique_chat_id,
            "embedding_id": "",
            "chat_type": chat_type,
            "chat_history": [
                {
                    "role": "system",
                    "content": ""
                },
                {
                    "role": "user",
                    "content": query
                }],
            "saved_time": now.strftime('%Y-%m-%d %H:%M')
        })

        return {
            "url": url,
            "unique_chat_id": unique_chat_id
        }

    # Text generation path: optionally pull RAG context from the vector store.
    vectorSearchResults = ""
    if unique_embedding_id:
        vectorSearchResults = model.get_vector_search(query, unique_embedding_id)
    print(f"""generate_response -> vectorSearchResults: {vectorSearchResults}\n""")

    prompts_all = {
        "systemPrompt": prompts.getSystemprompt(vectorSearchResults, chat_type),
        "userPrompt": query,
        "contextTexts": vectorSearchResults
    }

    is_new_chat = not unique_chat_id
    existing = None if is_new_chat else get_chat_history_by_id(unique_chat_id)
    if not is_new_chat and existing is None:
        # Unknown chat id (e.g. the server restarted and in-memory history was
        # lost): fall back to a fresh chat instead of crashing on None access.
        is_new_chat = True

    if is_new_chat:
        if not unique_chat_id:
            unique_chat_id = str(uuid.uuid4())
        messages = prompts.compressTextPrompts(prompts_all, chat_type)
    else:
        messages = existing.get("chat_history")
        messages.append({
            "role": "user",
            "content": prompts.getUserPrompt(prompts_all, chat_type)
        })

    print(f"""generate_response -> messages: {messages}\n""")

    content = model.getTextLLMResponse(messages)

    messages.append({
        "role": "assistant",
        "content": content
    })
    now = datetime.now()
    if is_new_chat:
        add_to_chat_history({
            "chat_id": unique_chat_id,
            "embedding_id": unique_embedding_id,
            "chat_type": chat_type,
            "chat_history": messages,
            "saved_time": now.strftime('%Y-%m-%d %H:%M')
        })
    else:
        # The stored record already references this messages list, so it is
        # updated in place; calling add_to_chat_history again here (as the old
        # code did) duplicated the record in chat_history_List on every turn.
        existing["saved_time"] = now.strftime('%Y-%m-%d %H:%M')

    return {
        "message": content,
        "unique_embedding_id": unique_embedding_id,
        "unique_chat_id": unique_chat_id
    }