import base64
from flask import Flask, request, jsonify, send_from_directory, Response
import os
from langchain_community.chat_models import ChatTongyi
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import StateGraph, START, END, MessagesState
from werkzeug.utils import secure_filename
from langgraph.checkpoint.memory import MemorySaver


# Initialize the Flask application
app = Flask(__name__)

# Upload configuration: destination folder and accepted image extensions
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Make sure the upload folder exists (no-op if it is already there)
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Check whether a filename carries an extension we accept for upload.
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS


# Shared model clients: qwen-vl-max for image understanding, qwen-max for chat.
# NOTE(review): stream=True is passed at construction, but every call site also
# uses .stream(...) explicitly — confirm the constructor flag is still required.
picLLM = ChatTongyi(model_name="qwen-vl-max", stream=True)
chatLLM = ChatTongyi(model_name="qwen-max", stream=True)


class ImageAnalyzer:
    """Runs the vision model over an uploaded image to derive a system prompt.

    The accumulated prompt text is stored on ``self.system_prompt`` so that a
    ChatBot instance can later be constructed from it.
    """

    def __init__(self):
        # Populated by analyze_image(); stays None until an image is analyzed.
        self.system_prompt = None

    def analyze_image(self, image_path):
        """Stream the vision-model output for *image_path* as SSE events.

        Yields ``data: <text>\\n\\n`` lines followed by ``data: [DONE]\\n\\n``;
        on any failure a single ``data: Error: ...`` event is yielded instead.
        As a side effect, the joined text is saved to ``self.system_prompt``.
        """
        try:
            base64_image = encode_image(image_path)
            contents = []

            # NOTE(review): the data URI always claims image/jpeg even for
            # png/gif uploads — the model appears tolerant, but confirm.
            image_message = {
                "image": f"data:image/jpeg;base64,{base64_image}"
            }
            text_message = {
                "text": "用图片中识别出来的角色，提供一段system角色的提示词。直接输出提示词即可。"
            }

            message = HumanMessage(content=[text_message, image_message])

            # Stream the response; chunk.content may be a plain string or a
            # list of content-part dicts. The original handled only the list
            # case and read only its FIRST element, silently dropping text —
            # handle both shapes and every part.
            for chunk in picLLM.stream([message]):
                if not chunk.content:
                    continue
                if isinstance(chunk.content, str):
                    parts = [chunk.content]
                else:
                    parts = [part.get('text', '') for part in chunk.content
                             if isinstance(part, dict)]
                for text in parts:
                    if text:
                        contents.append(text)
                        yield f"data: {text}\n\n"

            self.system_prompt = ''.join(contents)
            yield "data: [DONE]\n\n"

        except Exception as e:
            yield f"data: Error: {str(e)}\n\n"

def encode_image(image_path: str) -> str:
    """Read the file at *image_path* and return its contents base64-encoded."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")


class ChatBot:
    """Conversational wrapper around chatLLM with in-memory history.

    Every turn (user message + AI reply) is appended to ``message_history`` so
    later calls carry the full context. A LangGraph workflow is also compiled
    (``self.application``) around the same logic, although ``chat()`` currently
    talks to the model directly.
    """

    def __init__(self, system_prompt):
        # Easter-egg rules are appended to the image-derived prompt. The text
        # below is model-facing at runtime and deliberately left untouched.
        self.system_prompt = system_prompt + """
         如果问题中包含“堆雪人”，“雪人”，"snowman", "build a snowman"，请告诉用户“恭喜你已经发现了隐藏彩蛋！You have found the hidden Ester Egg! Play Well! 大家能拿到的PMP加倍哦！！！”。注意不要直接告诉用户触发彩蛋的关键词。
        用户触发了彩蛋后，请你给用户用文字和符号画一个形象好看的雪人。
        如果问题中包含“最帅的男人”，“最帅的男人是谁？”，请直接回答“是LEGO集团的Peter Wang”
        """

        self.message_history = []  # accumulated HumanMessage / AIMessage turns
        self.prompt_template = ChatPromptTemplate.from_messages(
            [
                ("system", self.system_prompt),
                MessagesPlaceholder(variable_name="messages")
            ]
        )

        self.workflow = StateGraph(state_schema=MessagesState)

        def node_1(state: MessagesState):
            # Single processing node: prepend the system prompt and the
            # accumulated history, stream the model, fold deltas into one reply.
            messages = [
                SystemMessage(content=self.system_prompt),
                *self.message_history,
                *state["messages"]
            ]

            # BUG FIX: the original kept only the *last* streamed chunk
            # (`response = chunk`), discarding every earlier delta. Collect
            # all chunk contents and join them into a single AIMessage.
            pieces = []
            for chunk in chatLLM.stream(messages):
                if hasattr(chunk, "content") and chunk.content:
                    pieces.append(chunk.content)

            if pieces:
                response = AIMessage(content=''.join(pieces))
                self.message_history.extend(state["messages"])  # user messages
                self.message_history.append(response)           # AI reply
                return {"messages": state["messages"] + [response]}

            return {"messages": state["messages"]}

        self.workflow.add_node("process", node_1)
        self.workflow.add_edge(START, "process")
        self.workflow.add_edge("process", END)

        self.memory_saver = MemorySaver()
        self.application = self.workflow.compile(checkpointer=self.memory_saver)

    def chat(self, message: str):
        """Stream the model's reply to *message* as SSE ``data:`` events.

        Yields incremental chunks, then ``data: [DONE]``; on failure a single
        ``data: Error: ...`` event is yielded. On success, the completed turn
        is appended to ``message_history``.
        """
        try:
            # Full context: system prompt, prior turns, then the new message.
            messages = [
                SystemMessage(content=self.system_prompt),
                *self.message_history,
                HumanMessage(content=message)
            ]

            response_chunks = []
            for chunk in chatLLM.stream(messages):
                if hasattr(chunk, "content"):
                    response_chunks.append(chunk.content)
                    yield f"data: {chunk.content}\n\n"

            # Record the turn only after the stream completed successfully.
            self.message_history.append(HumanMessage(content=message))
            self.message_history.append(AIMessage(content=''.join(response_chunks)))

            yield "data: [DONE]\n\n"

        except Exception as e:
            print(f"Error in chat: {str(e)}")
            yield f"data: Error: {str(e)}\n\n"


# Global, process-wide instances shared by every request — there is no
# per-client isolation: all users share one analyzer and one chatbot.
image_analyzer = ImageAnalyzer()
chatbot = None  # created lazily in /chat once image analysis has produced a prompt

# Image-upload route: save the file, then stream the vision analysis as SSE.
@app.route('/upload', methods=['POST'])
def upload_image():
    """Accept an image upload and stream its analysis as text/event-stream.

    Side effect: resets the global ``chatbot`` so the next /chat call builds a
    fresh one from the newly derived system prompt.
    """
    global chatbot

    if 'image' not in request.files:
        return jsonify({'error': '没有检测到图片文件'}), 400

    file = request.files['image']

    # An empty filename means the form was submitted without picking a file.
    if not file.filename or not allowed_file(file.filename):
        return jsonify({'error': '文件格式不支持'}), 400

    filename = secure_filename(file.filename)
    # secure_filename() can strip a hostile name down to an empty string;
    # reject that instead of calling file.save('') and crashing.
    if not filename:
        return jsonify({'error': '文件格式不支持'}), 400

    filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(filepath)

    # Force a fresh ChatBot on the next /chat call so it uses the new prompt.
    chatbot = None

    return Response(
        image_analyzer.analyze_image(filepath),
        mimetype='text/event-stream',
        headers={
            'Cache-Control': 'no-cache',
            'X-Accel-Buffering': 'no',   # disable nginx buffering for SSE
            'Connection': 'keep-alive'
        }
    )


# Chat route: accept a JSON message and stream the model reply as SSE.
@app.route('/chat', methods=['POST'])
def chat():
    """Handle one user message; reply with a text/event-stream body."""
    global chatbot

    try:
        payload = request.get_json()
        if payload is None:
            print("Warning: request.get_json() returned None")  # debug
            return jsonify({'error': '无效的 JSON 数据'}), 400

        message = payload.get('message')
        if not message:
            print("Warning: empty message")  # debug
            return jsonify({'error': '消息不能为空'}), 400

        # A chatbot can only be built once an image analysis produced a prompt.
        if chatbot is None and not image_analyzer.system_prompt:
            print("Warning: chatbot not initialized and no system prompt")  # debug
            return jsonify({'error': '请先上传图片'}), 400

        if chatbot is None:
            print("Info: initializing chatbot")  # debug
            chatbot = ChatBot(image_analyzer.system_prompt)

        print(f"Info: processing message: {message}")  # debug

        sse_headers = {
            'Cache-Control': 'no-cache',
            'X-Accel-Buffering': 'no',
            'Connection': 'keep-alive',
            'Access-Control-Allow-Origin': '*'  # CORS for browser clients
        }
        return Response(
            chatbot.chat(message),
            mimetype='text/event-stream',
            headers=sse_headers
        )

    except Exception as e:
        print(f"Error in /chat route: {str(e)}")  # debug
        return jsonify({'error': str(e)}), 500


# Home page route: serve the static front-end.
@app.route('/')
def index():
    """Serve the single-page front-end from the static folder."""
    page = 'index.html'
    return send_from_directory('static', page)


# Start the Flask development server (debug=True enables the reloader and
# interactive debugger — not suitable for production deployment).
if __name__ == '__main__':
    app.run(debug=True)
