import base64, os
from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, Response
from werkzeug.utils import secure_filename

app = Flask(__name__)

# Upload configuration: destination folder and the image extensions accepted.
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Make sure the upload folder exists before the first request arrives.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Accept only filenames whose extension appears in ALLOWED_EXTENSIONS.
def allowed_file(filename):
    """Return True if *filename* carries an allowed image extension."""
    parts = filename.rsplit('.', 1)
    return len(parts) == 2 and parts[1].lower() in ALLOWED_EXTENSIONS

# Model clients.
# chatLLM: default OpenAI endpoint (reads OPENAI_API_KEY from the environment).
# picLLM: Alibaba DashScope's OpenAI-compatible endpoint, used for vision.
chatLLM = OpenAI()
picLLM = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Encode an image file as a base64 string for embedding in an API payload.
def encode_image(image_path: str) -> str:
    """Read *image_path* as bytes and return its base64 text encoding."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")


class ChatBot:
    """Role-playing chat bot that keeps a running conversation history.

    The system prompt (produced by image analysis) is installed as a
    "developer" message; each chat() call streams the model's reply as
    server-sent events and records both sides of the exchange.
    """

    def __init__(self, system_prompt):
        self.system_prompt = system_prompt
        # Conversation history in OpenAI chat format, seeded with the persona prompt.
        self.message = [
            {"role": "developer", "content": [{"type": "text", "text": self.system_prompt}]},
        ]

    def chat(self, user_input: str):
        """Stream the model's reply to *user_input* as SSE lines.

        Yields "data: <chunk>\n\n" strings followed by "data: [DONE]\n\n",
        then appends the complete reply to the conversation history.
        """
        self.message.append(
            {"role": "user", "content": [{"type": "text", "text": user_input}]}
        )

        # Streaming completion request.
        completion = chatLLM.chat.completions.create(
            model="gpt-4o-mini-2024-07-18",
            messages=self.message,
            stream=True
        )

        # Collect chunks so the full reply can be stored after streaming.
        full_response = []

        for chunk in completion:
            content = chunk.choices[0].delta.content
            if content is not None:
                # BUG FIX: chunks were previously never collected, so the
                # assistant turn was always recorded as an empty string.
                full_response.append(content)
                yield f"data: {content}\n\n"
        yield "data: [DONE]\n\n"

        # Record the assistant's complete reply in the history.
        assistant_response = "".join(full_response)
        self.message.append(
            {"role": "assistant", "content": [{"type": "text", "text": assistant_response}]}
        )


# Generate a persona prompt from an uploaded image.
class ImageAnalyzer:
    """Asks the vision model to write a first-person system prompt for the
    character shown in an image, streaming the text as server-sent events."""

    def __init__(self):
        # Filled in once analyze_image() finishes; consumed by the chat bot.
        self.system_prompt = None

    def analyze_image(self, image_path: str):
        """Yield SSE lines describing the image and store the full text."""
        encoded = encode_image(image_path)
        pieces = []

        try:
            stream = picLLM.chat.completions.create(
                model="qwen-vl-max",
                messages=[
                    {"role": "user", "content": [
                        {"type": "text", "text": "识别图片中的角色，以这个角色的第一人称，提供一段system角色的提示词。直接输出提示词即可，提示词需要告诉大家你的名字。"},
                        {"type": "image_url",
                         "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}}
                    ]}
                ],
                stream=True,
            )

            for chunk in stream:
                piece = getattr(chunk.choices[0].delta, 'content', None)
                if piece:
                    pieces.append(piece)
                    yield f"data: {piece}\n\n"

            yield "data: [DONE]\n\n"

            self.system_prompt = ''.join(pieces)

        except Exception as e:
            yield f"data: Error: {str(e)}\n\n"

# Global singletons shared across requests (single-process dev server only).
image_analyzer = ImageAnalyzer()
chatbot = None  # created lazily after an image has been analyzed

# Image upload route: save the file, then stream the vision model's analysis.
@app.route('/upload', methods=['POST'])
def upload_image():
    """Accept an image upload and stream its analysis as server-sent events.

    Returns 400 when no file is present, the filename is empty, or the
    extension is unsupported.  Side effect: resets the global chatbot so the
    next /chat call builds a new persona from this image.
    """
    global chatbot

    if 'image' not in request.files:
        return jsonify({'error': '没有检测到图片文件'}), 400

    file = request.files['image']

    # An empty filename means the form was submitted without choosing a file.
    if not file or not file.filename or not allowed_file(file.filename):
        return jsonify({'error': '文件格式不支持'}), 400

    filename = secure_filename(file.filename)
    # BUG FIX: secure_filename() can strip a hostile/non-ASCII name down to an
    # empty string; saving to it would target the upload directory itself.
    if not filename:
        return jsonify({'error': '文件格式不支持'}), 400

    filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(filepath)

    # Reset the chatbot so the next conversation uses the new image's persona.
    chatbot = None

    return Response(
        image_analyzer.analyze_image(filepath),
        mimetype='text/event-stream',
        headers={
            'Cache-Control': 'no-cache',
            'X-Accel-Buffering': 'no',
            'Connection': 'keep-alive'
        }
    )

# User message route: stream a chat reply as server-sent events.
@app.route('/chat', methods=['POST'])
def chat():
    """Validate the posted JSON, lazily build the chatbot, stream its reply."""
    global chatbot

    sse_headers = {
        'Cache-Control': 'no-cache',
        'X-Accel-Buffering': 'no',
        'Connection': 'keep-alive',
        'Access-Control-Allow-Origin': '*'  # CORS support
    }

    try:
        payload = request.get_json()
        if payload is None:
            print("Warning: request.get_json() returned None")  # debug
            return jsonify({'error': '无效的 JSON 数据'}), 400

        message = payload.get('message')
        if not message:
            print("Warning: empty message")  # debug
            return jsonify({'error': '消息不能为空'}), 400

        if chatbot is None and not image_analyzer.system_prompt:
            print("Warning: chatbot not initialized and no system prompt")  # debug
            return jsonify({'error': '请先上传图片'}), 400

        if chatbot is None:
            print("Info: initializing chatbot")  # debug
            chatbot = ChatBot(image_analyzer.system_prompt)

        print(f"Info: processing message: {message}")  # debug

        return Response(
            chatbot.chat(message),
            mimetype='text/event-stream',
            headers=sse_headers
        )

    except Exception as e:
        print(f"Error in /chat route: {str(e)}")  # debug
        return jsonify({'error': str(e)}), 500

@app.route('/')
def index():
    """Serve the single-page front end from the static folder."""
    page = send_from_directory('static', 'index.html')
    return page


# Start the Flask development server (debug mode; not for production use).
if __name__ == '__main__':
    app.run(debug=True)
