import os
import tempfile
import uuid

from flask import Blueprint, Response, jsonify, request, send_file, stream_with_context

from framework.frontend.speech_client import synthesize_speech
from tool.auth.cookieCheck import cookie_jwt_required
from tool.config import config as tool_config
from webui.views.llm_serve.audioQA_ocr import qa
from webui.views.llm_serve.contentGen import question_gen, listening_gen

# Application configuration, loaded once at import time.
config = tool_config.get_config("config.toml")
llm_serve_bp = Blueprint('llm_serve', __name__)

# In-process cache of streamed answer text keyed by (user, task_id), so a
# follow-up /api/qa/audio call can synthesize speech for it.  Could be
# swapped for redis or a database.
qa_result_cache = {}

@llm_serve_bp.route("/api/qa", methods=["POST"])
@cookie_jwt_required
def qa_view():
    """Stream an LLM answer for an uploaded image and/or question text.

    Accepts multipart form data with an optional "image" file and an
    optional "text" field; at least one must be present, otherwise a 400
    JSON error is returned.  The answer is streamed back as a
    text/event-stream, and the final line carries "[task_id]:<uuid>" which
    the client may use to request TTS audio via /api/qa/audio.
    """
    user = request.user["user"]
    image_file = request.files.get("image")
    text = request.form.get("text", "").strip()

    if not image_file and not text:
        return {"error": "图片和文本不能同时为空"}, 400

    # Persist the uploaded image so the QA backend can read it from disk.
    image_path = None
    if image_file:
        from werkzeug.utils import secure_filename
        upload_dir = config["FILE_SYSTEM"]["UPLOAD_FOLDER"]
        os.makedirs(upload_dir, exist_ok=True)
        # Prefix with a UUID: secure_filename() may return an empty string
        # for unsafe names, and two users uploading "photo.jpg" at the same
        # time must not clobber each other's files.
        filename = f"{uuid.uuid4().hex}_{secure_filename(image_file.filename)}"
        image_path = os.path.join(upload_dir, filename)
        image_file.save(image_path)

    # Task id lets the client fetch the synthesized audio afterwards.
    task_id = str(uuid.uuid4())
    full_text = ""

    def generate_response():
        # Stream model chunks to the client while accumulating the full
        # answer for later speech synthesis.
        nonlocal full_text
        for chunk in qa(image_path, text, user):
            full_text += chunk
            yield chunk
        # NOTE(review): unbounded in-process cache — entries are only
        # removed when /api/qa/audio consumes them; consider a TTL store.
        qa_result_cache[(user, task_id)] = full_text
        yield f"[task_id]:{task_id}\n"

    return Response(
        stream_with_context(generate_response()),
        mimetype="text/event-stream",
        headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}
    )

@llm_serve_bp.route("/api/qa/audio", methods=["POST"])
@cookie_jwt_required
def qa_audio():
    """Synthesize and return MP3 audio for a previously streamed answer.

    Expects JSON {"task_id": "<uuid>"}; the answer text must have been
    cached by qa_view under (user, task_id).  The cache entry is consumed
    after a successful synthesis.  Returns 400 if the task id is missing
    or unknown.
    """
    user = request.user["user"]
    # silent=True: a missing/invalid JSON body yields our 400 below instead
    # of Flask raising its own error mid-handler.
    payload = request.get_json(silent=True) or {}
    task_id = payload.get("task_id")
    if not task_id or (user, task_id) not in qa_result_cache:
        return jsonify({"error": "未找到对应的回答文本"}), 400

    full_text = qa_result_cache[(user, task_id)]

    # Per-request temp file: the previous shared "./tmp.mp3" was clobbered
    # by concurrent requests.
    tmp_path = os.path.join(tempfile.gettempdir(), f"qa_{uuid.uuid4().hex}.mp3")
    try:
        audio_path = synthesize_speech(full_text, tmp_path)
        with open(audio_path, "rb") as f:
            audio_data = f.read()
    finally:
        # Best-effort cleanup of the synthesized file.
        try:
            os.remove(tmp_path)
        except OSError:
            pass

    qa_result_cache.pop((user, task_id), None)  # consume the cache entry
    return Response(audio_data, mimetype="audio/mpeg", headers={
        "Content-Disposition": "inline; filename=answer.mp3",
        "Cache-Control": "no-cache"
    })

@llm_serve_bp.route("/api/generate/question", methods=["POST"])
@cookie_jwt_required
def question_gen_view():
    """Generate practice questions for the current user.

    Reads "questionNum" from the JSON body (defaulting to 0) and responds
    with {"question": <generated payload>} and HTTP 200.
    """
    current_user = request.user["user"]
    requested_count = request.json.get("questionNum", 0)
    generated = question_gen(current_user, requested_count)
    return jsonify({"question": generated}), 200

@llm_serve_bp.route("/api/generate/listening", methods=["GET"])
@cookie_jwt_required
def listening_gen_view():
    """Generate a listening-comprehension passage and return it as MP3.

    Drains the streamed text from listening_gen, synthesizes speech for
    the complete text, and responds with the audio bytes.
    """
    user = request.user["user"]

    # Speech synthesis needs the whole passage; join the streamed chunks
    # in one pass instead of quadratic string concatenation.
    full_text = "".join(listening_gen(user))

    # Per-request temp file: the previous shared "./tmp.mp3" was clobbered
    # by concurrent requests (including /api/qa/audio).
    tmp_path = os.path.join(tempfile.gettempdir(), f"listening_{uuid.uuid4().hex}.mp3")
    try:
        audio_path = synthesize_speech(full_text, tmp_path)
        with open(audio_path, "rb") as f:
            audio_data = f.read()
    finally:
        # Best-effort cleanup of the synthesized file.
        try:
            os.remove(tmp_path)
        except OSError:
            pass

    return Response(audio_data, mimetype="audio/mpeg", headers={
        "Content-Disposition": "inline; filename=listening.mp3",
        "Cache-Control": "no-cache"
    })