import json
import os
import time

from flask import Blueprint, Response, request
from flask import stream_with_context
from langchain_openai import ChatOpenAI

from src.common.logger import getLogger

# Module-level logger from the project's logging helper.
logger = getLogger()

# Blueprint grouping the streaming-chat route; mounted by the application.
api = Blueprint('stream_chat', __name__)

@api.route('/llm/stream_chat', methods=['GET'])
def chat():
    """Stream an LLM chat completion to the client as Server-Sent Events.

    Reads the user prompt from the ``prompt`` query parameter and returns a
    ``text/event-stream`` response: a ``connect`` event, one ``data:`` frame
    per generated chunk, then a ``complete`` event.  A missing/empty prompt
    produces a single JSON error frame instead of a stream.
    """
    prompt = request.args.get("prompt")
    # Lazy %-style args avoid f-string formatting work when INFO is disabled.
    logger.info("chat request prompt: %s", prompt)

    def event_stream():
        # Initial handshake so the client knows the stream is live.
        yield "event: connect\ndata: Connection established\n\n"

        # Reject a missing/empty prompt with a meaningful error payload
        # (the previous message, "Responsing", described nothing).
        if not prompt:
            yield f"data: {json.dumps({'error': 'Missing required query parameter: prompt'})}\n\n"
            return

        # Relay each generated chunk as one SSE data frame.
        for data in generate_response(prompt):
            logger.info("event_stream data: %s", data)
            yield f"data: {json.dumps(data)}\n\n"
            time.sleep(0.01)  # throttle to pace the stream

        # Signal normal end-of-stream to the client.
        yield "event: complete\ndata: Streaming finished\n\n"

    # stream_with_context keeps the request context alive while the
    # response generator is being consumed.
    return Response(stream_with_context(event_stream()), mimetype="text/event-stream")

def generate_response(prompt):
    """Yield chat-completion chunks for *prompt* from the streaming LLM.

    Yields each chunk's text content as a plain string.  On any failure a
    single ``{'error': ...}`` dict is yielded instead of raising, so the
    SSE stream can still report the problem to the client.  The caller is
    responsible for JSON-serializing and SSE-framing each yielded item.
    """
    try:
        chat_llm = ChatOpenAI(
            base_url='https://ms-fc-9a6febb8-015a.api-inference.modelscope.cn/v1',
            # SECURITY: never hard-code credentials.  Prefer the environment;
            # the literal fallback only preserves existing behavior — rotate
            # this key and delete the fallback.
            api_key=os.environ.get(
                "MODELSCOPE_API_KEY",
                "ms-758ddc9b-5512-45a6-a8e8-d1e9e4466d42",
            ),
            model="Qwen/Qwen3-0.6B-GGUF",
            streaming=True,
        )
        logger.info("generate_response chat_llm: %s", chat_llm)

        # Stream chunks from the model as they arrive.
        for chunk in chat_llm.stream(prompt):
            logger.info("generate_response response: %s", chunk)
            if hasattr(chunk, "content"):
                # Yield the bare content: the SSE framing ("data: ...\n\n")
                # is added by the caller, so the previous embedded "\n\n"
                # ended up *inside* the JSON payload and corrupted it.
                yield chunk.content
    except Exception as e:
        # The caller json.dumps() every item, so yield a dict rather than a
        # pre-serialized JSON string (which was being double-encoded).
        yield {"error": str(e)}
