from datetime import datetime
import aiohttp
import asyncio
import json
from flask import Blueprint, request, current_app, Response
from util.result import Result

# Blueprint grouping the LLM-invocation endpoints; registered under the name 'llm'.
api_bp = Blueprint('llm', __name__)


# LLM streaming endpoint (Server-Sent Events).
@api_bp.route('/llm/stream', methods=['POST'])
def llm_stream():
    """Stream an LLM chat completion to the client as SSE.

    Expects a JSON body with a required ``prompt`` string and an optional
    ``history`` list of 2-item (type, content) pairs. The async aiohttp
    stream produced by ``async_stream`` is driven on a private event loop
    per request so it can be consumed by Flask's synchronous generator
    protocol.

    Returns:
        A ``text/event-stream`` Response on success, or a JSON error body
        with status 400 (bad input) / 500 (unexpected failure).
    """
    try:
        # 1. Parse and validate the request payload.
        data = request.get_json()
        if not data:
            return json.dumps({"error": "请输入请求数据"}), 400

        prompt = data.get('prompt', '').strip()
        if not prompt:
            return json.dumps({"error": "prompt 是必填项"}), 400

        # 2. Read LLM connection settings from the app config.
        llm_api_url = current_app.config['LLM_API_URL']
        llm_auth_token = current_app.config['LLM_AUTH_TOKEN']
        llm_default_params = current_app.config['LLM_DEFAULT_PARAMS']

        # 3. Convert prior conversation turns into chat messages.
        messages = []
        history = data.get('history')
        if isinstance(history, list):
            for item in history:
                # Each turn is expected as a (type, content) pair;
                # malformed entries and empty contents are skipped.
                if isinstance(item, (list, tuple)) and len(item) == 2:
                    msg_type, msg_content = item
                    role = "user" if msg_type == "user" else "assistant"
                    cleaned_content = str(msg_content).strip()
                    if cleaned_content:
                        messages.append({"role": role, "content": cleaned_content})

        # 4. Append the current user input as the final message.
        messages.append({"role": "user", "content": prompt})

        # 5. Build the upstream request body: defaults + conversation.
        payload = {
            **llm_default_params,
            "messages": messages
        }

        # 6. Sync generator that drives the async stream on a private loop.
        def stream_generator():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            async_gen = async_stream(llm_api_url, llm_auth_token, payload)
            gen_task = asyncio.ensure_future(async_gen.__anext__(), loop=loop)

            try:
                while True:
                    # Run the loop until the next chunk is available.
                    item = loop.run_until_complete(gen_task)
                    yield item

                    # Schedule retrieval of the following chunk.
                    gen_task = asyncio.ensure_future(async_gen.__anext__(), loop=loop)
            except StopAsyncIteration:
                pass  # upstream stream finished normally
            except Exception as e:
                # Surface the failure to the client as an SSE error event.
                yield f"data: {json.dumps({'error': str(e)})}\n\n"
            finally:
                # Make sure the async generator and the event loop are
                # always torn down, even on client disconnect.
                try:
                    if not gen_task.done():
                        gen_task.cancel()
                        # BUG FIX: run_until_complete() re-raises
                        # CancelledError for a cancelled task; previously
                        # that exception escaped this block and skipped
                        # async_gen.aclose(), leaking the aiohttp
                        # session/response. Swallow the expected cancel.
                        try:
                            loop.run_until_complete(gen_task)
                        except asyncio.CancelledError:
                            pass
                    loop.run_until_complete(async_gen.aclose())
                finally:
                    loop.run_until_complete(loop.shutdown_asyncgens())
                    loop.close()

        # 7. SSE response; headers disable proxy/client-side buffering.
        return Response(
            stream_generator(),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache, no-store',
                'Connection': 'keep-alive',
                'X-Accel-Buffering': 'no',
                'Transfer-Encoding': 'chunked'
            }
        )

    except Exception as e:
        error_msg = f"大模型流式调用异常: {str(e)}"
        current_app.logger.error(error_msg)
        return json.dumps({"error": "大模型调用失败，请稍后重试"}), 500


# Standalone async generator that proxies the upstream LLM SSE stream.
async def async_stream(llm_api_url, llm_auth_token, payload):
    """Yield SSE-formatted chunks from the upstream LLM endpoint.

    Opens a streaming POST to *llm_api_url* with bearer-token auth,
    strips the upstream ``data:`` framing from each line and re-emits the
    content as an SSE ``data:`` event. A closing message is emitted when
    the upstream sends ``[DONE]``; a non-200 status is reported as a
    single SSE error event and the stream ends.
    """
    async with aiohttp.ClientSession() as session:
        async with session.post(
                url=llm_api_url,
                headers={
                    "Authorization": f"Bearer {llm_auth_token}",
                    "Content-Type": "application/json",
                    "Accept": "text/event-stream"
                },
                json=payload,
                timeout=None,  # streaming response: no overall timeout
                ssl=False  # NOTE(review): TLS verification disabled — confirm intended
        ) as response:
            if response.status != 200:
                error_detail = await response.text()
                error_msg = f"大模型接口请求失败 [{response.status}]: {error_detail[:200]}"
                yield f"data: {json.dumps({'error': error_msg})}\n\n"
                return

            while True:
                line = await response.content.readline()
                if not line:  # EOF: upstream closed the stream
                    break

                # BUG FIX: the original used lstrip('data: '), which strips
                # any of the characters {'d','a','t',':',' '} from the left
                # rather than the literal "data: " prefix, corrupting
                # payloads that begin with those characters. Remove the
                # exact 'data:' prefix instead.
                line_str = line.decode('utf-8').strip()
                if line_str.startswith('data:'):
                    line_str = line_str[len('data:'):].strip()

                if not line_str:  # skip blank keep-alive lines
                    continue

                if line_str == '[DONE]':
                    yield f"data: {json.dumps({'message': '流式对话已结束'})}\n\n"
                    break

                yield f"data: {line_str}\n\n"
