import json
from django.http import StreamingHttpResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
import asyncio
from tanxi.client.utils.chat import AsyncChatManager
from tanxi.client.Handler.JsonResponse import create_response
from tanxi.client.Handler.Logger import log_info, log_error
# Module-level chat manager: a single shared AsyncChatManager instance
# serves every request handled by the views in this module.
chat_manager = AsyncChatManager()

@require_POST
@csrf_exempt
def chat(request):
    """Stream a chat completion to the client as Server-Sent Events.

    Expects a JSON body with keys:
        userinput (str): the user's message; required.
        modelname (str): model identifier; falls back to "tongyi" when absent.
        user_id (str): conversation owner; defaults to "anonymous".

    Returns:
        StreamingHttpResponse with content type ``text/event-stream`` whose
        body is a sequence of ``data: <json>\\n\\n`` frames, or a 400 JSON
        response (via create_response) for malformed/missing input.
    """
    try:
        # Parse the JSON payload sent by the frontend.
        data = json.loads(request.body)
        user_input = data.get('userinput')
        model_name = data.get('modelname')
        user_id = data.get('user_id', 'anonymous')
        log_info(f"接收 user_id={user_id}, input={user_input}, model={model_name}")

    except json.JSONDecodeError:
        return create_response(
            code=400,
            message='Error',
            remarks='无效的 JSON 数据'
        )

    if not user_input:
        return create_response(
            code=400,
            message='Error',
            remarks='没有提供输入'
        )

    # Streaming-response generator: bridges the async chat stream into the
    # synchronous generator Django's StreamingHttpResponse consumes.
    def event_stream():
        # Create the loop *before* the try block so the finally clause can
        # never reference an unbound name (the original risked NameError if
        # loop creation failed).
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        log_info("开始获取流式响应")

        # Async generator producing one SSE frame per model chunk.
        async def generate():
            async for chunk in chat_manager.stream_response(
                user_id=user_id,
                model_name=model_name or "tongyi",  # default to the Tongyi model
                user_input=user_input
            ):
                log_info(f"流式响应: {chunk}")
                yield f"data: {json.dumps(chunk)}\n\n"

        gen = generate()
        try:
            # Pull chunks one at a time so each is yielded to the client
            # as soon as the model produces it.
            while True:
                try:
                    yield loop.run_until_complete(anext(gen))
                except StopAsyncIteration:
                    break  # stream finished normally

        except Exception as e:
            log_error(f"流式响应错误: {e}")
            yield f"data: {json.dumps({'error': str(e)})}\n\n"
        finally:
            try:
                # Close the async generator while the loop is still alive so
                # any cleanup inside stream_response (finally / async with)
                # actually runs — the original leaked it.
                loop.run_until_complete(gen.aclose())
            finally:
                asyncio.set_event_loop(None)
                loop.close()

    return StreamingHttpResponse(
        event_stream(),
        content_type='text/event-stream',
        status=200
    )


@require_POST
@csrf_exempt
def vision(request):
    """Answer a question about an image using the Tongyi vision model.

    Expects a JSON body with keys:
        input (str): the user's question about the image; required.
        image (str): image payload or URL; required.
        isUrl (bool): whether ``image`` is a URL; defaults to False.
        user_id (str): conversation owner; defaults to "anonymous".

    Returns:
        A JSON response (via create_response): 200 with the model's answer,
        400 for malformed/missing input, 500 on processing failure.
    """
    try:
        data = json.loads(request.body)
        user_input = data.get('input')
        image = data.get('image')
        # 'isUrl' is the wire-format key (part of the frontend API); only the
        # local name is snake_cased.
        is_url = data.get('isUrl', False)
        user_id = data.get('user_id', 'anonymous')
        log_info(f"视觉请求 user_id={user_id}, input={user_input}")
    except json.JSONDecodeError:
        return create_response(
            code=400,
            message="Error",
            remarks="无效的JSON数据"
        )

    if not user_input or not image:
        return create_response(
            code=400,
            message="Error",
            remarks="没有提供输入或图片"
        )

    try:
        # Call through the shared instance rather than the class: this is
        # correct regardless of whether chating_with_tongyi_vision is an
        # instance, static, or class method, and fixes the missing-self
        # TypeError the original class-level call would raise for an
        # instance method.
        answer = chat_manager.chating_with_tongyi_vision(user_input, image, is_url)

        # Record both sides of the exchange in the conversation history on a
        # single event loop instead of spinning up one per message.
        async def _record_history():
            await chat_manager.add_message(
                user_id=user_id,
                role="user",
                content=f"[IMAGE] {user_input}"
            )
            await chat_manager.add_message(
                user_id=user_id,
                role="assistant",
                content=answer
            )

        asyncio.run(_record_history())

        return create_response(
            data=answer,
            code=200,
            message="Success",
            remarks="视觉回答成功"
        )
    except Exception as e:
        log_error(f"视觉处理错误: {e}")
        return create_response(
            code=500,
            message="Error",
            remarks=f"视觉处理失败: {str(e)}"
        )