import json
import math
import time

from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi.responses import StreamingResponse
from sqlalchemy.ext.asyncio import AsyncSession
from openai import AsyncOpenAI
import codecs
from typing import AsyncGenerator

from app import crud, schemas
from app.api import deps
from app.core.config import settings
from app.core.logging_config import get_logger
from app.models import User
from app.schemas.base import UnifiedResponse, success_response

# Initialize AI client from settings.
# Module-level singleton: one AsyncOpenAI client instance is shared by every
# request handled by this router.
llm_client = AsyncOpenAI(
    api_key=settings.OPENAI_API_KEY,
    base_url=settings.OPENAI_BASE_URL
)

# Router for all chat-related endpoints; mounted by the parent API module.
router = APIRouter()
logger = get_logger(__name__)


async def get_ai_response_streamer(db: AsyncSession, conversation_id: str, messages: list, thinking_mode: bool = False):
    """
    Stream the AI response as Server-Sent Events and persist the full reply.

    Yields SSE ``data:`` frames, each terminated by a blank line:
      * ``type: 1`` frames carry reasoning ("thinking") deltas,
      * ``type: 0`` frames carry regular answer deltas,
      * a final ``data: [DONE]`` frame marks the end of the stream.

    On success, the accumulated assistant message (content plus optional
    reasoning) is saved to the database before ``[DONE]`` is emitted. On
    failure, an error frame followed by ``[DONE]`` is emitted and nothing
    is saved.

    Args:
        db: Async database session used to persist the assistant message.
        conversation_id: Conversation the reply belongs to (echoed in frames).
        messages: Chat history in OpenAI chat-completions message format.
        thinking_mode: When True, use the thinking model and forward reasoning
            deltas; otherwise deep thinking is explicitly disabled.
    """
    # Accumulators for the full response, saved to the DB once streaming ends.
    full_response_content = ""
    full_reasoning_content = ""

    try:
        # Select the model for the requested mode.
        model_name = settings.CHAT_THINKING_MODEL if thinking_mode else settings.MARKITDOWN_MODEL
        # When thinking mode is off, explicitly disable the provider's
        # deep-thinking capability via the vendor-specific extra_body.
        extra_body = {} if thinking_mode else {"thinking": {"type": "disabled"}}

        # Call the AI model with streaming enabled.
        stream = await llm_client.chat.completions.create(
            model=model_name,
            messages=messages,
            extra_body=extra_body,
            stream=True
        )

        # Forward each delta to the client as it arrives.
        async for chunk in stream:
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            content = getattr(delta, 'content', None)
            reasoning_content = getattr(delta, 'reasoning_content', None)

            # Reasoning deltas are only forwarded in thinking mode (type 1).
            if reasoning_content and thinking_mode:
                full_reasoning_content += reasoning_content
                yield f"data: {json.dumps({'delta': reasoning_content, 'conversation_id': conversation_id, 'type': 1}, ensure_ascii=False)}\n\n"

            # Regular answer deltas (type 0).
            if content:
                full_response_content += content
                yield f"data: {json.dumps({'delta': content, 'conversation_id': conversation_id, 'type': 0}, ensure_ascii=False)}\n\n"

    except Exception as e:
        logger.error(f"Error calling AI model: {e}")
        error_message = json.dumps({"error": "Failed to get response from AI model.", "conversation_id": conversation_id}, ensure_ascii=False)
        yield f"data: {error_message}\n\n"
        # SSE events must be terminated by a blank line, [DONE] included.
        yield "data: [DONE]\n\n"
        return

    # After the stream is complete, save the assistant's full message
    # (and reasoning, if any) before signalling the end of the stream.
    assistant_message_in = schemas.MessageCreate(
        conversation_id=conversation_id,
        role="assistant",
        content=full_response_content,
        reasoning_content=full_reasoning_content if full_reasoning_content else None,
    )
    await crud.crud_message.create(db, obj_in=assistant_message_in)

    # End-of-stream marker, terminated with a blank line as SSE requires.
    yield "data: [DONE]\n\n"




@router.post("")
async def chat_stream(
        request: schemas.ChatRequest,
        db: AsyncSession = Depends(deps.get_db),
        current_user: User = Depends(deps.get_current_user)
):
    """
    Handle a chat request: resolve the conversation, build the model prompt
    from history and any attached files, and stream the AI reply as SSE.

    Raises:
        HTTPException 404: conversation not found / not owned by the caller,
            or a referenced file id is missing or owned by another user.
    """
    # 1. Get or create the conversation (ownership enforced).
    if request.conversation_id:
        conversation = await crud.crud_conversation.get(db, id=request.conversation_id)
        if not conversation or conversation.user_id != current_user.id:
            raise HTTPException(status_code=404, detail="Conversation not found")
    else:
        conv_in = schemas.ConversationCreate(user_id=current_user.id, title=request.message[:50])
        conversation = await crud.crud_conversation.create(db, obj_in=conv_in)

    # Ensure conversation ID is a string for the streamer.
    conversation_id_str = str(conversation.id)

    # 2. Fetch history BEFORE saving the new user message; otherwise the
    # just-saved message appears in the history AND is appended again below,
    # duplicating it in the prompt sent to the model.
    history = await crud.crud_message.get_by_conversation(db, conversation_id=conversation.id)

    # 3. Save the user's message with all attached file IDs.
    user_message_in = schemas.MessageCreate(
        conversation_id=conversation.id,
        role="user",
        content=request.message,
        file_ids=request.file_ids
    )
    await crud.crud_message.create(db, obj_in=user_message_in)

    # 4. Construct the prompt: system prompt, file context, history, message.
    messages = [{
        "role": "system",
        "content": "你是医问AI助手，你是由博导公司研发，你负责帮助用户解答医疗问题"
    }]
    prompt_content = ""
    message_content_parts = []
    has_images = False

    # Add file content if provided (handles multiple files; 404 on any file
    # that is missing or belongs to another user).
    if request.file_ids:
        for i, file_id in enumerate(request.file_ids):
            file_record = await crud.crud_file.get(db, id=file_id)
            if not file_record or file_record.user_id != current_user.id:
                raise HTTPException(status_code=404, detail=f"File with ID {file_id} not found")
            if file_record.content_type and file_record.content_type.startswith('image/'):
                has_images = True
                # Convert the stored object path to a full MinIO URL.
                full_image_url = f"{settings.MINIO_ENDPOINT}/{settings.MINIO_BUCKET_NAME}/{file_record.file_path}"
                message_content_parts.append({
                    "type": "image_url",
                    "image_url": {
                        "url": full_image_url
                    }
                })
            elif file_record.parsed_content:
                prompt_content += f"Reference Document {i+1}:\n---\n{file_record.parsed_content}\n---\n\n"

    # Add historical messages.
    for msg in history:
        messages.append({"role": msg.role, "content": msg.content})

    # Current user message: multimodal content parts when images are attached,
    # plain text otherwise. Document context is prepended in both cases.
    prompt_content += request.message
    if has_images:
        message_content_parts.insert(0, {
            "type": "text",
            "text": prompt_content
        })
        messages.append({"role": "user", "content": message_content_parts})
    else:
        messages.append({"role": "user", "content": prompt_content})

    # 5. Stream the model's reply back to the client as Server-Sent Events.
    response_streamer = get_ai_response_streamer(db, conversation_id_str, messages, request.thinkingMode)
    return StreamingResponse(
        response_streamer,
        media_type="text/event-stream; charset=utf-8",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    )


@router.get("/conversations/count", response_model=UnifiedResponse[dict])
async def get_conversation_count(
        db: AsyncSession = Depends(deps.get_db),
        current_user: User = Depends(deps.get_current_user)
):
    """Return how many conversations the current user owns."""
    user_conversations = await crud.crud_conversation.get_by_user(db, user_id=current_user.id)
    # Guard against a None return from the CRUD layer.
    total = len(user_conversations) if user_conversations else 0
    return success_response({"count": total, "user_id": str(current_user.id)})


@router.get("/conversations", response_model=UnifiedResponse[schemas.ConversationListResponse])
async def get_conversations(
        page: int = Query(1, ge=1, description="页码，从1开始"),
        page_size: int = Query(20, ge=1, le=100, description="每页大小，最大100"),
        db: AsyncSession = Depends(deps.get_db),
        current_user: User = Depends(deps.get_current_user)
):
    """
    List the current logged-in user's conversations, paginated.

    - **page**: 1-based page number
    - **page_size**: items per page (default 20, max 100)
    """
    conversations, total = await crud.crud_conversation.get_user_conversations_paginated(
        db, user_id=current_user.id, page=page, page_size=page_size
    )

    # Total number of pages for the pagination metadata.
    total_pages = 0 if total <= 0 else math.ceil(total / page_size)

    # Fetch the message count of every listed conversation in one query.
    counts_by_id = await crud.crud_conversation.get_conversation_message_counts(
        db, conversation_ids=[conv.id for conv in conversations]
    )

    # Convert ORM rows into the response schema.
    items = []
    for conv in conversations:
        items.append(schemas.ConversationListItem(
            id=conv.id,
            user_id=conv.user_id,
            title=conv.title,
            created_at=conv.created_at,
            updated_at=conv.updated_at,
            message_count=counts_by_id.get(conv.id, 0)
        ))

    payload = schemas.ConversationListResponse(
        items=items,
        total=total,
        page=page,
        page_size=page_size,
        total_pages=total_pages
    )
    return success_response(payload)


@router.get("/conversations/{conversation_id}", response_model=UnifiedResponse[schemas.ConversationWithMessagesPaginated])
async def get_conversation(
        conversation_id: str,
        page: int = Query(1, ge=1, description="页码，从1开始"),
        page_size: int = Query(20, ge=1, le=100, description="每页大小，最大100"),
        db: AsyncSession = Depends(deps.get_db),
        current_user: User = Depends(deps.get_current_user)
):
    """
    Return one conversation's details plus a paginated slice of its messages.

    - **conversation_id**: target conversation id
    - **page**: 1-based page number
    - **page_size**: items per page (default 20, max 100)
    """
    # Ownership check: a conversation belonging to another user is treated
    # as not found.
    conversation = await crud.crud_conversation.get(db, id=conversation_id)
    if not conversation or conversation.user_id != current_user.id:
        raise HTTPException(status_code=404, detail="Conversation not found")

    page_messages, message_total = await crud.crud_message.get_by_conversation_paginated(
        db, conversation_id=conversation.id, page=page, page_size=page_size
    )

    # Total number of message pages for the pagination metadata.
    page_count = math.ceil(message_total / page_size) if message_total > 0 else 0

    # Build a fresh response model rather than mutating the SQLAlchemy object.
    validated_messages = [schemas.MessageInDB.model_validate(msg) for msg in page_messages]
    return success_response(schemas.ConversationWithMessagesPaginated(
        id=conversation.id,
        user_id=conversation.user_id,
        title=conversation.title,
        created_at=conversation.created_at,
        updated_at=conversation.updated_at,
        messages=validated_messages,
        total=message_total,
        page=page,
        page_size=page_size,
        total_pages=page_count
    ))


@router.delete("/conversations/{conversation_id}", response_model=UnifiedResponse[dict])
async def delete_conversation(
        conversation_id: str,
        db: AsyncSession = Depends(deps.get_db),
        current_user: User = Depends(deps.get_current_user)
):
    """Delete one of the current user's conversations and all its messages."""
    # Conversations owned by other users are treated as not found.
    target = await crud.crud_conversation.get(db, id=conversation_id)
    if target is None or target.user_id != current_user.id:
        raise HTTPException(status_code=404, detail="Conversation not found")

    await crud.crud_conversation.remove(db, id=conversation_id)
    return success_response({"message": "Conversation deleted successfully"})
