"""
字段提取API端点

提供字段提取功能的RESTful API
"""

from fastapi import APIRouter, HTTPException, status
from fastapi.responses import StreamingResponse
from loguru import logger
import json
import asyncio
import time
import uuid

from src.schemas.extraction import ExtractionRequest, ExtractionResponse, ExtractionData, ExtractionStatus
from src.schemas.base import success_response
from src.services.extraction.service import ExtractionService

router = APIRouter(prefix="/extraction", tags=["字段提取"])


# 创建提取服务实例
extraction_service = ExtractionService()


# Strong references to tasks that outlive their originating request.
# asyncio's event loop only keeps a *weak* reference to tasks, so without
# this a timed-out extraction task could be garbage-collected before it
# finishes writing its result to the task store.
_background_tasks: set = set()


@router.post(
    "/sync",
    response_model=ExtractionResponse,
    status_code=status.HTTP_200_OK,
    summary="同步字段提取",
    description="""
    从多个附件中同步提取字段信息，返回完整结果。
    如果处理时间超过timeout，将返回taskId供后续查询。
    
    处理流程：
    1. 使用MinerU解析上传的文件
    2. 根据规则配置提取字段
    3. 对提取结果进行溯源定位
    4. 生成高亮预览图
    """,
    response_description="字段提取结果"
)
async def extract_fields_sync(
    request: ExtractionRequest
) -> ExtractionResponse:
    """
    Synchronous field extraction.

    Waits up to ``request.timeout`` milliseconds for the extraction to
    complete.  If it finishes in time the full result is returned;
    otherwise the task keeps running in the background and a PENDING
    response carrying the (unprefixed) task id is returned so the caller
    can poll for the result later.

    Args:
        request: Extraction request (files, rule config id, timeout, ids).

    Returns:
        ExtractionResponse: Wrapped full result, or a PENDING placeholder.

    Raises:
        HTTPException: 400 for invalid parameters, 500 for processing errors.
    """
    try:
        logger.info(f"收到字段提取请求: request_id={request.request_id}, "
                   f"type={request.extraction_type}, "
                   f"files={len(request.files)}, "
                   f"rule_config_id={request.rule_config_id}, "
                   f"timeout={request.timeout}ms")
        
        # Timeout arrives in milliseconds; default to 300 s when absent.
        timeout_seconds = request.timeout / 1000 if request.timeout else 300
        
        # Build the task id up front so it exists before the task starts.
        # The "extraction_" prefix namespaces extraction tasks away from
        # review tasks that share the same task store.
        base_task_id = request.task_id or request.request_id or f"task_{uuid.uuid4().hex[:16]}"
        task_id = f"extraction_{base_task_id}"
        
        # Re-create the request with the prefixed task id attached.
        request_with_task = ExtractionRequest(
            task_id=task_id,
            extraction_type=request.extraction_type,
            files=request.files,
            rule_config_id=request.rule_config_id,
            timeout=request.timeout,
            request_id=request.request_id
        )
        
        # Deliberately NOT asyncio.wait_for: wait_for cancels the task on
        # timeout, whereas we want it to keep running in the background.
        extraction_task = asyncio.create_task(
            extraction_service.extract_fields(request_with_task)
        )
        
        # asyncio.wait() observes the deadline without cancelling the task.
        done, pending = await asyncio.wait(
            {extraction_task},
            timeout=timeout_seconds,
            return_when=asyncio.FIRST_COMPLETED
        )
        
        if done:
            # Finished within the deadline: unwrap and return the result.
            response = extraction_task.result()
            
            logger.info(f"字段提取完成: request_id={request.request_id}, "
                       f"status={response.status}, "
                       f"success={response.success_count}/{response.total_fields}")
            
            # Expose the caller's original (unprefixed) task id.
            response_data = response.model_dump()
            if response_data.get("task_id"):
                response_data["task_id"] = base_task_id
            
            return success_response(
                data=response_data,
                message="success"
            )
        
        # Timed out: keep a strong reference so the still-running task cannot
        # be garbage-collected, and drop it from the set once it completes.
        _background_tasks.add(extraction_task)
        extraction_task.add_done_callback(_background_tasks.discard)
        
        logger.warning(f"字段提取超时，任务转为后台继续执行: task_id={task_id}, "
                      f"timeout={timeout_seconds}s")
        
        # The task keeps executing; its result lands in the task store under
        # the prefixed id, while the client polls with the unprefixed one.
        pending_data = ExtractionData(
            request_id=request.request_id or base_task_id or f"req_{int(time.time() * 1000)}",
            task_id=base_task_id,  # unprefixed id for client-side polling
            status=ExtractionStatus.PENDING,
            extraction_type=request.extraction_type,
            rule_config_id=request.rule_config_id,  # may be None until the task resolves the actual config
            results=[],
            total_fields=0,
            success_count=0,
            failed_count=0,
            processing_time_ms=int(timeout_seconds * 1000)
        )
        
        return success_response(
            data=pending_data.model_dump(),
            message="任务处理中，请稍后查询结果"
        )
        
    except ValueError as e:
        logger.error(f"字段提取参数错误: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e)
        )
    except Exception as e:
        # Bug fix: loguru ignores the stdlib-style ``exc_info=True`` kwarg
        # (it is only bound as an extra record field), so the traceback was
        # never logged. ``logger.exception`` is the loguru way to attach it.
        logger.exception(f"字段提取失败: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"字段提取失败: {str(e)}"
        )


@router.post(
    "/stream",
    summary="流式字段提取",
    description="""
    使用Server-Sent Events (SSE)流式返回字段提取结果。
    客户端需要处理SSE事件流。
    
    事件类型：
    - task_start: 任务开始
    - field_progress: 字段处理进度
    - field_complete: 单个字段完成
    - task_complete: 任务完成
    - error: 错误事件
    """,
    response_class=StreamingResponse
)
async def extract_fields_stream(
    request: ExtractionRequest
):
    """
    Streaming field extraction.

    Relays extraction progress and results to the client in real time via
    Server-Sent Events; a terminal ``error`` event is emitted on failure
    instead of aborting the HTTP response mid-stream.

    Args:
        request: Extraction request.

    Returns:
        StreamingResponse: SSE event stream (``text/event-stream``).
    """
    async def event_generator():
        """Yield SSE frames for each service event; emit an error event on failure."""
        try:
            logger.info(f"开始流式字段提取: request_id={request.request_id}, "
                       f"type={request.extraction_type}, "
                       f"files={len(request.files)}")
            
            # One SSE frame per service event: an "event:" line naming the
            # event type, then a "data:" line with the JSON payload.
            async for event in extraction_service.extract_fields_stream(request):
                yield f"event: {event.event_type}\n"
                yield f"data: {json.dumps(event.data, ensure_ascii=False)}\n\n"
                
                # Brief yield to the event loop so frames are flushed promptly.
                await asyncio.sleep(0.01)
            
            logger.info(f"流式字段提取完成: request_id={request.request_id}")
            
        except Exception as e:
            # Bug fix: loguru ignores the stdlib-style ``exc_info=True``
            # kwarg, so the traceback was never logged; ``logger.exception``
            # records it properly.
            logger.exception(f"流式字段提取失败: {e}")
            # Surface the failure to the client as a terminal SSE error event.
            error_event = {
                "error_message": str(e),
                "request_id": request.request_id
            }
            yield f"event: error\n"
            yield f"data: {json.dumps(error_event, ensure_ascii=False)}\n\n"
    
    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no"  # disable nginx response buffering for SSE
        }
    )

