"""
评估 API 端点
"""
from typing import List
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, status, BackgroundTasks
from sqlalchemy.ext.asyncio import AsyncSession
import time
import asyncio

from app.db.session import get_db
from app.crud.record import RecordCRUD
from app.crud.assessment import AssessmentCRUD
from app.core.evaluator import QualityEvaluator
from app.schemas.common import ResponseModel, PaginatedResponse
from app.schemas.assessment import (
    AssessmentCreate,
    AssessmentResponse,
    AssessmentDetailResponse,
    BatchAssessmentRequest,
    BatchAssessmentResponse,
    AssessmentStatistics,
    EvaluationMode,
    AssessmentStatus
)
from app.core.logger import app_logger


router = APIRouter()


@router.post("/evaluate", response_model=ResponseModel[AssessmentDetailResponse])
async def evaluate_record(
    assessment_request: AssessmentCreate,
    db: AsyncSession = Depends(get_db)
):
    """
    Evaluate a single medical record.

    Selects an evaluator based on ``assessment_request.evaluation_mode``
    (rule-based by default, AI-based, or hybrid), runs it against a snapshot
    of the record's fields, and persists the result — updating the latest
    existing assessment for the record if one exists, otherwise creating a
    new one.

    Raises:
        HTTPException 404: the record does not exist.
        HTTPException 504: an AI/hybrid evaluation exceeded the 5-minute timeout.
    """
    start_time = time.time()

    # Load the record to be evaluated
    record = await RecordCRUD.get_by_id(db, assessment_request.record_id)
    if not record:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="病历不存在"
        )

    # Snapshot the fields the evaluators consume
    record_data = {
        'patient_name': record.patient_name,
        'patient_age': record.patient_age,
        'patient_gender': record.patient_gender,
        'department': record.department,
        'visit_date': record.visit_date,
        'chief_complaint': record.chief_complaint,
        'present_illness': record.present_illness,
        'allergy_history': record.allergy_history,
        'past_history': record.past_history,
        'personal_history': record.personal_history,
        'physical_exam': record.physical_exam,
        'auxiliary_exam': record.auxiliary_exam,
        'diagnosis': record.diagnosis,
        'doctor_name': record.doctor_name,
        'raw_data': record.raw_data
    }

    # Commit now: the read is done, and releasing the transaction before the
    # (potentially minutes-long) LLM call avoids idle-transaction timeouts.
    await db.commit()
    app_logger.info(f"✅ 病历数据读取完成，已释放数据库事务，准备调用 LLM 评估")

    # AI-backed evaluations are capped at 5 minutes to avoid waiting forever.
    llm_timeout = 300.0

    try:
        mode = assessment_request.evaluation_mode
        if mode == EvaluationMode.AI_BASED:
            # AI-agent evaluation (imported lazily to defer LLM client deps)
            from app.core.ai_evaluator import AIEvaluator
            evaluator = AIEvaluator(weights=assessment_request.weights)
            eval_result = await asyncio.wait_for(
                evaluator.evaluate(record_data),
                timeout=llm_timeout
            )
        elif mode == EvaluationMode.HYBRID:
            # Hybrid evaluation (rules + AI)
            from app.core.ai_evaluator import HybridEvaluator
            evaluator = HybridEvaluator(weights=assessment_request.weights)
            eval_result = await asyncio.wait_for(
                evaluator.evaluate(record_data),
                timeout=llm_timeout
            )
        else:
            # Default: rule-based evaluation (synchronous, no timeout needed)
            evaluator = QualityEvaluator(weights=assessment_request.weights)
            eval_result = evaluator.evaluate(record_data)

    except asyncio.TimeoutError:
        # Evaluation timed out — surface an actionable error to the client
        app_logger.error(f"❌ AI评估超时（5分钟）：record_id={assessment_request.record_id}")
        raise HTTPException(
            status_code=status.HTTP_504_GATEWAY_TIMEOUT,
            detail="AI评估超时（5分钟）。可能原因：LLM服务响应慢、模型加载失败。建议：1) 检查Ollama/BioGPT服务状态 2) 尝试使用规则评估模式 3) 减小病历长度"
        )

    # Elapsed evaluation time in milliseconds
    duration = int((time.time() - start_time) * 1000)

    assessment_data = {
        'record_id': assessment_request.record_id,
        'evaluation_mode': assessment_request.evaluation_mode.value,
        'overall_score': eval_result['overall_score'],
        'completeness_score': eval_result['completeness_score'],
        'standardization_score': eval_result['standardization_score'],
        'consistency_score': eval_result['consistency_score'],
        'accuracy_score': eval_result['accuracy_score'],
        'summary': eval_result['summary'],
        'suggestions': eval_result['suggestions'],
        'assessment_duration': duration,
        # Default to COMPLETED; downgraded to FAILED below on a parse error
        'status': AssessmentStatus.COMPLETED.value
    }

    if assessment_request.weights:
        assessment_data.update({
            'completeness_weight': assessment_request.weights.completeness,
            'standardization_weight': assessment_request.weights.standardization,
            'consistency_weight': assessment_request.weights.consistency,
            'accuracy_weight': assessment_request.weights.accuracy,
        })

    # FIX: this handling previously ran only on the create path, so
    # re-evaluating a record with an existing assessment silently dropped
    # the raw LLM response and never marked a parse failure as FAILED.
    # Applying it before branching keeps both paths consistent.
    if eval_result.get('ai_raw_response') is not None:
        assessment_data['ai_response'] = {
            'raw': eval_result.get('ai_raw_response'),
            'parse_error': bool(eval_result.get('parse_error', False))
        }
        # Flag unparseable LLM output for manual review
        if eval_result.get('parse_error'):
            assessment_data['status'] = AssessmentStatus.FAILED.value

    # Update the latest assessment for this record if one already exists
    existing_assessment = await AssessmentCRUD.get_by_record_id(db, assessment_request.record_id, latest_only=True)

    if existing_assessment:
        app_logger.info(f"更新已有评估记录: {existing_assessment.id}")
        db_assessment = await AssessmentCRUD.update(
            db=db,
            assessment_id=existing_assessment.id,
            assessment_data=assessment_data,
            issues_data=eval_result['issues']
        )
    else:
        db_assessment = await AssessmentCRUD.create(
            db=db,
            assessment_data=assessment_data,
            issues_data=eval_result['issues']
        )

    # Reload so relationship data is populated for the response model
    db_assessment = await AssessmentCRUD.get_by_id(db, db_assessment.id)

    return ResponseModel(
        message="评估完成",
        data=AssessmentDetailResponse.model_validate(db_assessment)
    )


@router.post("/batch-evaluate", response_model=ResponseModel[BatchAssessmentResponse])
async def batch_evaluate_records(
    batch_request: BatchAssessmentRequest,
    db: AsyncSession = Depends(get_db)
):
    """
    Evaluate multiple medical records in a single request.

    Each record is delegated to the single-record evaluation logic; a failure
    for one record is collected per-record instead of aborting the batch.
    """
    succeeded: List[AssessmentResponse] = []
    failures: List[dict] = []

    for record_id in batch_request.record_ids:
        try:
            # Build the per-record request and reuse the single-record endpoint
            single_request = AssessmentCreate(
                record_id=record_id,
                evaluation_mode=batch_request.evaluation_mode
            )
            single_response = await evaluate_record(single_request, db)
        except HTTPException as http_err:
            failures.append({
                'record_id': str(record_id),
                'error': http_err.detail
            })
        except Exception as err:
            app_logger.error(f"批量评估失败 {record_id}: {err}")
            failures.append({
                'record_id': str(record_id),
                'error': str(err)
            })
        else:
            succeeded.append(AssessmentResponse.model_validate(single_response.data))

    summary = BatchAssessmentResponse(
        total=len(batch_request.record_ids),
        success=len(succeeded),
        failed=len(failures),
        results=succeeded,
        errors=failures
    )

    return ResponseModel(
        message=f"批量评估完成：成功 {len(succeeded)} 条，失败 {len(failures)} 条",
        data=summary
    )


@router.get("/list", response_model=ResponseModel[PaginatedResponse[AssessmentResponse]])
async def get_assessments_list(
    page: int = 1,
    page_size: int = 10,
    db: AsyncSession = Depends(get_db)
):
    """
    Return a paginated list of assessment records.
    """
    rows, total_count = await AssessmentCRUD.get_list(
        db=db,
        page=page,
        page_size=page_size
    )

    # Ceiling division for the total number of pages
    page_count = (total_count + page_size - 1) // page_size

    return ResponseModel(
        data=PaginatedResponse(
            total=total_count,
            page=page,
            page_size=page_size,
            items=[AssessmentResponse.model_validate(row) for row in rows],
            total_pages=page_count
        )
    )


@router.get("/{assessment_id}", response_model=ResponseModel[AssessmentDetailResponse])
async def get_assessment_detail(
    assessment_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Return the full detail of one assessment, or 404 if it does not exist.
    """
    found = await AssessmentCRUD.get_by_id(db, assessment_id)
    if not found:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="评估记录不存在"
        )

    detail = AssessmentDetailResponse.model_validate(found)
    return ResponseModel(data=detail)


@router.get("/record/{record_id}", response_model=ResponseModel[AssessmentDetailResponse])
async def get_record_latest_assessment(
    record_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Return the most recent assessment for a record, or 404 if none exists.
    """
    latest = await AssessmentCRUD.get_by_record_id(db, record_id, latest_only=True)
    if not latest:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="该病历暂无评估记录"
        )

    payload = AssessmentDetailResponse.model_validate(latest)
    return ResponseModel(data=payload)


@router.delete("/{assessment_id}", response_model=ResponseModel[dict])
async def delete_assessment(
    assessment_id: UUID,
    db: AsyncSession = Depends(get_db)
):
    """
    Delete an assessment record; 404 when no such assessment exists.
    """
    deleted = await AssessmentCRUD.delete(db, assessment_id)
    if not deleted:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="评估记录不存在"
        )

    return ResponseModel(
        message="评估记录删除成功",
        data={'assessment_id': str(assessment_id)}
    )


@router.get("/statistics/summary", response_model=ResponseModel[AssessmentStatistics])
async def get_assessment_statistics(
    db: AsyncSession = Depends(get_db)
):
    """
    Return aggregate assessment statistics.

    Combines record-level counts (total / assessed) with assessment-level
    aggregates (average score, score distribution, common issues).
    """
    # NOTE: removed a redundant function-local `from app.crud.record import
    # RecordCRUD` that shadowed the identical module-level import.

    # Record-level counts
    record_stats = await RecordCRUD.get_statistics(db)

    # Assessment-level aggregates
    assessment_stats = await AssessmentCRUD.get_statistics(db)

    # Merge both sources into the response schema
    statistics = AssessmentStatistics(
        total_records=record_stats['total_records'],
        assessed_records=record_stats['assessed_records'],
        average_score=assessment_stats['average_score'],
        score_distribution=assessment_stats['score_distribution'],
        common_issues=assessment_stats['common_issues']
    )

    return ResponseModel(data=statistics)

