"""Lead import endpoints."""

from __future__ import annotations

import logging
from typing import Any
from uuid import UUID

from fastapi import APIRouter, Depends, HTTPException, UploadFile, status
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session

from ...deps import (
    get_current_user,
    get_import_service,
    get_db,
)
from ...utils import _ok
from ....models.import_batch import ImportStatus
from ....models.user import User
from ....repositories.import_batch_repository import ImportBatchRepository
from ....repositories.import_record_repository import ImportRecordRepository
from ....schemas.import_batch import ImportBatchOut
from ....schemas.import_record import ImportRecordOut, ImportRecordUpdate
from ....schemas.lead import LeadCreate
from ....services.lead_import_service import LeadImportService

# Shared router for all lead-import endpoints; mounted by the parent API package.
router = APIRouter(tags=["imports"])


@router.post("/", response_model=ImportBatchOut, status_code=status.HTTP_202_ACCEPTED)
def start_import(
    file: UploadFile,
    channel_hint: str | None = None,
    import_service: LeadImportService = Depends(get_import_service),
    current_user: User = Depends(get_current_user),
    session: Session = Depends(get_db),
) -> JSONResponse:
    """Start a new import batch (legacy endpoint, kept for backward compatibility)."""
    # Fall back to a placeholder name when the upload carries no filename.
    upload_name = file.filename or "uploaded_import"
    new_batch = import_service.start_batch(current_user.id, upload_name, channel_hint)
    session.commit()
    body = ImportBatchOut.model_validate(new_batch)
    return _ok(body, "Import batch started", status.HTTP_202_ACCEPTED)


@router.post("/excel", status_code=status.HTTP_201_CREATED)
async def import_leads_from_excel(
    file: UploadFile,
    import_service: LeadImportService = Depends(get_import_service),
    current_user: User = Depends(get_current_user),
    session: Session = Depends(get_db),
) -> JSONResponse:
    """
    Smart customer import from an Excel workbook (core feature).

    Supported Excel columns (with smart header mapping):
    - Name* (required)
    - Phone number* (required)
    - Demand type (optional, defaults to whole-unit rent)
    - Budget range (optional)
    - Source channel (optional)
    - Notes (optional; AI tags are extracted from this field)

    Example sheet:
    | 姓名   | 手机号       | 需求类型 | 预算范围  | 备注                  |
    |--------|--------------|----------|-----------|---------------------|
    | 王先生 | 13800138000  | 租房-整租| 4000-6000 | 需要养宠物，靠近地铁 |

    Returns a summary with row counts plus at most the first 10 row errors.
    """
    # Reject anything that is not an Excel workbook up front.
    if not file.filename or not file.filename.endswith((".xlsx", ".xls")):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="仅支持Excel文件（.xlsx/.xls）"
        )

    try:
        # Run the import inside the request-scoped transaction.
        result = await import_service.import_from_excel(file, current_user.id)

        # Commit only when at least one row was imported; a fully failed
        # import is rolled back so it leaves no partial state behind.
        if result.success_count > 0:
            session.commit()
        else:
            session.rollback()

        return _ok(
            {
                "batch_id": str(result.batch_id),
                "total_rows": result.total_rows,
                "success_count": result.success_count,
                "duplicate_count": result.duplicate_count,
                "error_count": result.error_count,
                "errors": result.errors[:10],  # cap the error list at 10 entries
            },
            f"导入完成：成功{result.success_count}条，重复{result.duplicate_count}条，失败{result.error_count}条",
            status.HTTP_201_CREATED,
        )

    except HTTPException:
        # Don't let deliberate HTTP errors raised downstream be masked as 500s.
        session.rollback()
        raise
    except Exception as e:
        # Unexpected failure: undo everything and surface a 500, chaining the
        # original exception so the real cause shows up in tracebacks.
        session.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"导入过程发生严重错误: {str(e)}",
        ) from e


@router.post("/{batch_id}/complete")
def complete_import(
    batch_id: UUID,
    import_service: LeadImportService = Depends(get_import_service),
    session: Session = Depends(get_db),
) -> JSONResponse:
    """Mark an import batch as completed; 404 when the batch is unknown."""
    completed = import_service.complete_batch(batch_id, 0)
    if completed is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="导入批次不存在")
    session.commit()
    return _ok(ImportBatchOut.model_validate(completed), "Import batch completed")


@router.post("/{batch_id}/fail")
def fail_import(
    batch_id: UUID,
    import_service: LeadImportService = Depends(get_import_service),
    session: Session = Depends(get_db),
) -> JSONResponse:
    """Mark an import batch as failed; 404 when the batch is unknown."""
    failed_batch = import_service.fail_batch(batch_id)
    if failed_batch is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="导入批次不存在")
    session.commit()
    return _ok(ImportBatchOut.model_validate(failed_batch), "Import batch failed")


@router.post("/{batch_id}/leads", status_code=status.HTTP_201_CREATED)
def bulk_import_leads(
    batch_id: UUID,
    payload: list[LeadCreate],
    import_service: LeadImportService = Depends(get_import_service),
    current_user: User = Depends(get_current_user),
    session: Session = Depends(get_db),
) -> JSONResponse:
    """Create the given leads under an existing batch and report how many were created."""
    created_count = import_service.bulk_create_leads(batch_id, current_user.id, payload)
    session.commit()
    return _ok({"created": created_count}, "Leads imported", status.HTTP_201_CREATED)


def _classify_import_type(channel_hint: str | None, filename: str | None) -> str:
    """Infer the import category label from the channel hint, else the filename."""
    if channel_hint:
        hint = channel_hint.lower()
        if "property" in hint:
            return "房源"
        if "performance" in hint:
            return "绩效"
    elif filename:
        if "房源" in filename or "property" in filename.lower():
            return "房源"
        if "绩效" in filename or "performance" in filename.lower():
            return "绩效"
    # Default category: customer import.
    return "客户"


@router.get("/history", response_model=list[dict[str, Any]])
def get_import_history(
    skip: int = 0,
    limit: int = 50,
    import_service: LeadImportService = Depends(get_import_service),
    current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """
    Return the import history as a list of summary dicts.

    Admins see every batch; regular users only their own. Each entry holds
    filename, inferred type, total/success/failed counts, a formatted
    timestamp, status, and the batch id.
    """
    # Admins may browse all batches; everyone else is scoped to their own.
    if current_user.is_admin:
        batches = import_service.batch_repo.get_all(skip=skip, limit=limit)
    else:
        batches = import_service.batch_repo.get_by_user(current_user.id, skip=skip, limit=limit)

    # Newest first. NOTE(review): this sorts AFTER skip/limit pagination, so
    # ordering is only within the fetched page — confirm whether the repository
    # should order by created_at before paginating.
    batches = sorted(batches, key=lambda b: b.created_at, reverse=True)

    history = [
        {
            "filename": batch.filename,
            "type": _classify_import_type(batch.channel_hint, batch.filename),
            "total": batch.record_count,
            # success = total rows minus the rows that errored out
            "success": batch.record_count - batch.errors_count,
            "failed": batch.errors_count,
            "import_time": batch.created_at.strftime("%Y-%m-%d %H:%M:%S"),
            "status": batch.status.value,
            "batch_id": str(batch.id),
        }
        for batch in batches
    ]

    return _ok(history, "导入历史获取成功")


@router.get("/{batch_id}", response_model=ImportBatchOut)
def get_batch(
    batch_id: UUID,
    import_service: LeadImportService = Depends(get_import_service),
    _session: Session = Depends(get_db),
) -> JSONResponse:
    """Fetch a single import batch by id; 404 when it does not exist."""
    found = import_service.batch_repo.get(batch_id)
    if found is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="导入批次不存在")
    return _ok(ImportBatchOut.model_validate(found))


@router.get("/", response_model=list[ImportBatchOut])
def list_batches(
    status_filter: ImportStatus | None = None,
    import_service: LeadImportService = Depends(get_import_service),
    current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """List import batches: by status when a filter is given, else the caller's own."""
    # NOTE(review): the status-filtered branch is not scoped to current_user —
    # confirm whether that cross-user visibility is intentional.
    repo = import_service.batch_repo
    if status_filter is None:
        batches = repo.get_by_user(current_user.id)
    else:
        batches = repo.get_by_status(status_filter)
    return _ok([ImportBatchOut.model_validate(b) for b in batches])


@router.get("/{batch_id}/records", response_model=list[ImportRecordOut])
def get_batch_records(
    batch_id: UUID,
    skip: int = 0,
    limit: int = 100,
    status_filter: str | None = None,
    session: Session = Depends(get_db),
    _current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """
    List import records for a batch (used by the review page).

    Args:
        batch_id: Batch id.
        skip: Number of records to skip.
        limit: Page size (max 100).
        status_filter: Optional status filter (pending/confirmed/merged/rejected).

    Returns:
        A list of ImportRecordOut payloads.
    """
    repo = ImportRecordRepository(session)

    # NOTE(review): the status-filtered branch ignores skip/limit pagination —
    # confirm whether get_by_status should paginate as well.
    if status_filter:
        rows = repo.get_by_status(batch_id, status_filter)
    else:
        rows = repo.get_by_batch(batch_id, skip, limit)

    return _ok([ImportRecordOut.model_validate(r) for r in rows])


@router.post("/{batch_id}/finalize", status_code=status.HTTP_200_OK)
def finalize_batch(
    batch_id: UUID,
    import_service: LeadImportService = Depends(get_import_service),
    current_user: User = Depends(get_current_user),
    session: Session = Depends(get_db),
) -> JSONResponse:
    """
    Finalize a batch: write staged records into the leads table.

    Flow:
    1. Verify the batch exists (404 otherwise).
    2. Delegate to the import service, which writes pending records to leads.
    3. Commit and return {"finalized_count": int, "skipped_count": int}.
    """
    # Nothing to finalize if the batch is unknown.
    if import_service.batch_repo.get(batch_id) is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="导入批次不存在")

    stats = import_service.finalize_batch(batch_id, current_user.id)
    session.commit()

    return _ok(
        stats,
        f"Finalize完成：已写入{stats['finalized_count']}条，跳过{stats['skipped_count']}条",
        status.HTTP_200_OK,
    )


@router.patch("/records/{record_id}", response_model=ImportRecordOut)
def update_import_record(
    record_id: int,
    payload: ImportRecordUpdate,
    session: Session = Depends(get_db),
    _current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """
    Update a single import record (manual review edits).

    Editable fields:
    - normalized_data: normalized row data
    - status: record status
    - review_notes: reviewer notes

    Returns the updated record; 404 when the id is unknown.
    """
    repo = ImportRecordRepository(session)
    existing = repo.get(record_id)
    if existing is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="记录不存在")

    result = repo.update(existing, payload)
    session.commit()
    return _ok(ImportRecordOut.model_validate(result), "记录已更新")


class BulkActionRequest(BaseModel):
    """Request body for bulk operations on import records."""

    # Ids of the records to act on (the endpoint caps this at 20 per call).
    record_ids: list[int]
    # NOTE(review): only "confirm"/"reject" are accepted by the bulk-action
    # endpoint; "merge" appears unimplemented — confirm before relying on it.
    action: str  # "confirm" | "reject" | "merge"


@router.post("/{batch_id}/records/bulk-action", status_code=status.HTTP_200_OK)
def bulk_action_records(
    _batch_id: UUID,
    payload: BulkActionRequest,
    session: Session = Depends(get_db),
    _current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """
    Apply one action to several import records at once.

    Supported actions:
    - confirm: set status=confirmed
    - reject: set status=rejected

    At most 20 record ids may be submitted per call. Returns the number of
    records actually updated as {"affected_count": int}.
    """
    # Maps each accepted action onto the status value it assigns.
    status_map = {
        "confirm": "confirmed",
        "reject": "rejected",
    }

    # Enforce the per-call batch-size ceiling (20 records).
    selected = len(payload.record_ids)
    if selected > 20:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"批量操作最多支持20条记录，当前选择了{selected}条。请减少选择数量后重试。",
        )

    # Only the two supported actions are allowed through.
    if payload.action not in ["confirm", "reject"]:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="无效的操作类型，支持: confirm, reject"
        )

    record_repo = ImportRecordRepository(session)
    affected = record_repo.bulk_update_status(payload.record_ids, status_map[payload.action])
    session.commit()

    return _ok(
        {"affected_count": affected}, f"批量{payload.action}完成：影响{affected}条记录"
    )


@router.post("/{batch_id}/extract-tags", status_code=status.HTTP_200_OK)
def batch_extract_ai_tags(
    batch_id: UUID,
    import_service: LeadImportService = Depends(get_import_service),
    session: Session = Depends(get_db),
    _current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """
    Batch AI tag extraction (standalone, user-triggered operation).

    Runs the AI tag extractor over every record of the given batch, storing
    extracted tags on each record and recording per-record failure reasons.

    Args:
        batch_id: Import batch id.
        import_service: Import service (carries the optional AI engine).
        session: Database session.

    Returns:
        JSON payload {"processed": int, "success": int, "failed": int}.

    Raises:
        HTTPException: 404 when the batch does not exist.
    """
    logger = logging.getLogger(__name__)

    # The batch must exist before we touch its records.
    batch_repo = ImportBatchRepository(session)
    batch = batch_repo.get(batch_id)
    if not batch:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="导入批次不存在")

    record_repo = ImportRecordRepository(session)
    records = record_repo.get_by_batch(batch_id)

    if not records:
        return _ok({"processed": 0, "success": 0, "failed": 0}, "批次中无记录")

    processed = 0
    success_count = 0
    failed_count = 0

    for record in records:
        processed += 1

        # Tags are extracted from the "notes" field of the raw imported row.
        notes = record.original_data.get("notes", "") if record.original_data else ""

        if not notes or not notes.strip():
            record.ai_extraction_attempts += 1
            record.ai_extraction_error = "备注为空，无法提取标签"
            failed_count += 1
            continue

        if import_service.ai_engine:
            try:
                record.ai_extraction_attempts += 1
                extraction = import_service.ai_engine.extract(notes)
                if extraction.tags:
                    # Persist the tags together with the extraction method used.
                    record.ai_tags = {
                        "tags": [
                            {"name": t.name, "confidence": t.confidence} for t in extraction.tags
                        ],
                        "method": extraction.method,
                    }
                    record.ai_extraction_error = None  # clear any previous error
                    success_count += 1
                else:
                    record.ai_extraction_error = "AI未返回任何标签"
                    failed_count += 1
            except Exception as e:
                # One bad record must not abort the whole batch; record the
                # error, log it (logging, not print, so it reaches the app's
                # configured handlers), and continue with the next record.
                error_message = f"AI提取异常: {type(e).__name__} - {str(e)}"
                record.ai_extraction_error = error_message
                logger.warning("AI提取失败 record_id=%s: %s", record.id, error_message)
                failed_count += 1
        else:
            record.ai_extraction_attempts += 1
            record.ai_extraction_error = "AI引擎未初始化（缺少API密钥配置）"
            failed_count += 1

    session.commit()

    return _ok(
        {"processed": processed, "success": success_count, "failed": failed_count},
        f"批量AI提取完成: 成功{success_count}条，失败{failed_count}条",
    )


@router.get("/{batch_id}/statistics", status_code=status.HTTP_200_OK)
def get_batch_statistics(
    batch_id: UUID,
    session: Session = Depends(get_db),
    _current_user: User = Depends(get_current_user),
) -> JSONResponse:
    """
    Return detailed statistics for an import batch.

    Includes:
    - record counts per status (pending/confirmed/rejected/error)
    - errors_count, warnings_count, duplicates_count from the batch row
    - the average confidence over (up to) the first 1000 records

    Args:
        batch_id: Import batch id.
        session: Database session.

    Returns:
        JSON payload with the statistics dict.

    Raises:
        HTTPException: 404 when the batch does not exist.
    """
    batch_repo = ImportBatchRepository(session)
    record_repo = ImportRecordRepository(session)

    batch = batch_repo.get(batch_id)
    if not batch:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="导入批次不存在")

    # Per-status breakdown.
    pending_count = record_repo.count_by_status(batch_id, "pending")
    confirmed_count = record_repo.count_by_status(batch_id, "confirmed")
    rejected_count = record_repo.count_by_status(batch_id, "rejected")
    error_count = record_repo.count_by_status(batch_id, "error")

    # NOTE(review): the average is computed over at most the first 1000
    # records, so it is a sample for very large batches — confirm acceptable.
    records = record_repo.get_by_batch(batch_id, skip=0, limit=1000)
    # Treat a missing (None) confidence as 0.0 so one unscored record cannot
    # crash the endpoint with a TypeError during the sum.
    avg_confidence = (
        sum((r.confidence or 0.0) for r in records) / len(records) if records else 0.0
    )

    statistics = {
        "batch_id": str(batch_id),
        "total_records": batch.record_count,
        "status_breakdown": {
            "pending": pending_count,
            "confirmed": confirmed_count,
            "rejected": rejected_count,
            "error": error_count,
        },
        "errors_count": batch.errors_count,
        "warnings_count": batch.warnings_count,
        "duplicates_count": batch.duplicates_count,
        "average_confidence": round(avg_confidence, 3),
    }

    return _ok(statistics, "统计信息获取成功")
