"""Lead import service with Excel parsing and AI tagging."""

from __future__ import annotations

import io
import logging
from difflib import get_close_matches
from typing import Any
from uuid import UUID

import pandas as pd
from fastapi import UploadFile
from pydantic import BaseModel, Field
from sqlalchemy.exc import IntegrityError

from ..models.import_batch import ImportBatch, ImportStatus
from ..models.enums import DemandType, SourceChannel
from ..models.lead import Lead
from ..repositories.import_batch_repository import ImportBatchRepository
from ..repositories.import_record_repository import ImportRecordRepository
from ..repositories.lead_repository import LeadRepository
from ..schemas.import_batch import ImportBatchCreate
from ..schemas.import_record import ImportRecordCreate
from ..schemas.lead import LeadCreate
from ..services.ai_tag_engine import HybridTagEngine
from ..services.data_quality_service import (
    normalize_phone,
    normalize_name,
    normalize_area,
    calculate_duplicate_score,
)


class ImportResult(BaseModel):
    """Aggregate statistics returned after one Excel import run.

    NOTE: the ``description`` strings are runtime values (they surface in
    the generated OpenAPI schema) and are intentionally kept in Chinese.
    """

    batch_id: UUID = Field(..., description="批次ID")  # ID of the ImportBatch created for this run
    total_rows: int = Field(..., description="总行数")  # rows found in the spreadsheet
    success_count: int = Field(..., description="成功导入数")  # rows staged successfully
    duplicate_count: int = Field(..., description="重复跳过数")  # rows flagged as potential duplicates
    error_count: int = Field(..., description="错误数")  # rows that raised during processing
    errors: list[str] = Field(default_factory=list, description="错误详情")  # per-row error messages


logger = logging.getLogger(__name__)


class LeadImportService:
    """Lead import service: pandas-based Excel parsing plus AI tag extraction."""

    # Column-name mapping (Chinese spreadsheet header -> internal field name).
    COLUMN_MAPPING = {
        "姓名": "name",
        "客户姓名": "name",
        "名字": "name",
        "手机号": "phone",
        "电话": "phone",
        "手机": "phone",
        "联系方式": "phone",
        "需求类型": "demand_type",
        "需求": "demand_type",
        "类型": "demand_type",
        "预算范围": "budget_range",
        "预算": "budget_range",
        "价格范围": "budget_range",
        "来源渠道": "source_channel",
        "来源": "source_channel",
        "渠道": "source_channel",
        "备注": "notes",
        "说明": "notes",
        "需求说明": "notes",
    }

    # Demand-type mapping (Chinese label -> DemandType enum member).
    DEMAND_TYPE_MAPPING = {
        "租房-整租": DemandType.RENT_WHOLE,
        "整租": DemandType.RENT_WHOLE,
        "租房-合租": DemandType.RENT_SHARED,
        "合租": DemandType.RENT_SHARED,
        "租房-短租": DemandType.RENT_SHORT,
        "短租": DemandType.RENT_SHORT,
        "买房": DemandType.BUY,
        "购房": DemandType.BUY,
        "卖房": DemandType.SELL,
        "出售": DemandType.SELL,
    }

    def __init__(
        self,
        batch_repo: ImportBatchRepository,
        record_repo: ImportRecordRepository,
        lead_repo: LeadRepository,
        ai_engine: HybridTagEngine | None = None,
        tag_service: Any | None = None,  # TagService (typed as Any to avoid a circular import)
    ) -> None:
        self.batch_repo = batch_repo
        self.record_repo = record_repo
        self.lead_repo = lead_repo
        self.ai_engine = ai_engine
        self.tag_service = tag_service

    @staticmethod
    def _cell_to_str(value: Any) -> str:
        """Convert one pandas cell to a stripped string; NaN/None become ''.

        Fix: ``str(cell)`` on an empty Excel cell yields the literal string
        "nan", which previously passed the required name/phone check and
        leaked into the notes/budget fields.
        """
        if value is None:
            return ""
        try:
            if pd.isna(value):
                return ""
        except (TypeError, ValueError):
            # Non-scalar cell content (rare) — fall through to str().
            pass
        return str(value).strip()

    async def import_from_excel(self, file: UploadFile, owner_id: int) -> ImportResult:
        """
        Import leads from an uploaded Excel file (core entry point).

        Pipeline:
        1. Read the spreadsheet with pandas.
        2. Fuzzy-map columns to internal field names.
        3. Normalize/clean each row.
        4. Detect duplicates by phone number.
        5. Extract AI tags from the notes field.
        6. Stage rows into the import-record (staging) table.

        Args:
            file: Uploaded Excel file.
            owner_id: ID of the user who owns the imported leads.

        Returns:
            Aggregate import statistics for the created batch.
        """
        # Create the batch up front so failures can still be recorded on it.
        batch = self.batch_repo.create(
            ImportBatchCreate(
                filename=file.filename or "unknown.xlsx",
                channel_hint=None,
                record_count=0,
                status=ImportStatus.PROCESSING,
                imported_by=owner_id,
            )
        )

        errors: list[str] = []
        success_count = 0

        try:
            # 1. Read the Excel payload (UploadFile.read() is async in FastAPI).
            contents: bytes = await file.read()
            df = pd.read_excel(io.BytesIO(contents))

            # Enforce the batch-size cap (AI tagging handles at most 20 rows).
            if len(df) > 20:
                self.batch_repo.update_status(batch.id, ImportStatus.FAILED)
                return ImportResult(
                    batch_id=batch.id,
                    total_rows=len(df),
                    success_count=0,
                    duplicate_count=0,
                    error_count=1,
                    errors=[
                        f"AI自动化导入最多支持20条记录，当前文件包含{len(df)}条。请拆分文件后重试。"
                    ],
                )

            # 2. Fuzzy column mapping; name and phone are mandatory.
            column_map = self._auto_map_columns(df.columns.tolist())
            if "name" not in column_map or "phone" not in column_map:
                self.batch_repo.update_status(batch.id, ImportStatus.FAILED)
                return ImportResult(
                    batch_id=batch.id,
                    total_rows=len(df),
                    success_count=0,
                    duplicate_count=0,
                    error_count=1,
                    errors=["缺少必填列：姓名、手机号"],
                )

            # 3-6. Row-by-row processing into the staging table.
            warnings_count = 0  # reserved for future soft-validation warnings
            duplicates_found = 0

            # Fix: iterrows() yields numpy integer indexes, which failed the
            # old isinstance(idx, (int, float)) check, so every error was
            # reported as "第2行". enumerate() gives a reliable 0-based position.
            for position, (_, row) in enumerate(df.iterrows()):
                try:
                    lead_data = self._extract_lead_data(row, column_map)

                    # Normalization.
                    norm_phone = normalize_phone(lead_data["phone"])
                    norm_name = normalize_name(lead_data["name"])
                    norm_area = normalize_area(lead_data.get("notes", "") or "")

                    normalized_data = {
                        "name": norm_name["normalized"],
                        "name_pinyin": norm_name["pinyin"],
                        "phone": norm_phone,
                        "area": norm_area["normalized"],
                        "demand_type": lead_data["demand_type"].value,
                        "budget_range": lead_data.get("budget_range"),
                        "notes": lead_data.get("notes"),
                    }

                    # AI tag extraction from the free-text notes (optional).
                    ai_tags_data = self._extract_ai_tags(lead_data.get("notes"))

                    # Duplicate detection against existing leads.
                    duplicate_candidates, confidence_score = self._find_duplicates(
                        norm_phone, norm_name, norm_area
                    )
                    if duplicate_candidates:
                        duplicates_found += 1

                    # Stage the row as an ImportRecord (pending review).
                    record_create = ImportRecordCreate(
                        import_batch_id=batch.id,
                        original_data=lead_data,
                        normalized_data=normalized_data,
                        ai_tags=ai_tags_data,
                        duplicate_candidates=(
                            {"candidates": duplicate_candidates} if duplicate_candidates else None
                        ),
                        confidence=confidence_score,
                        status="pending",
                        finalized=False,
                    )
                    self.record_repo.create(record_create)
                    success_count += 1

                except (ValueError, KeyError, AttributeError, TypeError) as e:
                    # +2 converts the 0-based position to a 1-based Excel row
                    # number, accounting for the header row.
                    errors.append(f"第{position + 2}行错误: {str(e)}")

            # Final batch status and counters.
            self.batch_repo.update_status(
                batch.id,
                ImportStatus.COMPLETED if success_count > 0 else ImportStatus.FAILED,
                record_count=success_count,
                errors_count=len(errors),
                warnings_count=warnings_count,
                duplicates_count=duplicates_found,
            )

            return ImportResult(
                batch_id=batch.id,
                total_rows=len(df),
                success_count=success_count,
                duplicate_count=duplicates_found,
                error_count=len(errors),
                errors=errors,
            )

        except (pd.errors.ParserError, pd.errors.EmptyDataError, ValueError, IOError) as e:
            self.batch_repo.update_status(batch.id, ImportStatus.FAILED)
            return ImportResult(
                batch_id=batch.id,
                total_rows=0,
                success_count=0,
                duplicate_count=0,
                error_count=1,
                errors=[f"文件解析失败: {str(e)}"],
            )

    def _extract_ai_tags(self, notes: str | None) -> dict[str, Any] | None:
        """Run the AI tag engine over the notes text.

        Returns None when no engine is configured, notes are empty, or the
        engine yields no tags.
        """
        if not (self.ai_engine and notes):
            return None
        extraction = self.ai_engine.extract(notes)
        if not extraction.tags:
            return None
        return {
            "tags": [
                {"name": t.name, "confidence": t.confidence}
                for t in extraction.tags
            ],
            "method": extraction.method,
        }

    def _find_duplicates(
        self,
        norm_phone: str,
        norm_name: dict[str, Any],
        norm_area: dict[str, Any],
    ) -> tuple[list[dict[str, Any]], float]:
        """Look up an exact phone match among existing (non-deleted) leads.

        Uses a targeted phone-equality query rather than a full-table scan.

        Returns:
            (duplicate_candidates, confidence_score) — confidence is 1.0 for
            unique rows, lowered by the duplicate similarity score otherwise.
        """
        duplicate_candidates: list[dict[str, Any]] = []
        confidence_score = 1.0

        if norm_phone:
            existing_lead = (
                self.lead_repo.session.query(Lead)
                .filter(Lead.phone == norm_phone, Lead.is_deleted == False)  # noqa: E712
                .first()
            )
            if existing_lead:
                dup_result = calculate_duplicate_score(
                    norm_phone,
                    norm_name["normalized"],
                    str(norm_area["normalized"]),
                    existing_lead.phone,
                    existing_lead.name,
                    "",  # no stored area text for existing leads
                )
                duplicate_candidates.append(
                    {
                        "lead_id": existing_lead.id,
                        "score": dup_result["score"],
                        "category": dup_result["category"],
                        "match_type": "existing_lead",
                    }
                )
                confidence_score = min(confidence_score, 1.0 - dup_result["score"])

        return duplicate_candidates, confidence_score

    def _auto_map_columns(self, columns: list[str]) -> dict[str, str]:
        """
        Fuzzy-map spreadsheet headers to internal field names.

        The difflib cutoff is 0.8 (raised from 0.6) to reduce false matches,
        e.g. "说明" being wrongly mapped to "需求".
        """
        column_map: dict[str, str] = {}
        normalized_cols = {col.strip(): col for col in columns}

        for display_name, field_name in self.COLUMN_MAPPING.items():
            # Exact match first.
            if display_name in normalized_cols:
                column_map[field_name] = normalized_cols[display_name]
                continue

            # Fuzzy fallback with a deliberately high threshold.
            matches = get_close_matches(display_name, normalized_cols.keys(), n=1, cutoff=0.8)
            if matches:
                column_map[field_name] = normalized_cols[matches[0]]

        return column_map

    def _extract_lead_data(self, row: pd.Series, column_map: dict[str, str]) -> dict[str, Any]:
        """Extract and pre-clean one lead row from the spreadsheet.

        Raises:
            ValueError: when name or phone is missing/blank (including NaN cells).
        """
        data: dict[str, Any] = {}

        # Required fields — NaN cells now count as empty (see _cell_to_str).
        data["name"] = self._cell_to_str(row[column_map["name"]])
        data["phone"] = self._cell_to_str(row[column_map["phone"]])
        if not data["name"] or not data["phone"]:
            raise ValueError("姓名和手机号不能为空")

        # Demand type (mapped; unknown values fall back to whole-apartment rental).
        demand_raw = (
            self._cell_to_str(row[column_map["demand_type"]])
            if "demand_type" in column_map
            else ""
        )
        data["demand_type"] = self.DEMAND_TYPE_MAPPING.get(demand_raw, DemandType.RENT_WHOLE)

        # Optional fields — blank/NaN cells become None rather than "nan".
        budget = (
            self._cell_to_str(row[column_map["budget_range"]])
            if "budget_range" in column_map
            else ""
        )
        data["budget_range"] = budget or None
        notes = self._cell_to_str(row[column_map["notes"]]) if "notes" in column_map else ""
        data["notes"] = notes or None

        # Source channel (optional; unknown values fall back to OTHER).
        source_raw = (
            self._cell_to_str(row[column_map["source_channel"]])
            if "source_channel" in column_map
            else ""
        )
        try:
            data["source_channel"] = SourceChannel(source_raw.lower())
        except ValueError:
            data["source_channel"] = SourceChannel.OTHER

        return data

    # Legacy API kept for backward compatibility.
    def start_batch(
        self, user_id: int, filename: str, channel_hint: str | None = None
    ) -> ImportBatch:
        """Create a new PROCESSING batch (legacy entry point)."""
        return self.batch_repo.create(
            ImportBatchCreate(
                filename=filename,
                channel_hint=channel_hint,
                record_count=0,
                status=ImportStatus.PROCESSING,
                imported_by=user_id,
            )
        )

    def complete_batch(self, batch_id: UUID, record_count: int) -> ImportBatch | None:
        """Mark a batch COMPLETED with its final record count."""
        return self.batch_repo.update_status(batch_id, ImportStatus.COMPLETED, record_count)

    def fail_batch(self, batch_id: UUID) -> ImportBatch | None:
        """Mark a batch FAILED."""
        return self.batch_repo.update_status(batch_id, ImportStatus.FAILED)

    def bulk_create_leads(self, batch_id: UUID, owner_id: int, leads: list[LeadCreate]) -> int:
        """Create leads directly (bypassing the staging table), skipping
        phones that already exist.

        Returns:
            Number of leads actually created.
        """
        created = 0
        for lc in leads:
            data = lc.model_copy(update={"import_batch_id": batch_id, "owner_id": owner_id})
            if self.lead_repo.exists_by_phone(data.phone):
                continue
            # Fix: the copy is already a LeadCreate — no need for the old
            # LeadCreate(**data.model_dump()) round-trip.
            self.lead_repo.create(data)
            created += 1
        self.batch_repo.update_status(batch_id, ImportStatus.COMPLETED, created)
        return created

    def finalize_batch(self, batch_id: UUID, owner_id: int) -> dict[str, Any]:
        """
        Finalize a batch: promote staged records into the leads table.

        Steps:
        1. Fetch all pending records of the batch.
        2. Insert each into the leads table (skipping duplicate phones).
        3. Mark each promoted record as finalized.
        4. Mark the batch COMPLETED.

        Args:
            batch_id: Batch ID.
            owner_id: Owner (user) ID for the created leads.

        Returns:
            dict: {"finalized_count": int, "skipped_count": int}
        """
        pending_records = self.record_repo.get_pending_records(batch_id)

        finalized_count = 0
        skipped_count = 0

        for record in pending_records:
            try:
                norm_data = record.normalized_data
                phone = norm_data.get("phone", "")

                # Final duplicate check against the live leads table.
                if self.lead_repo.exists_by_phone(phone):
                    skipped_count += 1
                    record.status = "skipped"
                    continue

                # Match tags from the notes text (best effort, non-critical).
                # Fix: guard against a stored None value, not just a missing key.
                notes = norm_data.get("notes") or ""
                matched_tags = []
                tag_names_str = ""

                if self.tag_service and notes:
                    try:
                        matched_tags = self.tag_service.match_tags_from_text(notes, max_tags=4)
                        if matched_tags:
                            tag_names_str = ",".join([tag["tag_name"] for tag in matched_tags])
                    except Exception as tag_err:
                        # Fix: log instead of print() so failures reach the
                        # application log.
                        logger.warning("标签匹配失败: %s", tag_err)

                lead_create = LeadCreate(
                    name=norm_data["name"],
                    phone=phone,
                    demand_type=DemandType(norm_data["demand_type"]),
                    budget_range=norm_data.get("budget_range"),
                    notes=notes,
                    tag_names=tag_names_str or None,
                    source_channel=SourceChannel.OTHER,
                    import_batch_id=batch_id,
                    owner_id=owner_id,
                )
                try:
                    created_lead = self.lead_repo.create(lead_create)
                except IntegrityError:
                    # Unique-constraint race: another writer inserted the same
                    # phone between the check above and this insert.
                    skipped_count += 1
                    record.status = "skipped"
                    record.ai_extraction_error = "手机号已存在（并发冲突）"
                    continue

                # Attach matched tags to the new lead (best effort).
                if self.tag_service and matched_tags:
                    try:
                        self.tag_service.attach_tags_to_lead(
                            lead_id=created_lead.id,
                            matched_tags=matched_tags,
                            session=self.lead_repo.session,
                        )
                    except Exception as tag_err:
                        logger.warning("创建LeadTag关联失败: %s", tag_err)

                # Promote the staged record.
                record.finalized = True
                record.finalized_lead_id = created_lead.id
                record.status = "confirmed"
                finalized_count += 1

            except (ImportError, AttributeError, RuntimeError) as e:
                record.status = "error"
                record.review_notes = f"Finalize失败: {str(e)}"
                skipped_count += 1

        self.batch_repo.update_status(
            batch_id, ImportStatus.COMPLETED, record_count=finalized_count
        )

        return {"finalized_count": finalized_count, "skipped_count": skipped_count}
