"""
====================================================================
数据迁移工具 / Data Migration Utility
====================================================================
功能说明：
1. 将旧的 equipment_analysis 数据迁移到新集合
2. 支持双写模式验证
3. 提供数据一致性检查
4. 支持增量迁移和全量迁移

使用方法：
    # 预览迁移
    python -c "from utils.migration_util import preview_migration; preview_migration()"

    # 执行迁移
    python -c "from utils.migration_util import migrate_all_data; migrate_all_data()"

    # 验证数据一致性
    python -c "from utils.migration_util import verify_consistency; verify_consistency()"

作者: Data Analysis Team
日期: 2025-11-06
====================================================================
"""

import json
from typing import Dict, List, Any, Tuple
from datetime import datetime
import pandas as pd
import numpy as np
from utils.mongo_util import db
from dao.mongo_dao.equipment_score_dao import (
    EquipmentScoreDAO,
    AggregatedScoreDAO,
    WeeklyStatsDAO,
    AnalysisReportDAO,
    LegacyAnalysisDAO,
)

# ANSI escape sequences for colored terminal output.
class Colors:
    """ANSI color codes used to highlight CLI status messages."""

    GREEN = "\033[92m"   # success
    YELLOW = "\033[93m"  # warning
    RED = "\033[91m"     # failure
    BLUE = "\033[94m"    # section headers
    END = "\033[0m"      # reset all attributes


def preview_migration(limit: int = 5) -> None:
    """
    Preview the migration: print the volume of legacy data and a few
    sample documents with a summary of which sections they contain.

    Args:
        limit: number of sample documents to display.
    """
    rule = "=" * 70
    print(f"\n{Colors.BLUE}{rule}")
    print("数据迁移预览")
    print(f"{rule}{Colors.END}\n")

    # Legacy collection statistics.
    legacy_count = LegacyAnalysisDAO.count_legacy_data()
    print("旧集合 (equipment_analysis) 中的数据:")
    print(f"  总文档数: {legacy_count}")

    if not legacy_count:
        print(f"  {Colors.YELLOW}⚠{Colors.END} 没有旧数据需要迁移")
        return

    # (field name, human-readable unit suffix) pairs for the summary lines.
    sections = (
        ("monthly_scores", "条设备评分"),
        ("weekly_summary", "条周度统计"),
        ("region_scores", "个区域"),
        ("project_scores", "个项目"),
        ("ai_result", "字符"),
    )

    cursor = db["equipment_analysis"].find({}).limit(limit)
    for index, doc in enumerate(cursor, start=1):
        print(f"\n  [{index}] 文档样本:")
        print(f"    memberId: {doc.get('memberId')}")
        print(f"    时间范围: {doc.get('time_range', {})}")
        print("    包含数据:")
        for field, suffix in sections:
            value = doc.get(field)
            if value:
                # len() counts list items for the score sections and
                # characters for the ai_result string.
                print(f"      - {field}: {len(value)} {suffix}")

    print(f"\n{Colors.BLUE}{rule}{Colors.END}\n")


# Legacy stat keys, copied verbatim from the old schema (Chinese field names).
_LEGACY_STAT_FIELDS = (
    "规则标准类型", "达标天数", "达标时长", "实际统计天数", "正常天数",
    "总工作时间", "未进场天数", "维修停机天数", "维修天数",
    "出勤日运行时间均值", "所有运行时间均值", "运行时间标准差", "一致性",
    "出勤天数", "达标率", "可工作天数", "可工作率", "总时长达标率",
)

# Score dimensions; missing values default to 0 in the new schema.
_LEGACY_SCORE_FIELDS = (
    "A_可用性", "C_出勤覆盖", "U_强度利用", "S_一致性",
    "P_合规达标", "R_可靠性", "综合分",
)


def _year_of(value: Any) -> int:
    """Return the year of *value*, accepting a datetime or a date-like string."""
    if isinstance(value, datetime):
        return value.year
    # Same best-effort parse used by the original code for string periods.
    return int(str(value)[:4])


def _build_detail_records(
    legacy_doc: Dict[str, Any],
    member_id: Any,
    period_start: Any,
    period_end: Any,
) -> List[Dict[str, Any]]:
    """Convert legacy monthly_scores entries into new detail-score documents."""
    records: List[Dict[str, Any]] = []
    for score_record in legacy_doc.get("monthly_scores", []):
        records.append({
            "memberId": member_id,
            "imei": score_record.get("imei"),
            "region_name": score_record.get("region_name"),
            "region_id": score_record.get("region_id"),
            "project_name": score_record.get("project_name"),
            "project_id": score_record.get("project_id"),
            "equipment_type": score_record.get("type"),
            "period_type": "monthly",
            "period_start": period_start,
            "period_end": period_end,
            "year": _year_of(period_start),
            # Legacy data only carries a month when period_start is a real
            # datetime; otherwise fall back to 1 (same as the old behavior).
            "month": period_start.month if isinstance(period_start, datetime) else 1,
            "stats": {field: score_record.get(field) for field in _LEGACY_STAT_FIELDS},
            "scores": {field: score_record.get(field, 0) for field in _LEGACY_SCORE_FIELDS},
            # NOTE(review): utcnow() is naive and deprecated in Python 3.12+;
            # kept for consistency with documents already written — migrate to
            # datetime.now(timezone.utc) codebase-wide, not here.
            "created_at": datetime.utcnow(),
            "updated_at": datetime.utcnow(),
            "version": 1,
        })
    return records


def _build_weekly_records(
    legacy_doc: Dict[str, Any],
    member_id: Any,
    period_start: Any,
) -> List[Dict[str, Any]]:
    """Convert legacy weekly_summary entries; rows that fail to parse are
    logged and skipped rather than aborting the whole document."""
    # Third-party import kept function-local (as in the original), but hoisted
    # out of the per-row loop.
    from dateutil.parser import parse as parse_date

    records: List[Dict[str, Any]] = []
    for week_record in legacy_doc.get("weekly_summary", []):
        try:
            week_start = week_record.get("week_period")
            if isinstance(week_start, str):
                week_start = parse_date(week_start)

            is_dt = isinstance(week_start, datetime)
            records.append({
                "memberId": member_id,
                "imei": week_record.get("imei"),
                "region_name": week_record.get("region_name"),
                "project_name": week_record.get("project_name"),
                "week_period": week_record.get("week_period"),
                # Legacy data carries no explicit week end, so start == end.
                "week_start": week_start if is_dt else period_start,
                "week_end": week_start if is_dt else period_start,
                # BUGFIX: the old fallback was period_start.year, which raised
                # AttributeError for string periods and silently dropped the
                # row; use the same guarded extraction as the detail records.
                "year": week_start.year if is_dt else _year_of(period_start),
                "week": 1,  # legacy data has no ISO week number
                "stats": {
                    "met_days": week_record.get("met_days", 0),
                    "counted_days": week_record.get("counted_days", 0),
                    "frequency": week_record.get("frequency", 0),
                    "is_week_met": week_record.get("is_week_met", 0),
                },
                "created_at": datetime.utcnow(),
            })
        except Exception as e:
            print(f"    {Colors.YELLOW}⚠{Colors.END} 周度数据解析失败: {e}")
    return records


def _build_report_data(
    legacy_doc: Dict[str, Any],
    member_id: Any,
    period_start: Any,
    period_end: Any,
) -> Dict[str, Any]:
    """Build an analysis-report document from the legacy ai_result payload."""
    monthly_scores = legacy_doc.get("monthly_scores", [])
    # BUGFIX: "or 0" guards against explicit None values, which previously
    # broke both the "< 60" comparison and np.mean and failed the document.
    scores_list = [s.get("综合分") or 0 for s in monthly_scores]
    high_risk = sum(1 for s in scores_list if s < 60)

    return {
        "memberId": member_id,
        "report_type": "equipment_analysis",
        "period_start": period_start,
        "period_end": period_end,
        "scope": {
            "level": "company",
            # NOTE(review): these fields are named *_ids but hold NAMES,
            # mirroring the original migration — confirm before relying on them.
            "region_ids": list({s.get("region_name") for s in monthly_scores if s.get("region_name")}),
            "project_ids": list({s.get("project_name") for s in monthly_scores if s.get("project_name")}),
        },
        "ai_result": legacy_doc.get("ai_result"),
        "model": "gpt-4o",
        "summary": {
            "total_equipment": len(monthly_scores),
            "avg_score": float(np.mean(scores_list)) if scores_list else 0,
            "high_risk_count": high_risk,
        },
        "created_at": datetime.utcnow(),
        "ttl": datetime.utcnow() + pd.Timedelta(days=90),  # reports expire after 90 days
    }


def migrate_single_document(legacy_doc: Dict[str, Any]) -> Tuple[bool, int, str]:
    """
    Migrate a single legacy document into the new collections.

    Writes, in order: equipment score details, weekly statistics, and (if
    present) the AI analysis report.

    Args:
        legacy_doc: document in the old equipment_analysis format.

    Returns:
        (success, number of migrated records, error message — empty on success).
    """
    member_id = legacy_doc.get("memberId")
    time_range = legacy_doc.get("time_range", {})
    period_start = time_range.get("start")
    period_end = time_range.get("end")

    if not period_start or not period_end:
        return False, 0, "时间范围缺失"

    try:
        migrated_count = 0

        # 1. Equipment score details.
        detail_records = _build_detail_records(legacy_doc, member_id, period_start, period_end)
        if detail_records:
            EquipmentScoreDAO.batch_insert_scores(detail_records)
            migrated_count += len(detail_records)

        # 2. Weekly statistics.
        weekly_records = _build_weekly_records(legacy_doc, member_id, period_start)
        if weekly_records:
            WeeklyStatsDAO.batch_insert_weekly_stats(weekly_records)
            migrated_count += len(weekly_records)

        # 3. AI report (one per legacy document, only when ai_result exists).
        if legacy_doc.get("ai_result"):
            AnalysisReportDAO.insert_report(
                _build_report_data(legacy_doc, member_id, period_start, period_end)
            )
            migrated_count += 1

        return True, migrated_count, ""

    except Exception as e:
        # Any DAO/write failure marks the whole document as not migrated.
        return False, 0, str(e)


def _estimate_migration(legacy_doc: Dict[str, Any]) -> Tuple[bool, int, str]:
    """
    Dry-run stand-in for migrate_single_document: performs the same
    per-document accounting but writes NOTHING to the database.
    """
    time_range = legacy_doc.get("time_range", {})
    if not time_range.get("start") or not time_range.get("end"):
        return False, 0, "时间范围缺失"
    # One record per monthly score, per weekly summary row, plus one report.
    count = len(legacy_doc.get("monthly_scores", []))
    count += len(legacy_doc.get("weekly_summary", []))
    if legacy_doc.get("ai_result"):
        count += 1
    return True, count, ""


def migrate_all_data(dry_run: bool = False) -> None:
    """
    Run a full migration of every legacy document.

    Args:
        dry_run: when True, only report what WOULD be migrated — no data is
            written. (BUGFIX: previously dry_run still performed every write
            and merely printed a misleading "not actually written" notice.)
    """
    print(f"\n{Colors.BLUE}{'='*70}")
    print("执行数据迁移")
    print(f"{'='*70}{Colors.END}\n")

    legacy_docs = LegacyAnalysisDAO.get_all_legacy()
    total_docs = len(legacy_docs)
    total_migrated = 0
    success_count = 0

    if total_docs == 0:
        print(f"{Colors.YELLOW}⚠{Colors.END} 没有旧数据需要迁移")
        return

    print(f"开始迁移 {total_docs} 个文档...\n")

    # Dry runs only count; real runs actually migrate.
    process = _estimate_migration if dry_run else migrate_single_document

    for i, legacy_doc in enumerate(legacy_docs, 1):
        success, count, error = process(legacy_doc)

        if success:
            total_migrated += count
            success_count += 1
            status = f"{Colors.GREEN}✓{Colors.END}"
        else:
            status = f"{Colors.RED}✗{Colors.END}"

        print(f"  [{i}/{total_docs}] {status} memberId={legacy_doc.get('memberId')}: "
              f"迁移 {count} 条记录" + (f" (错误: {error})" if error else ""))

    print(f"\n{Colors.BLUE}{'='*70}")
    print(f"迁移完成: {success_count}/{total_docs} 个文档成功, 共迁移 {total_migrated} 条记录")
    print(f"{'='*70}{Colors.END}\n")

    if dry_run:
        print(f"{Colors.YELLOW}ℹ{Colors.END} 这是模拟运行，数据尚未实际写入")

def verify_consistency() -> bool:
    """
    Verify data consistency after migration.

    Runs three checks: (1) the new collections exist, (2) legacy data has
    actually been migrated, (3) custom indexes are present. Checks 1 and 2
    affect the result; check 3 is informational only.

    Returns:
        bool: True when all required checks pass.
    """
    print(f"\n{Colors.BLUE}{'='*70}")
    print("数据一致性验证")
    print(f"{'='*70}{Colors.END}\n")

    all_pass = True

    # Check 1: the new collections exist.
    print("检查1: 新集合是否已创建")
    collections = db.list_collection_names()
    required_collections = [
        "equipment_scores_detail",
        "equipment_scores_aggregated",
        "equipment_weekly_stats",
        "analysis_reports",
    ]

    for coll_name in required_collections:
        if coll_name in collections:
            count = db[coll_name].count_documents({})
            print(f"  {Colors.GREEN}✓{Colors.END} {coll_name}: {count} 条文档")
        else:
            print(f"  {Colors.RED}✗{Colors.END} {coll_name}: 不存在")
            all_pass = False

    # Check 2: legacy data has been migrated.
    print("\n检查2: 旧集合数据是否已迁移")
    legacy_count = LegacyAnalysisDAO.count_legacy_data()
    # BUGFIX: the old code preferred EquipmentScoreDAO.count_by_member(None)
    # when available — that counts documents whose memberId IS None (usually
    # 0), not the collection total, and could falsely report "not migrated".
    # Count the whole collection directly instead.
    detail_count = db["equipment_scores_detail"].count_documents({})

    print(f"  旧集合 equipment_analysis: {legacy_count} 条文档")
    print(f"  新集合 equipment_scores_detail: {detail_count} 条文档")

    if legacy_count > 0 and detail_count == 0:
        print(f"  {Colors.RED}✗{Colors.END} 旧数据未迁移")
        all_pass = False
    elif detail_count > 0:
        print(f"  {Colors.GREEN}✓{Colors.END} 新数据已写入")

    # Check 3: custom indexes exist (informational; never fails the run).
    print("\n检查3: 是否已创建必要的索引")
    for coll_name in required_collections:
        if coll_name not in collections:
            continue

        indices = list(db[coll_name].list_indexes())
        # Subtract 1 because _id is the always-present default index.
        custom_index_count = len(indices) - 1

        if custom_index_count > 0:
            print(f"  {Colors.GREEN}✓{Colors.END} {coll_name}: {custom_index_count} 个自定义索引")
        else:
            print(f"  {Colors.YELLOW}⚠{Colors.END} {coll_name}: 无自定义索引（仅有_id索引）")

    print(f"\n{Colors.BLUE}{'='*70}")
    if all_pass:
        print(f"{Colors.GREEN}✓ 所有检查通过！数据迁移成功{Colors.END}")
    else:
        print(f"{Colors.YELLOW}⚠ 某些检查未通过，请查看上述输出{Colors.END}")
    print(f"{'='*70}{Colors.END}\n")

    return all_pass


# ====================================================================
# 命令行工具
# ====================================================================

if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser(description="数据迁移工具")
    cli.add_argument("--preview", action="store_true", help="预览迁移")
    cli.add_argument("--migrate", action="store_true", help="执行迁移")
    cli.add_argument("--verify", action="store_true", help="验证数据一致性")
    cli.add_argument("--dry-run", action="store_true", help="模拟运行（不写入数据）")

    options = cli.parse_args()

    # Flags are checked in priority order; --dry-run only applies to --migrate.
    if options.preview:
        preview_migration()
    elif options.migrate:
        migrate_all_data(dry_run=options.dry_run)
    elif options.verify:
        verify_consistency()
    else:
        cli.print_help()
