#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
目录团队分配质量检查工具

用于检查和验证目录团队分配的质量和准确性。
"""

import logging
import os
import sqlite3
import sys
from collections import defaultdict
from datetime import datetime
from typing import Dict, List, Optional, Tuple

# Module-wide logging: timestamped messages at INFO level and above.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger(__name__)

class DirectoryAssignmentQualityChecker:
    """Quality checker for directory-to-team assignments.

    Runs a set of read-only queries against the SQLite database to report
    assignment coverage, confidence levels, per-team statistics, and
    referential-integrity issues, and can emit a combined JSON report.
    """

    def __init__(self, db_path: Optional[str] = None):
        """Initialize the checker.

        Args:
            db_path: Path to the SQLite database file. When omitted, defaults
                to ``backend/data/uma_insight.db`` under the project root
                (two directories above this file).
        """
        if db_path is None:
            # Default database path: <project_root>/backend/data/uma_insight.db
            current_dir = os.path.dirname(os.path.abspath(__file__))
            project_root = os.path.dirname(os.path.dirname(current_dir))
            db_path = os.path.join(project_root, 'backend', 'data', 'uma_insight.db')

        self.db_path = db_path

    def connect_database(self) -> sqlite3.Connection:
        """Open a database connection with name-based row access.

        Returns:
            A connection whose rows support ``row['column']`` lookups.

        Raises:
            Exception: Re-raised (after logging) when the connection fails.
        """
        try:
            conn = sqlite3.connect(self.db_path)
            conn.row_factory = sqlite3.Row  # enables row['column'] access
            return conn
        except Exception as e:
            logger.error(f"连接数据库失败: {e}")
            raise

    def get_assignment_statistics(self) -> Dict:
        """Return overall assignment statistics.

        Returns:
            Dict with total/assigned/unassigned directory counts, coverage
            rate (percent), average confidence (1 decimal), and a
            confidence-level distribution keyed by bucket label.
        """
        conn = self.connect_database()
        try:
            cursor = conn.cursor()

            # Overall totals
            cursor.execute("SELECT COUNT(*) as total FROM directories")
            total_dirs = cursor.fetchone()['total']

            cursor.execute("SELECT COUNT(*) as assigned FROM directories WHERE team_id IS NOT NULL")
            assigned_dirs = cursor.fetchone()['assigned']

            # Guard against division by zero when the table is empty
            coverage_rate = (assigned_dirs / total_dirs * 100) if total_dirs > 0 else 0

            # Average confidence across assigned rows with a positive score;
            # AVG returns NULL (-> None) when no rows match, hence the `or 0`.
            cursor.execute("""
                SELECT AVG(assignment_confidence) as avg_confidence
                FROM directories
                WHERE team_id IS NOT NULL AND assignment_confidence > 0
            """)
            avg_confidence = cursor.fetchone()['avg_confidence'] or 0

            # Confidence distribution, bucketed into four labeled levels
            cursor.execute("""
                SELECT
                    CASE
                        WHEN assignment_confidence >= 90 THEN '高 (90-100)'
                        WHEN assignment_confidence >= 70 THEN '中 (70-89)'
                        WHEN assignment_confidence >= 50 THEN '低 (50-69)'
                        ELSE '极低 (0-49)'
                    END as confidence_level,
                    COUNT(*) as count
                FROM directories
                WHERE team_id IS NOT NULL
                GROUP BY confidence_level
                ORDER BY count DESC
            """)
            confidence_distribution = dict(cursor.fetchall())

            return {
                'total_directories': total_dirs,
                'assigned_directories': assigned_dirs,
                'unassigned_directories': total_dirs - assigned_dirs,
                'coverage_rate': coverage_rate,
                'average_confidence': round(avg_confidence, 1),
                'confidence_distribution': confidence_distribution
            }

        finally:
            conn.close()

    def get_team_assignment_stats(self) -> List[Dict]:
        """Return per-team assignment statistics, most-assigned team first.

        Returns:
            List of dicts with team name, directory count, and the
            average/min/max assignment confidence for that team.
        """
        conn = self.connect_database()
        try:
            cursor = conn.cursor()
            cursor.execute("""
                SELECT
                    t.name as team_name,
                    COUNT(d.id) as directory_count,
                    AVG(d.assignment_confidence) as avg_confidence,
                    MIN(d.assignment_confidence) as min_confidence,
                    MAX(d.assignment_confidence) as max_confidence
                FROM directories d
                JOIN teams t ON d.team_id = t.id
                GROUP BY t.id, t.name
                ORDER BY directory_count DESC
            """)

            team_stats = []
            for row in cursor.fetchall():
                team_stats.append({
                    'team_name': row['team_name'],
                    'directory_count': row['directory_count'],
                    # AVG can be NULL if every confidence is NULL
                    'avg_confidence': round(row['avg_confidence'], 1) if row['avg_confidence'] else 0,
                    'min_confidence': row['min_confidence'],
                    'max_confidence': row['max_confidence']
                })

            return team_stats

        finally:
            conn.close()

    def get_low_confidence_assignments(self, threshold: int = 50) -> List[Dict]:
        """Return up to 20 assigned directories below a confidence threshold.

        Args:
            threshold: Exclusive upper bound on assignment_confidence.

        Returns:
            List of dicts (path, name, confidence, method, team_name),
            ordered by ascending confidence.
        """
        conn = self.connect_database()
        try:
            cursor = conn.cursor()
            cursor.execute("""
                SELECT
                    d.path,
                    d.name,
                    d.assignment_confidence,
                    d.assignment_method,
                    t.name as team_name
                FROM directories d
                JOIN teams t ON d.team_id = t.id
                WHERE d.assignment_confidence < ?
                ORDER BY d.assignment_confidence ASC
                LIMIT 20
            """, (threshold,))

            low_confidence = []
            for row in cursor.fetchall():
                low_confidence.append({
                    'path': row['path'],
                    'name': row['name'],
                    'confidence': row['assignment_confidence'],
                    'method': row['assignment_method'],
                    'team_name': row['team_name']
                })

            return low_confidence

        finally:
            conn.close()

    def get_unassigned_directories(self) -> List[Dict]:
        """Return up to 50 unassigned directories, highest metric count first."""
        conn = self.connect_database()
        try:
            cursor = conn.cursor()
            cursor.execute("""
                SELECT
                    path,
                    name,
                    metric_count
                FROM directories
                WHERE team_id IS NULL
                ORDER BY metric_count DESC, path ASC
                LIMIT 50
            """)

            unassigned = []
            for row in cursor.fetchall():
                unassigned.append({
                    'path': row['path'],
                    'name': row['name'],
                    'metric_count': row['metric_count']
                })

            return unassigned

        finally:
            conn.close()

    def check_assignment_consistency(self) -> Dict:
        """Check referential integrity of assignments.

        Returns:
            Dict with the count of directories whose team_id points at no
            existing team, the distribution of assignment methods, and a
            boolean integrity flag (True when no dangling references exist).
        """
        conn = self.connect_database()
        try:
            cursor = conn.cursor()

            # Dangling team references: team_id set but no matching teams row
            cursor.execute("""
                SELECT COUNT(*) as invalid_assignments
                FROM directories d
                LEFT JOIN teams t ON d.team_id = t.id
                WHERE d.team_id IS NOT NULL AND t.id IS NULL
            """)
            invalid_assignments = cursor.fetchone()['invalid_assignments']

            # How many directories were assigned by each method
            cursor.execute("""
                SELECT assignment_method, COUNT(*) as count
                FROM directories
                WHERE assignment_method IS NOT NULL
                GROUP BY assignment_method
                ORDER BY count DESC
            """)
            method_distribution = dict(cursor.fetchall())

            return {
                'invalid_assignments': invalid_assignments,
                'method_distribution': method_distribution,
                'foreign_key_integrity': invalid_assignments == 0
            }

        finally:
            conn.close()

    def analyze_keyword_effectiveness(self) -> Dict:
        """Summarize how well each assignment method performs.

        Returns:
            Dict with ``method_effectiveness``: per-method usage count and
            average confidence, most-used method first.
        """
        conn = self.connect_database()
        try:
            cursor = conn.cursor()

            # Group by assignment method
            cursor.execute("""
                SELECT
                    assignment_method,
                    COUNT(*) as count,
                    AVG(assignment_confidence) as avg_confidence
                FROM directories
                WHERE assignment_method IS NOT NULL
                GROUP BY assignment_method
                ORDER BY count DESC
            """)
            method_effectiveness = []
            for row in cursor.fetchall():
                method_effectiveness.append({
                    'method': row['assignment_method'],
                    'count': row['count'],
                    'avg_confidence': round(row['avg_confidence'], 1)
                })

            return {
                'method_effectiveness': method_effectiveness
            }

        finally:
            conn.close()

    def generate_quality_report(self) -> Dict:
        """Assemble the full quality report from all individual checks."""
        logger.info("开始生成质量报告...")

        report = {
            # FIX: a plain datetime replaces the previous hack that built a
            # throwaway logging.LogRecord just to format the current time.
            'timestamp': datetime.now().isoformat(sep=' ', timespec='seconds'),
            'general_statistics': self.get_assignment_statistics(),
            'team_statistics': self.get_team_assignment_stats(),
            'low_confidence_assignments': self.get_low_confidence_assignments(),
            'unassigned_directories': self.get_unassigned_directories(),
            'consistency_check': self.check_assignment_consistency(),
            'keyword_analysis': self.analyze_keyword_effectiveness()
        }

        return report

    def print_quality_report(self, report: Dict):
        """Pretty-print a report produced by generate_quality_report.

        Args:
            report: The report dict to render to stdout.
        """
        print(f"\n{'='*60}")
        print(f"目录团队分配质量报告")
        print(f"{'='*60}")
        print(f"生成时间: {report['timestamp']}")
        print(f"\n{'='*40} 总体统计 {'='*40}")

        gen_stats = report['general_statistics']
        print(f"总目录数: {gen_stats['total_directories']}")
        print(f"已分配: {gen_stats['assigned_directories']} ({gen_stats['coverage_rate']:.2f}%)")
        print(f"未分配: {gen_stats['unassigned_directories']}")
        print(f"平均置信度: {gen_stats['average_confidence']}")

        print(f"\n置信度分布:")
        for level, count in gen_stats['confidence_distribution'].items():
            print(f"  {level}: {count}个")

        print(f"\n{'='*40} 团队分配统计 {'='*40}")
        team_stats = report['team_statistics']
        if team_stats:
            print(f"{'团队名称':<20} {'目录数':<8} {'平均置信度':<10} {'置信度范围'}")
            print(f"{'-'*70}")
            for team in team_stats[:10]:  # show the top 10 teams
                conf_range = f"{team['min_confidence']}-{team['max_confidence']}"
                print(f"{team['team_name']:<20} {team['directory_count']:<8} "
                      f"{team['avg_confidence']:<10} {conf_range}")

        low_conf = report['low_confidence_assignments']
        if low_conf:
            print(f"\n{'='*40} 低置信度分配 (<50) {'='*40}")
            print(f"{'目录路径':<50} {'置信度':<8} {'分配方法'}")
            print(f"{'-'*80}")
            for item in low_conf[:10]:  # show the first 10 entries
                # FIX: assignment_method may be NULL in the database; fall back
                # to '' so the len() call below cannot raise TypeError.
                method = item['method'] or ''
                method_short = method[:25] + "..." if len(method) > 25 else method
                print(f"{item['path']:<50} {item['confidence']:<8} {method_short}")

        unassigned = report['unassigned_directories']
        if unassigned:
            print(f"\n{'='*40} 未分配目录 {'='*40}")
            print(f"{'目录路径':<60} {'指标数':<8}")
            print(f"{'-'*70}")
            for item in unassigned[:15]:  # show the first 15 entries
                print(f"{item['path']:<60} {item['metric_count']:<8}")

        consistency = report['consistency_check']
        print(f"\n{'='*40} 一致性检查 {'='*40}")
        print(f"外键约束完整性: {'✓ 正常' if consistency['foreign_key_integrity'] else '✗ 异常'}")
        print(f"无效分配数: {consistency['invalid_assignments']}")

        if consistency['method_distribution']:
            print(f"\n分配方法分布:")
            for method, count in consistency['method_distribution'].items():
                print(f"  {method}: {count}个")

        print(f"\n{'='*60}")

    def run_quality_check(self):
        """Generate, print, and persist the quality report.

        Returns:
            The report dict on success, or None when any step fails
            (the error is logged, not raised).
        """
        logger.info("开始目录团队分配质量检查")

        try:
            report = self.generate_quality_report()
            self.print_quality_report(report)

            # Persist the report alongside the console output
            import json
            report_file = "assignment_quality_report.json"
            with open(report_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)
            logger.info(f"质量报告已保存到: {report_file}")

            return report

        except Exception as e:
            logger.error(f"质量检查过程出错: {e}")
            return None


def main():
    """Command-line entry point: parse options and run the requested check.

    Returns:
        0 on success, 1 when any step raises (the error is logged).
    """
    import argparse

    parser = argparse.ArgumentParser(description='目录团队分配质量检查工具')
    parser.add_argument('--db-path', help='数据库文件路径')
    parser.add_argument('--threshold', type=int, default=50,
                        help='低置信度阈值 (默认: 50)')
    parser.add_argument('--team-stats-only', action='store_true',
                        help='仅显示团队统计')
    parser.add_argument('--low-conf-only', action='store_true',
                        help='仅显示低置信度分配')
    parser.add_argument('--unassigned-only', action='store_true',
                        help='仅显示未分配目录')
    opts = parser.parse_args()

    try:
        checker = DirectoryAssignmentQualityChecker(opts.db_path)

        if opts.team_stats_only:
            # Compact per-team table only
            print(f"{'团队名称':<20} {'目录数':<8} {'平均置信度':<10}")
            print('-' * 40)
            for row in checker.get_team_assignment_stats():
                print(f"{row['team_name']:<20} {row['directory_count']:<8} {row['avg_confidence']:<10}")

        elif opts.low_conf_only:
            # Assignments below the configured confidence threshold only
            print(f"{'目录路径':<60} {'置信度':<8} {'分配方法'}")
            print('-' * 90)
            for entry in checker.get_low_confidence_assignments(opts.threshold):
                print(f"{entry['path']:<60} {entry['confidence']:<8} {entry['method']}")

        elif opts.unassigned_only:
            # Directories with no team assignment only
            print(f"{'目录路径':<60} {'指标数':<8}")
            print('-' * 70)
            for entry in checker.get_unassigned_directories():
                print(f"{entry['path']:<60} {entry['metric_count']:<8}")

        else:
            # No filter flag: run the full quality check
            checker.run_quality_check()

    except Exception as e:
        logger.error(f"执行过程出错: {e}")
        return 1

    return 0


if __name__ == "__main__":
    # FIX: sys.exit is the canonical way to set the process exit code; the
    # bare exit() helper comes from the site module, is meant for interactive
    # sessions, and is absent when Python runs with -S.
    sys.exit(main())