"""
分块质量评估模块

提供分块质量验证和统计分析功能。
"""

from typing import List, Dict, Any
import numpy as np

from llama_index.core.schema import Document, Node

from ..config import config


class ChunkingQualityAnalyzer:
    """
    分块质量分析器

    提供全面的分块质量评估，包括大小分布、语义完整性、层次结构等方面。
    """

    def __init__(self):
        """Set up the analyzer with its default metric weights.

        The four weights sum to 1.0 and control how much each
        sub-analysis contributes to the overall quality score.
        """
        self.quality_metrics = dict(
            size_variance_weight=0.3,
            size_distribution_weight=0.3,
            content_completeness_weight=0.2,
            hierarchy_consistency_weight=0.2,
        )

    def analyze_chunking_quality(
        self,
        original_docs: List[Document],
        chunks: List[Node]
    ) -> Dict[str, Any]:
        """Run a full quality analysis over a chunking result.

        Args:
            original_docs: The documents before chunking.
            chunks: The nodes produced by the chunker.

        Returns:
            Dict[str, Any]: Report containing the per-aspect analyses,
            a weighted overall score, improvement recommendations, and
            a one-line summary.
        """
        # Degenerate case: the chunker produced nothing.
        if not chunks:
            return {
                "overall_quality_score": 0.0,
                "size_analysis": self._empty_size_analysis(),
                "content_analysis": {"issues": ["没有分块数据"]},
                "hierarchy_analysis": {"issues": ["没有分块数据"]},
                "recommendations": ["需要重新进行分块"]
            }

        # Per-aspect sub-analyses.
        size_analysis = self._analyze_chunk_sizes(chunks)
        content_analysis = self._analyze_content_completeness(original_docs, chunks)
        hierarchy_analysis = self._analyze_hierarchy_consistency(chunks)

        # Weighted combination of the sub-scores. NOTE(review): the size
        # score intentionally carries both the "variance" and
        # "distribution" weights, i.e. 0.6 of the total.
        weights = self.quality_metrics
        overall_score = (
            size_analysis["quality_score"] * weights["size_variance_weight"] +
            size_analysis["quality_score"] * weights["size_distribution_weight"] +
            content_analysis["quality_score"] * weights["content_completeness_weight"] +
            hierarchy_analysis["quality_score"] * weights["hierarchy_consistency_weight"]
        )

        return {
            "overall_quality_score": round(overall_score, 2),
            "size_analysis": size_analysis,
            "content_analysis": content_analysis,
            "hierarchy_analysis": hierarchy_analysis,
            "recommendations": self._generate_recommendations(
                size_analysis, content_analysis, hierarchy_analysis
            ),
            "summary": self._generate_quality_summary(
                overall_score, len(chunks), size_analysis
            )
        }

    def _analyze_chunk_sizes(self, chunks: List[Node]) -> Dict[str, Any]:
        """Analyze the chunk-size distribution.

        Computes basic statistics, counts undersized/oversized chunks
        relative to ``config.min_chunk_size`` / ``config.max_chunk_size``,
        and derives a quality score clamped to [0, 1].

        Args:
            chunks: Chunked nodes; whitespace-only chunks are ignored.

        Returns:
            Dict[str, Any]: quality score, statistics, distribution
            breakdown, and a list of human-readable issues.
        """
        chunk_sizes = [len(chunk.text) for chunk in chunks if chunk.text.strip()]

        if not chunk_sizes:
            return self._empty_size_analysis()

        # Basic statistics. Cast numpy scalars to plain floats so the
        # report dict stays JSON-serializable.
        min_size = min(chunk_sizes)
        max_size = max(chunk_sizes)
        mean_size = float(np.mean(chunk_sizes))
        median_size = float(np.median(chunk_sizes))
        std_size = float(np.std(chunk_sizes))
        variance_size = float(np.var(chunk_sizes))

        # Distribution relative to the configured size window.
        small_chunks = sum(1 for size in chunk_sizes if size < config.min_chunk_size)
        large_chunks = sum(1 for size in chunk_sizes if size > config.max_chunk_size)
        optimal_chunks = len(chunk_sizes) - small_chunks - large_chunks

        # Coefficient of variation measures how uneven the sizes are;
        # normalize it into a 0-1 consistency score.
        cv = std_size / mean_size if mean_size > 0 else float('inf')
        size_consistency = max(0, 1 - cv)

        # Score = share of optimally sized chunks, plus a consistency
        # bonus, minus penalties for under-/over-sized chunks.
        optimal_ratio = optimal_chunks / len(chunk_sizes)
        small_penalty = (small_chunks / len(chunk_sizes)) * 0.5
        large_penalty = (large_chunks / len(chunk_sizes)) * 0.3
        consistency_bonus = size_consistency * 0.2

        # Clamp to [0, 1]: with mostly undersized chunks the raw value
        # can go negative (penalties up to 0.5 vs. bonus <= 0.2), which
        # would corrupt the weighted overall score upstream.
        quality_score = max(
            0.0,
            min(1.0, optimal_ratio + consistency_bonus - small_penalty - large_penalty)
        )

        return {
            "quality_score": round(quality_score, 2),
            "statistics": {
                "total_chunks": len(chunk_sizes),
                "min_size": min_size,
                "max_size": max_size,
                "mean_size": round(mean_size, 1),
                "median_size": round(median_size, 1),
                "std_dev": round(std_size, 1),
                "variance": round(variance_size, 1),
                "coefficient_of_variation": round(cv, 2)
            },
            "distribution": {
                "small_chunks": small_chunks,
                "large_chunks": large_chunks,
                "optimal_chunks": optimal_chunks,
                "optimal_ratio": round(optimal_ratio, 2),
                "size_consistency": round(size_consistency, 2)
            },
            "issues": self._identify_size_issues(small_chunks, large_chunks, cv)
        }

    def _analyze_content_completeness(
        self,
        original_docs: List[Document],
        chunks: List[Node]
    ) -> Dict[str, Any]:
        """Analyze content completeness of the chunking result.

        Checks for empty chunks, very short chunks, character coverage
        versus the originals, and duplicated chunk text. Each applicable
        check contributes a factor in (0, 1]; the quality score is the
        mean of those factors.

        Args:
            original_docs: Documents before chunking.
            chunks: Chunked nodes.

        Returns:
            Dict[str, Any]: quality score, per-check counts/ratios, and
            a list of human-readable issues.
        """
        issues = []
        quality_factors = []

        # Empty chunks carry no retrievable content.
        empty_chunks = sum(1 for chunk in chunks if not chunk.text.strip())
        if empty_chunks > 0:
            issues.append(f"发现 {empty_chunks} 个空分块")
            quality_factors.append(0.5)
        else:
            quality_factors.append(1.0)

        # Very short chunks (<20 chars) usually lack enough context.
        short_chunks = sum(
            1 for chunk in chunks
            if chunk.text.strip() and len(chunk.text) < 20
        )
        if short_chunks > 0:
            issues.append(f"发现 {short_chunks} 个过短分块（<20字符）")
            quality_factors.append(0.8)
        else:
            quality_factors.append(1.0)

        # Coverage: chunked chars vs. original chars. Computed once via
        # the shared helper instead of duplicating the arithmetic here.
        coverage_ratio = self._calculate_content_coverage(original_docs, chunks)
        if original_docs and chunks:
            if coverage_ratio < 0.9:
                issues.append(f"内容覆盖率较低: {coverage_ratio:.1%}")
                quality_factors.append(0.7)
            elif coverage_ratio > 1.1:
                issues.append(f"内容重复过多: {coverage_ratio:.1%}")
                quality_factors.append(0.8)
            else:
                quality_factors.append(1.0)

        # Duplicated chunk text (exact matches after stripping), also
        # delegated to the shared helper.
        duplicate_ratio = self._calculate_duplicate_ratio(chunks)
        if any(chunk.text.strip() for chunk in chunks):
            if duplicate_ratio > 0.1:
                issues.append(f"发现较多重复内容: {duplicate_ratio:.1%}")
                quality_factors.append(0.8)
            else:
                quality_factors.append(1.0)

        quality_score = float(np.mean(quality_factors)) if quality_factors else 0.0

        return {
            "quality_score": round(quality_score, 2),
            "empty_chunks": empty_chunks,
            "short_chunks": short_chunks,
            "content_coverage": coverage_ratio,
            "duplicate_ratio": duplicate_ratio,
            "issues": issues
        }

    def _analyze_hierarchy_consistency(self, chunks: List[Node]) -> Dict[str, Any]:
        """Check the consistency of parent/child chunk relationships.

        Chunks are grouped by their ``hierarchy_level`` metadata field.
        For hierarchical results this verifies that parents list child
        ids, that children reference a parent id, and that parents are
        on average larger than children. Flat (non-hierarchical)
        chunking is treated as fully consistent.

        Args:
            chunks: Chunked nodes.

        Returns:
            Dict[str, Any]: quality score, hierarchy counts and type,
            and a list of human-readable issues.
        """
        issues = []
        quality_factors = []

        parent_chunks = [
            c for c in chunks if c.metadata.get("hierarchy_level") == "parent"
        ]
        child_chunks = [
            c for c in chunks if c.metadata.get("hierarchy_level") == "child"
        ]

        if parent_chunks:
            # Every parent should enumerate its children.
            orphaned_parents = sum(
                1 for p in parent_chunks
                if not p.metadata.get("child_chunk_ids", [])
            )
            if orphaned_parents > 0:
                issues.append(f"发现 {orphaned_parents} 个没有子级的父级分块")
                quality_factors.append(0.8)
            else:
                quality_factors.append(1.0)

            # Every child should point back to a parent.
            orphaned_children = sum(
                1 for c in child_chunks
                if not c.metadata.get("parent_chunk_id")
            )
            if orphaned_children > 0:
                issues.append(f"发现 {orphaned_children} 个没有父级引用的子级分块")
                quality_factors.append(0.8)
            else:
                quality_factors.append(1.0)

            # Size sanity: parents should be larger than children on
            # average. (Both lists are nonempty here, so the means are
            # well defined.)
            if child_chunks:
                avg_parent_size = np.mean([len(p.text) for p in parent_chunks])
                avg_child_size = np.mean([len(c.text) for c in child_chunks])
                if avg_parent_size <= avg_child_size:
                    issues.append("父级分块平均大小不应小于子级分块")
                    quality_factors.append(0.7)
                else:
                    quality_factors.append(1.0)
        else:
            # Flat chunking — nothing hierarchical to validate.
            quality_factors.append(1.0)

        # Source-document id bookkeeping.
        id_issues = self._check_id_consistency(chunks)
        if id_issues:
            issues.extend(id_issues)
            quality_factors.append(0.9)
        else:
            quality_factors.append(1.0)

        quality_score = np.mean(quality_factors) if quality_factors else 1.0

        return {
            "quality_score": round(quality_score, 2),
            "parent_chunks": len(parent_chunks),
            "child_chunks": len(child_chunks),
            "hierarchy_type": "hierarchical" if parent_chunks else "flat",
            "issues": issues
        }

    def _calculate_content_coverage(
        self,
        original_docs: List[Document],
        chunks: List[Node]
    ) -> float:
        """Return the chunked-to-original character ratio.

        Returns 0.0 when either side is empty or the originals contain
        no text at all.
        """
        if not original_docs or not chunks:
            return 0.0

        original_chars = sum(len(doc.text) for doc in original_docs)
        if original_chars == 0:
            return 0.0

        chunked_chars = sum(len(chunk.text) for chunk in chunks)
        return chunked_chars / original_chars

    def _calculate_duplicate_ratio(self, chunks: List[Node]) -> float:
        """Return the fraction of non-empty chunk texts that are exact duplicates."""
        texts = [c.text.strip() for c in chunks if c.text.strip()]
        if not texts:
            return 0.0
        # 1 - unique/total: 0.0 means all texts are distinct.
        return 1 - len(set(texts)) / len(texts)

    def _identify_size_issues(self, small_chunks: int, large_chunks: int, cv: float) -> List[str]:
        """Build human-readable messages for size-related problems.

        Args:
            small_chunks: Count of chunks below the configured minimum.
            large_chunks: Count of chunks above the configured maximum.
            cv: Coefficient of variation of the chunk sizes.

        Returns:
            List[str]: one message per detected problem (may be empty).
        """
        issues = []

        if small_chunks:
            issues.append(f"过小分块过多: {small_chunks} 个")

        if large_chunks:
            issues.append(f"过大分块过多: {large_chunks} 个")

        # A coefficient of variation above 1 indicates very uneven sizes.
        if cv > 1.0:
            issues.append(f"分块大小不均匀: 变异系数 {cv:.2f}")

        return issues

    def _check_id_consistency(self, chunks: List[Node]) -> List[str]:
        """Check that every chunk records its source document id.

        Args:
            chunks: Chunked nodes.

        Returns:
            List[str]: issue messages; empty when every chunk carries a
            truthy ``source_doc_id`` metadata entry.
        """
        issues = []

        # Count chunks missing a source-document reference. (A previous
        # version also collected the set of seen doc ids, but that set
        # was never used — dead code, removed.)
        unnamed_chunks = sum(
            1 for chunk in chunks
            if not chunk.metadata.get("source_doc_id")
        )
        if unnamed_chunks > 0:
            issues.append(f"发现 {unnamed_chunks} 个未指定源文档的分块")

        return issues

    def _generate_recommendations(
        self,
        size_analysis: Dict,
        content_analysis: Dict,
        hierarchy_analysis: Dict
    ) -> List[str]:
        """Derive actionable tuning suggestions from the three analyses.

        Returns:
            List[str]: at least one recommendation; a "quality is fine"
            message when no sub-analysis scored below 0.8.
        """
        recommendations = []

        # Size problems are detected by keyword-matching the issue
        # messages produced by _identify_size_issues.
        if size_analysis["quality_score"] < 0.8:
            size_issue_text = str(size_analysis["issues"])
            if "过小分块" in size_issue_text:
                recommendations.append("考虑增加分块大小或使用语义分块策略")
            if "过大分块" in size_issue_text:
                recommendations.append("考虑减少分块大小或使用递归分块策略")
            if "不均匀" in size_issue_text:
                recommendations.append("考虑使用语义分块策略提高分块均匀性")

        # Content-side problems use the structured fields directly.
        if content_analysis["quality_score"] < 0.8:
            if content_analysis["empty_chunks"] > 0:
                recommendations.append("过滤空分块或调整分块参数")
            if content_analysis.get("duplicate_ratio", 0) > 0.1:
                recommendations.append("减少分块重叠或调整分块策略")

        # Hierarchy advice only applies to hierarchical chunking.
        if hierarchy_analysis["quality_score"] < 0.8:
            if hierarchy_analysis["hierarchy_type"] == "hierarchical":
                recommendations.append("检查层次分块的父子关系一致性")

        return recommendations or ["分块质量良好，无需特别调整"]

    def _generate_quality_summary(
        self,
        overall_score: float,
        chunk_count: int,
        size_analysis: Dict
    ) -> str:
        """Format a one-line, human-readable summary of the analysis.

        Args:
            overall_score: Weighted overall quality score.
            chunk_count: Total number of chunks analyzed.
            size_analysis: Result of the size analysis (supplies the
                mean chunk size).

        Returns:
            str: "quality | count | average size" summary line.
        """
        # Map the score onto a coarse quality label.
        for threshold, label in ((0.9, "优秀"), (0.8, "良好"), (0.6, "一般")):
            if overall_score >= threshold:
                quality_level = label
                break
        else:
            quality_level = "需要改进"

        avg_size = size_analysis["statistics"]["mean_size"]
        parts = [
            f"分块质量: {quality_level} (得分: {overall_score:.2f})",
            f"分块数量: {chunk_count}",
            f"平均大小: {avg_size:.0f} 字符",
        ]
        return " | ".join(parts)

    def _empty_size_analysis(self) -> Dict[str, Any]:
        """Return a zeroed size-analysis report for empty input.

        Mirrors the key schema produced by ``_analyze_chunk_sizes`` so
        downstream consumers can rely on a stable set of keys in both
        the empty and non-empty cases.
        """
        return {
            "quality_score": 0.0,
            "statistics": {
                "total_chunks": 0,
                "min_size": 0,
                "max_size": 0,
                "mean_size": 0,
                "median_size": 0,
                "std_dev": 0,
                "variance": 0,
                # Added for schema parity with _analyze_chunk_sizes.
                "coefficient_of_variation": 0
            },
            "distribution": {
                "small_chunks": 0,
                "large_chunks": 0,
                "optimal_chunks": 0,
                "optimal_ratio": 0,
                # Added for schema parity with _analyze_chunk_sizes.
                "size_consistency": 0
            },
            "issues": ["没有分块数据"]
        }