"""
数据完整性验证器

验证恢复数据的完整性、一致性等。
"""

import json
import hashlib
from pathlib import Path
from typing import Dict, List, Optional, Any, Set
from dataclasses import dataclass
from datetime import datetime, timezone

from ..utils.logger import get_module_logger
from ..utils.es_client import ESClient
from ..utils.exceptions import DataValidationError
from .storage import StorageManager
from .metadata import MetadataManager

logger = get_module_logger(__name__)


@dataclass
class ValidationResult:
    """Aggregated outcome of one integrity-validation run.

    Populated incrementally by DataIntegrityValidator as individual
    checks execute.
    """
    # Overall verdict; set to False as soon as any check fails or raises.
    valid: bool
    # Number of checks scheduled for this run.
    total_checks: int
    # Checks that completed and passed.
    passed_checks: int
    # Checks that failed or raised an exception.
    failed_checks: int
    # Non-fatal findings collected from all checks.
    warnings: List[str]
    # Fatal findings collected from all checks.
    errors: List[str]
    # Per-check detail payloads, keyed by the check's display name.
    details: Dict[str, Any]

class DataIntegrityValidator:
    """Validate the integrity and consistency of restored index data.

    Compares a restored target index against the backup it came from using
    a configurable battery of checks: document counts, sampled content
    comparison, index structure, data consistency, checksums and mapping
    sanity. Each check returns a uniform dict with ``passed``, ``warnings``,
    ``errors`` and ``details`` keys which are folded into a ValidationResult.
    """

    def __init__(self, es_client: ESClient):
        """Initialize the validator.

        Args:
            es_client: Elasticsearch client wrapper used for all index access.
        """
        self.es_client = es_client
        self.storage_manager = StorageManager()
        self.metadata_manager = MetadataManager()

        logger.info("数据完整性验证器初始化完成")

    def validate_restore_integrity(
        self,
        backup_id: str,
        target_index: str,
        validation_options: Optional[Dict[str, Any]] = None
    ) -> ValidationResult:
        """Run all integrity checks for a completed restore.

        Args:
            backup_id: Identifier of the backup that was restored.
            target_index: Index the backup was restored into.
            validation_options: Optional tuning knobs. Recognized keys:
                ``validate_checksums`` / ``validate_mappings`` (bool, default
                True), ``count_tolerance``, ``sample_size``,
                ``match_threshold`` and ``type_sample_size``.

        Returns:
            Aggregated ValidationResult; ``valid`` is False if any check
            failed or raised.
        """
        logger.info(f"开始验证恢复完整性: {backup_id} -> {target_index}")

        options = validation_options or {}

        result = ValidationResult(
            valid=True,
            total_checks=0,
            passed_checks=0,
            failed_checks=0,
            warnings=[],
            errors=[],
            details={}
        )

        try:
            # Resolve the backup first; without it there is nothing to verify.
            backup_info = self.metadata_manager.get_backup_info(backup_id)
            if not backup_info:
                result.errors.append(f"备份不存在: {backup_id}")
                result.valid = False
                return result

            backup_metadata = backup_info.get("full_metadata", {})

            # Mandatory checks, executed in order. Every callable shares the
            # (backup_metadata, target_index, options) signature.
            checks = [
                ("文档数量验证", self._validate_document_count),
                ("数据内容验证", self._validate_data_content),
                ("索引结构验证", self._validate_index_structure),
                ("数据一致性验证", self._validate_data_consistency)
            ]

            # Optional checks, enabled by default.
            if options.get("validate_checksums", True):
                checks.append(("校验和验证", self._validate_checksums))

            if options.get("validate_mappings", True):
                checks.append(("映射验证", self._validate_mappings))

            result.total_checks = len(checks)

            for check_name, check_func in checks:
                logger.info(f"执行检查: {check_name}")

                try:
                    check_result = check_func(backup_metadata, target_index, options)

                    if check_result["passed"]:
                        result.passed_checks += 1
                    else:
                        result.failed_checks += 1
                        result.valid = False

                    # Keep the full per-check payload for reporting.
                    result.details[check_name] = check_result

                    # Bubble individual findings up into the summary.
                    result.warnings.extend(check_result.get("warnings", []))
                    result.errors.extend(check_result.get("errors", []))

                except Exception as e:
                    # A crashing check counts as a failure but must not abort
                    # the remaining checks.
                    logger.error(f"检查失败: {check_name}, {e}")
                    result.failed_checks += 1
                    result.valid = False
                    result.errors.append(f"{check_name}执行失败: {e}")

            logger.info(f"验证完成: 通过 {result.passed_checks}/{result.total_checks} 项检查")

        except Exception as e:
            logger.error(f"验证过程中出错: {e}")
            result.valid = False
            result.errors.append(f"验证过程出错: {e}")

        return result

    def _validate_document_count(
        self,
        backup_metadata: Dict[str, Any],
        target_index: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Check that the target index holds as many documents as the backup.

        A non-zero ``count_tolerance`` option allows a bounded difference to
        pass with a warning instead of failing.
        """
        result = {
            "passed": False,
            "warnings": [],
            "errors": [],
            "details": {}
        }

        try:
            # Document count recorded in the backup metadata.
            backup_doc_count = backup_metadata.get("backed_up_documents", 0)

            # Live document count in the target index.
            count_response = self.es_client.client.count(index=target_index)
            target_doc_count = count_response["count"]

            # Compute the gap once and reuse it for reporting and the verdict.
            difference = abs(backup_doc_count - target_doc_count)

            result["details"] = {
                "backup_documents": backup_doc_count,
                "target_documents": target_doc_count,
                "difference": difference
            }

            if difference == 0:
                result["passed"] = True
            else:
                tolerance = options.get("count_tolerance", 0)

                if difference <= tolerance:
                    result["passed"] = True
                    result["warnings"].append(
                        f"文档数量差异在容忍范围内: 差异 {difference}, 容忍度 {tolerance}"
                    )
                else:
                    result["errors"].append(
                        f"文档数量不匹配: 备份 {backup_doc_count}, 目标 {target_doc_count}"
                    )

        except Exception as e:
            result["errors"].append(f"文档数量验证失败: {e}")

        return result

    def _validate_data_content(
        self,
        backup_metadata: Dict[str, Any],
        target_index: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Compare a sample of backup documents against the target index.

        Reads up to ``sample_size`` documents from the backup, fetches each
        by id from the target and compares the sources. Passes when the
        match rate reaches ``match_threshold`` (default 0.95).
        """
        result = {
            "passed": False,
            "warnings": [],
            "errors": [],
            "details": {}
        }

        try:
            # Sampling keeps the check affordable on large indices.
            sample_size = options.get("sample_size", 100)

            backup_path = backup_metadata["backup_path"]
            backup_storage = self.storage_manager.create_backup_storage(Path(backup_path))

            # Take the first `sample_size` documents from the backup stream.
            sample_docs = []
            for document in backup_storage.read_all_documents():
                if len(sample_docs) >= sample_size:
                    break
                sample_docs.append({
                    "id": document.get("_id"),
                    "source": document.get("_source", {})
                })

            matched_docs = 0
            mismatched_docs = 0
            missing_docs = 0

            for doc in sample_docs:
                doc_id = doc["id"]
                expected_source = doc["source"]

                try:
                    # Fetch the restored copy of the sampled document.
                    response = self.es_client.client.get(
                        index=target_index,
                        id=doc_id,
                        _source=True
                    )

                    actual_source = response["_source"]

                    if self._compare_documents(expected_source, actual_source):
                        matched_docs += 1
                    else:
                        mismatched_docs += 1
                        result["warnings"].append(f"文档内容不匹配: {doc_id}")

                except Exception:
                    # Any GET failure (typically a 404) is counted as a
                    # missing document rather than aborting the whole check.
                    missing_docs += 1
                    result["warnings"].append(f"文档缺失: {doc_id}")

            # Compute the rate once; reuse for both reporting and the verdict.
            match_rate = matched_docs / len(sample_docs) if sample_docs else 0

            result["details"] = {
                "sample_size": len(sample_docs),
                "matched_documents": matched_docs,
                "mismatched_documents": mismatched_docs,
                "missing_documents": missing_docs,
                "match_rate": match_rate
            }

            match_threshold = options.get("match_threshold", 0.95)

            if match_rate >= match_threshold:
                result["passed"] = True
            else:
                result["errors"].append(
                    f"数据内容匹配率过低: {match_rate:.2%} < {match_threshold:.2%}"
                )

        except Exception as e:
            result["errors"].append(f"数据内容验证失败: {e}")

        return result

    def _validate_index_structure(
        self,
        backup_metadata: Dict[str, Any],
        target_index: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Compare the target index mapping with the one stored in the backup.

        When the backup carries no ``index_metadata.json`` the check passes
        with a warning, since there is nothing to compare against.
        """
        result = {
            "passed": False,
            "warnings": [],
            "errors": [],
            "details": {}
        }

        try:
            # Current structure of the restored index.
            target_mapping = self.es_client.get_mapping(target_index)
            target_settings = self.es_client.get_settings(target_index)

            # Original structure captured at backup time.
            backup_path = backup_metadata["backup_path"]
            index_metadata_file = Path(backup_path) / "index_metadata.json"

            if index_metadata_file.exists():
                with open(index_metadata_file, 'r', encoding='utf-8') as f:
                    index_metadata = json.load(f)

                source_index = backup_metadata["source_index"]

                mapping_compatible = True
                if "mappings" in index_metadata and source_index in index_metadata["mappings"]:
                    original_mapping = index_metadata["mappings"][source_index]["mappings"]
                    current_mapping = target_mapping[target_index]["mappings"]

                    mapping_diff = self._compare_mappings(original_mapping, current_mapping)
                    if mapping_diff:
                        # Mapping drift is reported as warnings, one per diff.
                        mapping_compatible = False
                        result["warnings"].extend([f"映射差异: {diff}" for diff in mapping_diff])

                result["details"]["mapping_compatible"] = mapping_compatible
                result["passed"] = mapping_compatible
            else:
                # Nothing to compare against: pass, but flag the gap.
                result["warnings"].append("备份中缺少索引元数据文件")
                result["passed"] = True

        except Exception as e:
            result["errors"].append(f"索引结构验证失败: {e}")

        return result

    def _validate_data_consistency(
        self,
        backup_metadata: Dict[str, Any],
        target_index: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Run advisory consistency checks (duplicates, field-type drift).

        Findings surface as warnings only; the check fails solely on
        "critical" errors, which the sub-checks do not currently emit.
        """
        result = {
            "passed": False,
            "warnings": [],
            "errors": [],
            "details": {}
        }

        try:
            # Duplicate-id probe (advisory; see NOTE in the helper).
            duplicate_check = self._check_duplicate_documents(target_index)
            result["details"]["duplicate_check"] = duplicate_check

            if duplicate_check["has_duplicates"]:
                result["warnings"].append(
                    f"发现重复文档: {duplicate_check['duplicate_count']} 个"
                )

            # Per-field type drift across a sample of documents.
            type_check = self._check_data_types(target_index, options.get("type_sample_size", 50))
            result["details"]["type_check"] = type_check

            if not type_check["consistent"]:
                result["warnings"].extend(type_check["inconsistencies"])

            # Only errors marked "critical" fail the check; both sub-checks
            # above report exclusively via warnings, so this normally passes.
            critical_issues = len([e for e in result["errors"] if "critical" in e.lower()])
            result["passed"] = critical_issues == 0

        except Exception as e:
            result["errors"].append(f"数据一致性验证失败: {e}")

        return result

    def _validate_checksums(
        self,
        backup_metadata: Dict[str, Any],
        target_index: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Compare the target index checksum with the one stored at backup time.

        Passes with a warning when the backup recorded no checksum.
        """
        result = {
            "passed": False,
            "warnings": [],
            "errors": [],
            "details": {}
        }

        try:
            # Checksum of the restored index as it stands now.
            target_checksum = self._calculate_index_checksum(target_index)

            # Checksum recorded when the backup was taken, if any.
            backup_checksum = backup_metadata.get("checksum")

            result["details"] = {
                "target_checksum": target_checksum,
                "backup_checksum": backup_checksum
            }

            if backup_checksum:
                if target_checksum == backup_checksum:
                    result["passed"] = True
                else:
                    result["errors"].append("数据校验和不匹配，可能存在数据损坏")
            else:
                # No baseline to compare against: pass, but flag it.
                result["warnings"].append("备份中缺少校验和信息，无法验证")
                result["passed"] = True

        except Exception as e:
            result["errors"].append(f"校验和验证失败: {e}")

        return result

    def _validate_mappings(
        self,
        backup_metadata: Dict[str, Any],
        target_index: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Sanity-check the target index mapping (non-empty, not oversized)."""
        result = {
            "passed": False,
            "warnings": [],
            "errors": [],
            "details": {}
        }

        try:
            target_mapping = self.es_client.get_mapping(target_index)

            # Summarize the field inventory of the restored mapping.
            mapping_analysis = self._analyze_mapping_structure(target_mapping[target_index]["mappings"])
            result["details"]["mapping_analysis"] = mapping_analysis

            # An empty mapping is an error; an excessive one only a warning.
            if mapping_analysis["field_count"] == 0:
                result["errors"].append("目标索引没有字段映射")
            elif mapping_analysis["field_count"] > 1000:
                result["warnings"].append(f"字段数量过多: {mapping_analysis['field_count']}")

            result["passed"] = len(result["errors"]) == 0

        except Exception as e:
            result["errors"].append(f"映射验证失败: {e}")

        return result

    def _compare_documents(self, doc1: Dict[str, Any], doc2: Dict[str, Any]) -> bool:
        """Return True when both documents serialize to identical JSON.

        Sorted-key serialization makes the comparison independent of key
        order. Content that cannot be serialized counts as a mismatch.
        """
        try:
            return json.dumps(doc1, sort_keys=True) == json.dumps(doc2, sort_keys=True)
        except (TypeError, ValueError):
            # TypeError: unserializable value; ValueError: circular reference.
            return False

    def _compare_mappings(self, mapping1: Dict[str, Any], mapping2: Dict[str, Any]) -> List[str]:
        """Return a list of human-readable differences between two mappings.

        Placeholder: currently always returns an empty list. A full
        implementation would compare field types, analyzers, etc.
        """
        differences: List[str] = []
        return differences

    def _check_duplicate_documents(self, index: str) -> Dict[str, Any]:
        """Probe the index for duplicate ``_id`` values via a terms aggregation.

        Returns a dict with ``has_duplicates``, ``duplicate_count`` and up to
        100 ``duplicate_ids``. Any failure degrades to "no duplicates".

        NOTE(review): modern Elasticsearch disables fielddata on ``_id``, so
        this aggregation is typically rejected and the except branch makes
        the check a silent no-op — confirm against the target ES version.
        """
        try:
            agg_query = {
                "size": 0,
                "aggs": {
                    "duplicate_ids": {
                        "terms": {
                            "field": "_id",
                            "min_doc_count": 2,
                            "size": 100
                        }
                    }
                }
            }

            response = self.es_client.client.search(index=index, body=agg_query)
            duplicates = response["aggregations"]["duplicate_ids"]["buckets"]

            return {
                "has_duplicates": len(duplicates) > 0,
                "duplicate_count": len(duplicates),
                "duplicate_ids": [bucket["key"] for bucket in duplicates]
            }

        except Exception as e:
            # Best-effort probe: never fail the caller over this.
            logger.warning(f"检查重复文档失败: {e}")
            return {"has_duplicates": False, "duplicate_count": 0, "duplicate_ids": []}

    def _check_data_types(self, index: str, sample_size: int) -> Dict[str, Any]:
        """Sample documents and detect fields whose value types vary.

        Returns ``consistent`` (bool), the observed ``field_types`` map and a
        list of ``inconsistencies``. Failures degrade to "consistent".
        """
        try:
            search_query = {
                "size": sample_size,
                "_source": True
            }

            response = self.es_client.client.search(index=index, body=search_query)
            documents = response["hits"]["hits"]

            # field path -> set of Python type names observed at that path.
            field_types: Dict[str, Set[str]] = {}
            inconsistencies: List[str] = []

            for doc in documents:
                source = doc["_source"]
                self._analyze_document_types(source, field_types, "", inconsistencies)

            return {
                "consistent": len(inconsistencies) == 0,
                "field_types": field_types,
                "inconsistencies": inconsistencies
            }

        except Exception as e:
            # Best-effort probe: never fail the caller over this.
            logger.warning(f"检查数据类型失败: {e}")
            return {"consistent": True, "field_types": {}, "inconsistencies": []}

    def _analyze_document_types(
        self,
        obj: Any,
        field_types: Dict[str, Set[str]],
        prefix: str,
        inconsistencies: List[str]
    ) -> None:
        """Recursively record the Python type of every field in *obj*.

        Mutates ``field_types`` (path -> set of type names) and appends to
        ``inconsistencies`` when a field is seen with more than one type.
        Only a *newly observed* conflicting type triggers a message, so the
        same inconsistency is not re-reported for every subsequent document.
        """
        if isinstance(obj, dict):
            for key, value in obj.items():
                field_path = f"{prefix}.{key}" if prefix else key
                value_type = type(value).__name__

                types_seen = field_types.setdefault(field_path, set())
                is_new_type = value_type not in types_seen
                types_seen.add(value_type)

                # Report once per new conflicting type, not once per document.
                if is_new_type and len(types_seen) > 1:
                    inconsistencies.append(f"字段 {field_path} 类型不一致: {field_types[field_path]}")

                # Descend into nested objects and arrays.
                if isinstance(value, (dict, list)):
                    self._analyze_document_types(value, field_types, field_path, inconsistencies)

        elif isinstance(obj, list):
            for i, item in enumerate(obj):
                if isinstance(item, (dict, list)):
                    self._analyze_document_types(item, field_types, f"{prefix}[{i}]", inconsistencies)

    def _calculate_index_checksum(self, index: str) -> str:
        """Return a coarse SHA-256 fingerprint of the index, or "" on failure.

        Simplified scheme: hashes only ``doc_count:store_size`` from the
        index stats. A faithful implementation would account for document
        ordering, shards, etc.
        """
        try:
            stats = self.es_client.client.indices.stats(index=index)

            doc_count = stats["indices"][index]["total"]["docs"]["count"]
            store_size = stats["indices"][index]["total"]["store"]["size_in_bytes"]

            checksum_data = f"{doc_count}:{store_size}"
            return hashlib.sha256(checksum_data.encode()).hexdigest()

        except Exception as e:
            # Callers treat "" as "checksum unavailable".
            logger.warning(f"计算索引校验和失败: {e}")
            return ""

    def _analyze_mapping_structure(self, mapping: Dict[str, Any]) -> Dict[str, Any]:
        """Count fields by type in an index mapping, recursing into objects.

        Returns a summary dict with ``field_count`` plus per-category counts
        (nested, text, keyword, numeric, date).
        """
        analysis = {
            "field_count": 0,
            "nested_fields": 0,
            "text_fields": 0,
            "keyword_fields": 0,
            "numeric_fields": 0,
            "date_fields": 0
        }

        def count_fields(properties: Dict[str, Any], prefix: str = "") -> None:
            # `prefix` tracks the dotted path but is not read anywhere yet.
            for field_name, field_config in properties.items():
                analysis["field_count"] += 1

                field_type = field_config.get("type", "")

                if field_type == "nested":
                    analysis["nested_fields"] += 1
                elif field_type == "text":
                    analysis["text_fields"] += 1
                elif field_type == "keyword":
                    analysis["keyword_fields"] += 1
                elif field_type in ["integer", "long", "float", "double"]:
                    analysis["numeric_fields"] += 1
                elif field_type == "date":
                    analysis["date_fields"] += 1

                # Object/nested fields carry their own sub-properties.
                if "properties" in field_config:
                    count_fields(field_config["properties"], f"{prefix}{field_name}.")

        if "properties" in mapping:
            count_fields(mapping["properties"])

        return analysis
