"""
数据质量检查

提供数据质量检查、验证和清洗功能。
"""

from typing import Dict, List, Any, Optional, Tuple, Union
from datetime import datetime, timedelta
from dataclasses import dataclass
from enum import Enum
import pandas as pd
import numpy as np
from abc import ABC, abstractmethod


class QualityLevel(Enum):
    """Data quality level, ordered from best to worst."""
    EXCELLENT = "excellent"  # excellent
    GOOD = "good"           # good
    FAIR = "fair"           # acceptable
    POOR = "poor"           # poor
    CRITICAL = "critical"   # critical problem


class QualityIssueType(Enum):
    """Category of a data-quality problem."""
    MISSING_DATA = "missing_data"           # missing data
    DUPLICATE_DATA = "duplicate_data"       # duplicate data
    OUTLIER = "outlier"                     # outlier values
    INCONSISTENT = "inconsistent"           # inconsistent data
    INVALID_FORMAT = "invalid_format"       # invalid format
    STALE_DATA = "stale_data"              # stale (expired) data
    INCOMPLETE = "incomplete"               # incomplete data
    ACCURACY_ISSUE = "accuracy_issue"       # accuracy problem


@dataclass
class QualityIssue:
    """A single data-quality finding reported by a checker."""
    issue_type: QualityIssueType
    severity: QualityLevel
    description: str
    field: Optional[str] = None       # field the issue relates to, if any
    value: Optional[Any] = None       # offending value, if applicable
    suggestion: Optional[str] = None  # suggested remediation
    timestamp: Optional[datetime] = None
    
    def __post_init__(self):
        # Default the timestamp to the creation time when not supplied.
        if self.timestamp is None:
            self.timestamp = datetime.now()


@dataclass
class QualityReport:
    """Aggregated result of one data-quality check run."""
    data_source: str
    data_type: str
    check_timestamp: datetime
    overall_quality: QualityLevel
    total_records: int
    valid_records: int
    issues: List[QualityIssue]
    metrics: Dict[str, Any]
    recommendations: List[str]
    
    @property
    def quality_score(self) -> float:
        """Quality score in the range 0-100."""
        if self.total_records == 0:
            return 0.0
        
        # Points deducted per issue, keyed by severity; anything else is free.
        penalty_table = {
            QualityLevel.CRITICAL: 20,
            QualityLevel.POOR: 10,
            QualityLevel.FAIR: 5,
        }
        deduction = sum(penalty_table.get(issue.severity, 0) for issue in self.issues)
        
        ratio_score = (self.valid_records / self.total_records) * 100
        return max(0, ratio_score - deduction)
    
    @property
    def issue_summary(self) -> Dict[QualityIssueType, int]:
        """Count of issues grouped by issue type."""
        counts: Dict[QualityIssueType, int] = {}
        for issue in self.issues:
            counts[issue.issue_type] = counts.get(issue.issue_type, 0) + 1
        return counts


class QualityChecker(ABC):
    """Abstract base class for data-quality checkers."""
    
    def __init__(self, name: str):
        """
        Initialize the quality checker.
        
        Args:
            name: Checker name (used to identify and remove the checker)
        """
        self.name = name
    
    @abstractmethod
    async def check(self, data: Any, metadata: Optional[Dict[str, Any]] = None) -> List[QualityIssue]:
        """
        Run the quality check.
        
        Args:
            data: Data to check
            metadata: Optional metadata
            
        Returns:
            List of detected quality issues
        """
        pass


class CompletenessChecker(QualityChecker):
    """Completeness checker: verifies required fields exist and are populated."""
    
    def __init__(self, required_fields: List[str], threshold: float = 0.95):
        """
        Initialize the completeness checker.
        
        Args:
            required_fields: Fields that must be present in the data
            threshold: Minimum acceptable fraction (0-1) of non-missing values per field
        """
        super().__init__("completeness")
        self.required_fields = required_fields
        self.threshold = threshold
    
    async def check(self, data: Any, metadata: Optional[Dict[str, Any]] = None) -> List[QualityIssue]:
        """
        Check data completeness.
        
        Args:
            data: Data to check (pandas DataFrame or dict; other types yield no issues)
            metadata: Optional metadata (unused)
            
        Returns:
            List of detected quality issues
        """
        issues: List[QualityIssue] = []
        
        if isinstance(data, pd.DataFrame):
            # DataFrame completeness check
            total_rows = len(data)
            
            for field in self.required_fields:
                if field not in data.columns:
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.MISSING_DATA,
                        severity=QualityLevel.CRITICAL,
                        description=f"Required field '{field}' is missing",
                        field=field,
                        suggestion=f"Add the missing field '{field}'"
                    ))
                    continue
                
                # Fraction of non-null values. An empty frame has nothing
                # missing, so treat it as fully complete — this also avoids
                # the previous ZeroDivisionError when total_rows == 0.
                missing_count = data[field].isna().sum()
                completeness_ratio = (
                    (total_rows - missing_count) / total_rows if total_rows else 1.0
                )
                
                if completeness_ratio < self.threshold:
                    # Below 50% complete is critical; otherwise poor.
                    severity = QualityLevel.CRITICAL if completeness_ratio < 0.5 else QualityLevel.POOR
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.MISSING_DATA,
                        severity=severity,
                        description=f"Field '{field}' has {missing_count} missing values ({completeness_ratio:.2%} complete)",
                        field=field,
                        suggestion=f"Fill missing values in field '{field}'"
                    ))
        
        elif isinstance(data, dict):
            # Dict completeness check
            for field in self.required_fields:
                if field not in data:
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.MISSING_DATA,
                        severity=QualityLevel.CRITICAL,
                        description=f"Required field '{field}' is missing",
                        field=field,
                        suggestion=f"Add the missing field '{field}'"
                    ))
                elif data[field] is None:
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.MISSING_DATA,
                        severity=QualityLevel.POOR,
                        description=f"Field '{field}' has null value",
                        field=field,
                        value=data[field],
                        suggestion=f"Provide a valid value for field '{field}'"
                    ))
        
        return issues


class ConsistencyChecker(QualityChecker):
    """Consistency checker: validates dtypes and numeric ranges against rules."""
    
    def __init__(self, consistency_rules: Dict[str, Any]):
        """
        Initialize the consistency checker.
        
        Args:
            consistency_rules: Rules dict with optional "data_types" and
                "value_ranges" sections
        """
        super().__init__("consistency")
        self.consistency_rules = consistency_rules
    
    async def check(self, data: Any, metadata: Optional[Dict[str, Any]] = None) -> List[QualityIssue]:
        """
        Check data consistency.
        
        Args:
            data: Data to check (only DataFrames are inspected)
            metadata: Optional metadata (unused)
            
        Returns:
            List of detected quality issues
        """
        if not isinstance(data, pd.DataFrame):
            return []
        # Dtype issues are reported first, then range issues, matching the
        # order of the rule sections.
        return self._dtype_issues(data) + self._range_issues(data)
    
    def _dtype_issues(self, frame: pd.DataFrame) -> List[QualityIssue]:
        """Issues for columns whose dtype differs from the configured one."""
        found: List[QualityIssue] = []
        for field, expected_type in self.consistency_rules.get("data_types", {}).items():
            if field not in frame.columns:
                continue
            actual_type = frame[field].dtype
            if str(actual_type) == expected_type:
                continue
            found.append(QualityIssue(
                issue_type=QualityIssueType.INCONSISTENT,
                severity=QualityLevel.FAIR,
                description=f"Field '{field}' has inconsistent data type: expected {expected_type}, got {actual_type}",
                field=field,
                suggestion=f"Convert field '{field}' to {expected_type}"
            ))
        return found
    
    def _range_issues(self, frame: pd.DataFrame) -> List[QualityIssue]:
        """Issues for numeric columns with values outside the configured range."""
        found: List[QualityIssue] = []
        for field, range_rule in self.consistency_rules.get("value_ranges", {}).items():
            if field not in frame.columns or not pd.api.types.is_numeric_dtype(frame[field]):
                continue
            min_val, max_val = range_rule.get("min"), range_rule.get("max")
            
            if min_val is not None:
                below_min = frame[frame[field] < min_val]
                if not below_min.empty:
                    found.append(QualityIssue(
                        issue_type=QualityIssueType.INCONSISTENT,
                        severity=QualityLevel.POOR,
                        description=f"Field '{field}' has {len(below_min)} values below minimum {min_val}",
                        field=field,
                        suggestion=f"Check values in field '{field}' that are below {min_val}"
                    ))
            
            if max_val is not None:
                above_max = frame[frame[field] > max_val]
                if not above_max.empty:
                    found.append(QualityIssue(
                        issue_type=QualityIssueType.INCONSISTENT,
                        severity=QualityLevel.POOR,
                        description=f"Field '{field}' has {len(above_max)} values above maximum {max_val}",
                        field=field,
                        suggestion=f"Check values in field '{field}' that are above {max_val}"
                    ))
        return found


class OutlierChecker(QualityChecker):
    """Outlier checker: flags numeric columns that contain outlying values."""
    
    def __init__(self, method: str = "iqr", threshold: float = 1.5):
        """
        Initialize the outlier checker.
        
        Args:
            method: Detection method ("iqr" or "zscore"; any other value
                falls back to the IQR rule)
            threshold: IQR multiplier or z-score cutoff, depending on method
        """
        super().__init__("outlier")
        self.method = method
        self.threshold = threshold
    
    async def check(self, data: Any, metadata: Optional[Dict[str, Any]] = None) -> List[QualityIssue]:
        """
        Check for outliers.
        
        Args:
            data: Data to check (only DataFrames are inspected)
            metadata: Optional metadata (unused)
            
        Returns:
            List of detected quality issues
        """
        issues = []
        
        if isinstance(data, pd.DataFrame):
            numeric_columns = data.select_dtypes(include=[np.number]).columns
            
            for column in numeric_columns:
                outliers = self._detect_outliers(data[column])
                
                if len(outliers) > 0:
                    # Fewer than 5% outliers is a fair-level issue; more is poor.
                    severity = QualityLevel.FAIR if len(outliers) < len(data) * 0.05 else QualityLevel.POOR
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.OUTLIER,
                        severity=severity,
                        description=f"Field '{column}' has {len(outliers)} outliers using {self.method} method",
                        field=column,
                        suggestion=f"Review outliers in field '{column}' and consider data cleaning"
                    ))
        
        return issues
    
    def _detect_outliers(self, series: pd.Series) -> List[int]:
        """
        Detect outliers in a numeric series.
        
        Args:
            series: Series to inspect
            
        Returns:
            Index labels of the outlying values
        """
        if self.method == "zscore":
            std = series.std()
            # A constant (or single-element) series has no spread, hence no
            # z-score outliers; this also avoids dividing by zero.
            if std == 0 or pd.isna(std):
                return []
            z_scores = np.abs((series - series.mean()) / std)
            return series[z_scores > self.threshold].index.tolist()
        
        # "iqr" and any unrecognized method both use the IQR rule. Previously
        # the fallback duplicated the IQR code with a hard-coded 1.5 multiplier,
        # ignoring the configured threshold.
        return self._detect_outliers_iqr(series, self.threshold)
    
    def _detect_outliers_iqr(self, series: pd.Series, multiplier: float = 1.5) -> List[int]:
        """Detect outliers using the IQR rule with the given multiplier."""
        q1 = series.quantile(0.25)
        q3 = series.quantile(0.75)
        iqr = q3 - q1
        lower_bound = q1 - multiplier * iqr
        upper_bound = q3 + multiplier * iqr
        return series[(series < lower_bound) | (series > upper_bound)].index.tolist()


class DuplicateChecker(QualityChecker):
    """Duplicate checker: flags repeated records in a DataFrame."""
    
    def __init__(self, key_fields: Optional[List[str]] = None):
        """
        Initialize the duplicate checker.
        
        Args:
            key_fields: Fields used to identify duplicates; when omitted,
                only fully identical rows count as duplicates
        """
        super().__init__("duplicate")
        self.key_fields = key_fields
    
    async def check(self, data: Any, metadata: Optional[Dict[str, Any]] = None) -> List[QualityIssue]:
        """
        Check for duplicate records.
        
        Args:
            data: Data to check (only DataFrames are inspected)
            metadata: Optional metadata (unused)
            
        Returns:
            List of detected quality issues
        """
        if not isinstance(data, pd.DataFrame):
            return []
        
        issues: List[QualityIssue] = []
        
        if self.key_fields:
            # Duplicates judged on whichever key fields are actually present.
            available_fields = [f for f in self.key_fields if f in data.columns]
            if available_fields:
                duplicate_count = data.duplicated(subset=available_fields, keep=False).sum()
                if duplicate_count > 0:
                    # More than 10% duplicated rows escalates from fair to poor.
                    level = QualityLevel.POOR if duplicate_count > len(data) * 0.1 else QualityLevel.FAIR
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.DUPLICATE_DATA,
                        severity=level,
                        description=f"Found {duplicate_count} duplicate records based on fields: {available_fields}",
                        suggestion="Remove or consolidate duplicate records"
                    ))
        else:
            # No key fields configured: look for fully identical rows.
            duplicate_count = data.duplicated(keep=False).sum()
            if duplicate_count > 0:
                issues.append(QualityIssue(
                    issue_type=QualityIssueType.DUPLICATE_DATA,
                    severity=QualityLevel.FAIR,
                    description=f"Found {duplicate_count} completely duplicate records",
                    suggestion="Remove duplicate records"
                ))
        
        return issues


class TimelinessChecker(QualityChecker):
    """Timeliness checker: flags data older than a maximum allowed age."""
    
    def __init__(self, max_age: timedelta, timestamp_field: str = "timestamp"):
        """
        Initialize the timeliness checker.
        
        Args:
            max_age: Maximum allowed data age
            timestamp_field: Name of the timestamp field
        """
        super().__init__("timeliness")
        self.max_age = max_age
        self.timestamp_field = timestamp_field
    
    async def check(self, data: Any, metadata: Optional[Dict[str, Any]] = None) -> List[QualityIssue]:
        """
        Check data timeliness.
        
        Args:
            data: Data to check (pandas DataFrame or dict)
            metadata: Optional metadata (unused)
            
        Returns:
            List of detected quality issues
        """
        issues = []
        current_time = datetime.now()
        
        if isinstance(data, pd.DataFrame):
            if self.timestamp_field in data.columns:
                cutoff_time = current_time - self.max_age
                
                # Vectorized parse instead of the previous per-row iterrows
                # loop: unparseable timestamps become NaT and compare False,
                # matching the old behavior of skipping invalid entries.
                timestamps = pd.to_datetime(data[self.timestamp_field], errors="coerce")
                stale_count = int((timestamps < cutoff_time).sum())
                
                if stale_count > 0:
                    # More than half the rows stale is critical.
                    severity = QualityLevel.CRITICAL if stale_count > len(data) * 0.5 else QualityLevel.POOR
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.STALE_DATA,
                        severity=severity,
                        description=f"Found {stale_count} stale records older than {self.max_age}",
                        field=self.timestamp_field,
                        suggestion="Update or remove stale data"
                    ))
        
        elif isinstance(data, dict):
            if self.timestamp_field in data:
                try:
                    timestamp = pd.to_datetime(data[self.timestamp_field])
                    if timestamp < (current_time - self.max_age):
                        issues.append(QualityIssue(
                            issue_type=QualityIssueType.STALE_DATA,
                            severity=QualityLevel.POOR,
                            description=f"Data is stale (age: {current_time - timestamp})",
                            field=self.timestamp_field,
                            value=data[self.timestamp_field],
                            suggestion="Update with fresh data"
                        ))
                except Exception:
                    # An unparseable timestamp is reported as a format issue.
                    issues.append(QualityIssue(
                        issue_type=QualityIssueType.INVALID_FORMAT,
                        severity=QualityLevel.POOR,
                        description=f"Invalid timestamp format in field '{self.timestamp_field}'",
                        field=self.timestamp_field,
                        value=data[self.timestamp_field],
                        suggestion="Use valid timestamp format"
                    ))
        
        return issues


class DataQualityChecker:
    """
    Data quality checker.
    
    Coordinates the individual quality checkers and produces a
    consolidated quality report.
    """
    
    def __init__(self):
        """Initialize the data quality checker."""
        self._checkers: List[QualityChecker] = []
        self._default_checkers_initialized = False
    
    def add_checker(self, checker: QualityChecker) -> None:
        """
        Register a quality checker.
        
        Args:
            checker: Checker instance to add
        """
        self._checkers.append(checker)
    
    def remove_checker(self, checker_name: str) -> bool:
        """
        Remove a quality checker by name.
        
        Args:
            checker_name: Name of the checker to remove
            
        Returns:
            True if a checker was removed, False otherwise
        """
        for i, checker in enumerate(self._checkers):
            if checker.name == checker_name:
                del self._checkers[i]
                return True
        return False
    
    def _initialize_default_checkers(self, data_type: str) -> None:
        """
        Register the default checkers for the given data type (runs once).
        
        Args:
            data_type: Data type ("market_data" or "fundamental_data";
                other values register nothing)
        """
        if self._default_checkers_initialized:
            return
        
        if data_type == "market_data":
            # Default checkers for market data.
            self.add_checker(CompletenessChecker(
                required_fields=["symbol", "date", "close", "volume"],
                threshold=0.95
            ))
            self.add_checker(ConsistencyChecker({
                "data_types": {
                    "close": "float64",
                    "volume": "int64"
                },
                "value_ranges": {
                    "close": {"min": 0},
                    "volume": {"min": 0}
                }
            }))
            self.add_checker(OutlierChecker(method="iqr", threshold=1.5))
            self.add_checker(DuplicateChecker(key_fields=["symbol", "date"]))
            self.add_checker(TimelinessChecker(
                max_age=timedelta(days=1),
                timestamp_field="date"
            ))
        
        elif data_type == "fundamental_data":
            # Default checkers for fundamental data.
            self.add_checker(CompletenessChecker(
                required_fields=["symbol", "period", "revenue", "net_income"],
                threshold=0.90
            ))
            self.add_checker(ConsistencyChecker({
                "data_types": {
                    "revenue": "float64",
                    "net_income": "float64"
                }
            }))
            self.add_checker(DuplicateChecker(key_fields=["symbol", "period"]))
        
        self._default_checkers_initialized = True
    
    async def check_quality(
        self,
        data: Any,
        data_source: str,
        data_type: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> QualityReport:
        """
        Run all registered checkers against the data.
        
        Args:
            data: Data to check
            data_source: Identifier of the data source
            data_type: Data type (used to pick the default checkers)
            metadata: Optional metadata forwarded to each checker
            
        Returns:
            Consolidated quality report
        """
        self._initialize_default_checkers(data_type)
        
        # Run every checker; a failing checker is recorded as an issue
        # rather than aborting the whole run.
        all_issues: List[QualityIssue] = []
        for checker in self._checkers:
            try:
                all_issues.extend(await checker.check(data, metadata))
            except Exception as e:
                all_issues.append(QualityIssue(
                    issue_type=QualityIssueType.ACCURACY_ISSUE,
                    severity=QualityLevel.FAIR,
                    description=f"Quality checker '{checker.name}' failed: {str(e)}",
                    suggestion=f"Check the configuration of '{checker.name}' checker"
                ))
        
        # One record is deemed invalid per critical/poor issue; clamp at zero
        # because the issue count can exceed the record count (previously this
        # could produce a negative valid_records).
        total_records = self._count_records(data)
        severe_issue_count = len([
            i for i in all_issues
            if i.severity in (QualityLevel.CRITICAL, QualityLevel.POOR)
        ])
        valid_records = max(0, total_records - severe_issue_count)
        
        overall_quality = self._determine_overall_quality(all_issues, total_records)
        recommendations = self._generate_recommendations(all_issues)
        metrics = self._calculate_metrics(data, all_issues)
        
        return QualityReport(
            data_source=data_source,
            data_type=data_type,
            check_timestamp=datetime.now(),
            overall_quality=overall_quality,
            total_records=total_records,
            valid_records=valid_records,
            issues=all_issues,
            metrics=metrics,
            recommendations=recommendations
        )
    
    def _count_records(self, data: Any) -> int:
        """Return the number of records in the data (0 for unknown types)."""
        if isinstance(data, pd.DataFrame):
            return len(data)
        elif isinstance(data, (list, tuple)):
            return len(data)
        elif isinstance(data, dict):
            return 1
        else:
            return 0
    
    def _determine_overall_quality(self, issues: List[QualityIssue], total_records: int) -> QualityLevel:
        """Derive the overall quality level from the collected issues."""
        if not issues:
            return QualityLevel.EXCELLENT
        
        critical_count = len([i for i in issues if i.severity == QualityLevel.CRITICAL])
        poor_count = len([i for i in issues if i.severity == QualityLevel.POOR])
        
        if critical_count > 0:
            return QualityLevel.CRITICAL
        elif poor_count > total_records * 0.1:
            return QualityLevel.POOR
        elif poor_count > 0:
            return QualityLevel.FAIR
        else:
            return QualityLevel.GOOD
    
    def _generate_recommendations(self, issues: List[QualityIssue]) -> List[str]:
        """Generate improvement recommendations from the issue types seen."""
        recommendations = []
        issue_types = set(issue.issue_type for issue in issues)
        
        if QualityIssueType.MISSING_DATA in issue_types:
            recommendations.append("实施数据完整性检查和填充策略")
        
        if QualityIssueType.DUPLICATE_DATA in issue_types:
            recommendations.append("建立重复数据检测和清理流程")
        
        if QualityIssueType.OUTLIER in issue_types:
            recommendations.append("设置异常值检测和处理机制")
        
        if QualityIssueType.STALE_DATA in issue_types:
            recommendations.append("建立数据更新和时效性监控")
        
        if QualityIssueType.INCONSISTENT in issue_types:
            recommendations.append("标准化数据格式和验证规则")
        
        return recommendations
    
    def _calculate_metrics(self, data: Any, issues: List[QualityIssue]) -> Dict[str, Any]:
        """Compute summary quality metrics; all rates are clamped to be non-negative."""
        total_records = self._count_records(data)
        denominator = max(total_records, 1)
        
        def issue_count(issue_type: QualityIssueType) -> int:
            # Number of issues of the given type.
            return len([i for i in issues if i.issue_type == issue_type])
        
        metrics = {
            "total_issues": len(issues),
            "critical_issues": len([i for i in issues if i.severity == QualityLevel.CRITICAL]),
            "poor_issues": len([i for i in issues if i.severity == QualityLevel.POOR]),
            "fair_issues": len([i for i in issues if i.severity == QualityLevel.FAIR]),
            "issue_rate": len(issues) / denominator,
            # Clamp: more issues than records would otherwise yield negative rates.
            "completeness_rate": max(0.0, 1.0 - issue_count(QualityIssueType.MISSING_DATA) / denominator),
            "consistency_rate": max(0.0, 1.0 - issue_count(QualityIssueType.INCONSISTENT) / denominator),
            "uniqueness_rate": max(0.0, 1.0 - issue_count(QualityIssueType.DUPLICATE_DATA) / denominator)
        }
        
        return metrics


class DataValidator:
    """
    Data validator.
    
    Validates data against predefined rules and constraints.
    """
    
    def __init__(self, validation_rules: Optional[Dict[str, Any]] = None):
        """
        Initialize the data validator.
        
        Args:
            validation_rules: Default validation rules
        """
        self.validation_rules = validation_rules or {}
        self.errors = []
    
    def validate(self, data: Any, rules: Optional[Dict[str, Any]] = None) -> Tuple[bool, List[str]]:
        """
        Validate data.
        
        Args:
            data: Data to validate
            rules: Optional rules overriding the defaults
            
        Returns:
            Tuple of (passed, list of error messages); unsupported data
            types always pass
        """
        self.errors = []
        active_rules = rules or self.validation_rules
        
        # Dispatch on the container type.
        if isinstance(data, pd.DataFrame):
            return self._validate_dataframe(data, active_rules)
        if isinstance(data, dict):
            return self._validate_dict(data, active_rules)
        if isinstance(data, list):
            return self._validate_list(data, active_rules)
        return True, []
    
    def _validate_dataframe(self, df: pd.DataFrame, rules: Dict[str, Any]) -> Tuple[bool, List[str]]:
        """Validate a DataFrame against column, dtype and value-range rules."""
        for col in rules.get("required_columns", []):
            if col not in df.columns:
                self.errors.append(f"Missing required column: {col}")
        
        for col, expected_type in rules.get("column_types", {}).items():
            if col not in df.columns:
                continue
            if not pd.api.types.is_dtype_equal(df[col].dtype, expected_type):
                self.errors.append(f"Column {col} has incorrect type: expected {expected_type}, got {df[col].dtype}")
        
        for col, bounds in rules.get("value_ranges", {}).items():
            if col not in df.columns or not pd.api.types.is_numeric_dtype(df[col]):
                continue
            low = bounds.get("min")
            high = bounds.get("max")
            if low is not None and (df[col] < low).any():
                self.errors.append(f"Column {col} has values below minimum {low}")
            if high is not None and (df[col] > high).any():
                self.errors.append(f"Column {col} has values above maximum {high}")
        
        return not self.errors, self.errors
    
    def _validate_dict(self, data: dict, rules: Dict[str, Any]) -> Tuple[bool, List[str]]:
        """Validate a dict against required-field and field-type rules."""
        for field in rules.get("required_fields", []):
            if field not in data:
                self.errors.append(f"Missing required field: {field}")
            elif data[field] is None:
                self.errors.append(f"Field {field} cannot be null")
        
        for field, expected_type in rules.get("field_types", {}).items():
            if field not in data or data[field] is None:
                continue
            if not isinstance(data[field], expected_type):
                self.errors.append(f"Field {field} has incorrect type: expected {expected_type.__name__}, got {type(data[field]).__name__}")
        
        return not self.errors, self.errors
    
    def _validate_list(self, data: list, rules: Dict[str, Any]) -> Tuple[bool, List[str]]:
        """Validate a list against length and element-type rules."""
        shortest = rules.get("min_length")
        if shortest is not None and len(data) < shortest:
            self.errors.append(f"List length {len(data)} is below minimum {shortest}")
        
        longest = rules.get("max_length")
        if longest is not None and len(data) > longest:
            self.errors.append(f"List length {len(data)} is above maximum {longest}")
        
        expected = rules.get("element_type")
        if expected is not None:
            for i, item in enumerate(data):
                if not isinstance(item, expected):
                    self.errors.append(f"Element at index {i} has incorrect type: expected {expected.__name__}, got {type(item).__name__}")
        
        return not self.errors, self.errors


class DataCleaner:
    """
    Data cleaner.

    Provides cleaning and preprocessing for pandas DataFrames, dicts and
    lists. Every call to ``clean`` starts a fresh, human-readable log of
    the operations performed, retrievable via ``get_cleaning_log``.
    """

    def __init__(self, cleaning_config: Optional[Dict[str, Any]] = None):
        """
        Initialize the data cleaner.

        Args:
            cleaning_config: Default cleaning configuration, applied when
                ``clean`` is called without an explicit config.
        """
        self.cleaning_config: Dict[str, Any] = cleaning_config or {}
        self.cleaning_log: List[str] = []

    def clean(self, data: Any, config: Optional[Dict[str, Any]] = None) -> Any:
        """
        Clean the given data.

        Args:
            data: Data to clean. DataFrames, dicts and lists are supported;
                any other type is returned unchanged.
            config: Cleaning configuration (optional; overrides the default).

        Returns:
            The cleaned data.
        """
        self.cleaning_log = []  # fresh log per run
        cleaning_config = config or self.cleaning_config

        if isinstance(data, pd.DataFrame):
            return self._clean_dataframe(data, cleaning_config)
        if isinstance(data, dict):
            return self._clean_dict(data, cleaning_config)
        if isinstance(data, list):
            return self._clean_list(data, cleaning_config)
        return data

    def _clean_dataframe(self, df: pd.DataFrame, config: Dict[str, Any]) -> pd.DataFrame:
        """Clean a DataFrame: missing values, duplicates, outliers, dtypes."""
        cleaned_df = df.copy()
        cleaned_df = self._handle_missing_values(cleaned_df, config.get("missing_strategy", {}))
        cleaned_df = self._handle_duplicates(cleaned_df, config)
        cleaned_df = self._handle_outliers(cleaned_df, config.get("outlier_strategy", {}))
        cleaned_df = self._convert_types(cleaned_df, config.get("type_conversions", {}))
        return cleaned_df

    def _handle_missing_values(self, df: pd.DataFrame, missing_strategy: Dict[str, Any]) -> pd.DataFrame:
        """Apply per-column missing-value strategies.

        Recognized strategy keywords: "drop", "fill_mean", "fill_median",
        "fill_mode". Any other str/int/float strategy is used as a literal
        fill value.
        """
        for column, strategy in missing_strategy.items():
            if column not in df.columns:
                continue

            numeric = pd.api.types.is_numeric_dtype(df[column])

            if strategy == "drop":
                before_count = len(df)
                df = df.dropna(subset=[column])
                self.cleaning_log.append(
                    f"Dropped {before_count - len(df)} rows with missing {column}"
                )
            elif strategy == "fill_mean":
                if numeric:
                    fill_value = df[column].mean()
                    missing_count = df[column].isna().sum()
                    df[column] = df[column].fillna(fill_value)
                    self.cleaning_log.append(
                        f"Filled {missing_count} missing values in {column} with mean {fill_value:.2f}"
                    )
                else:
                    # BUG FIX: previously "fill_mean" on a non-numeric column
                    # fell through to the literal-fill branch below and wrote
                    # the string "fill_mean" into the data. Skip and log instead.
                    self.cleaning_log.append(
                        f"Skipped fill_mean for non-numeric column {column}"
                    )
            elif strategy == "fill_median":
                if numeric:
                    fill_value = df[column].median()
                    missing_count = df[column].isna().sum()
                    df[column] = df[column].fillna(fill_value)
                    self.cleaning_log.append(
                        f"Filled {missing_count} missing values in {column} with median {fill_value:.2f}"
                    )
                else:
                    # Same fix as fill_mean: never treat the keyword as a fill value.
                    self.cleaning_log.append(
                        f"Skipped fill_median for non-numeric column {column}"
                    )
            elif strategy == "fill_mode":
                modes = df[column].mode()
                if not modes.empty:  # mode() is empty for all-NaN columns
                    mode_value = modes.iloc[0]
                    missing_count = df[column].isna().sum()
                    df[column] = df[column].fillna(mode_value)
                    self.cleaning_log.append(
                        f"Filled {missing_count} missing values in {column} with mode {mode_value}"
                    )
            elif isinstance(strategy, (str, int, float)):
                # Literal fill value (any non-keyword str/int/float).
                missing_count = df[column].isna().sum()
                df[column] = df[column].fillna(strategy)
                self.cleaning_log.append(
                    f"Filled {missing_count} missing values in {column} with {strategy}"
                )
        return df

    def _handle_duplicates(self, df: pd.DataFrame, config: Dict[str, Any]) -> pd.DataFrame:
        """Drop duplicate rows (keeping the first) when configured."""
        if config.get("remove_duplicates", False):
            before_count = len(df)
            df = df.drop_duplicates(subset=config.get("duplicate_subset"), keep="first")
            self.cleaning_log.append(f"Removed {before_count - len(df)} duplicate rows")
        return df

    @staticmethod
    def _iqr_bounds(series: pd.Series) -> Tuple[float, float]:
        """Return the (lower, upper) 1.5*IQR fences for a numeric series."""
        q1 = series.quantile(0.25)
        q3 = series.quantile(0.75)
        iqr = q3 - q1
        return q1 - 1.5 * iqr, q3 + 1.5 * iqr

    def _handle_outliers(self, df: pd.DataFrame, outlier_strategy: Dict[str, Any]) -> pd.DataFrame:
        """Remove or cap per-column outliers using the 1.5*IQR rule.

        Strategy "remove" drops out-of-fence rows; "cap" clips values to the
        fences. Non-numeric or missing columns are skipped.
        """
        for column, strategy in outlier_strategy.items():
            if column not in df.columns or not pd.api.types.is_numeric_dtype(df[column]):
                continue

            lower_bound, upper_bound = self._iqr_bounds(df[column])

            if strategy == "remove":
                before_count = len(df)
                df = df[(df[column] >= lower_bound) & (df[column] <= upper_bound)]
                self.cleaning_log.append(
                    f"Removed {before_count - len(df)} outliers from {column}"
                )
            elif strategy == "cap":
                outlier_count = ((df[column] < lower_bound) | (df[column] > upper_bound)).sum()
                df[column] = df[column].clip(lower=lower_bound, upper=upper_bound)
                self.cleaning_log.append(f"Capped {outlier_count} outliers in {column}")
        return df

    def _convert_types(self, df: pd.DataFrame, type_conversions: Dict[str, Any]) -> pd.DataFrame:
        """Convert column dtypes, logging (rather than raising) on failure."""
        for column, target_type in type_conversions.items():
            if column not in df.columns:
                continue
            try:
                df[column] = df[column].astype(target_type)
                self.cleaning_log.append(f"Converted {column} to {target_type}")
            except Exception as e:
                # Best-effort conversion: record the failure and keep going.
                self.cleaning_log.append(
                    f"Failed to convert {column} to {target_type}: {str(e)}"
                )
        return df

    def _clean_dict(self, data: dict, config: Dict[str, Any]) -> dict:
        """Clean a dict: optionally drop null values, then rename fields."""
        cleaned_data = data.copy()

        if config.get("remove_null_values", False):
            cleaned_data = {k: v for k, v in cleaned_data.items() if v is not None}
            self.cleaning_log.append("Removed null values from dictionary")

        # Rename fields according to the old-name -> new-name mapping.
        for old_name, new_name in config.get("field_mapping", {}).items():
            if old_name in cleaned_data:
                cleaned_data[new_name] = cleaned_data.pop(old_name)
                self.cleaning_log.append(f"Renamed field {old_name} to {new_name}")

        return cleaned_data

    def _clean_list(self, data: list, config: Dict[str, Any]) -> list:
        """Clean a list: optionally drop null items and de-duplicate."""
        cleaned_data = data.copy()

        if config.get("remove_null_values", False):
            cleaned_data = [item for item in cleaned_data if item is not None]
            self.cleaning_log.append("Removed null values from list")

        if config.get("remove_duplicates", False):
            before_count = len(cleaned_data)
            cleaned_data = list(dict.fromkeys(cleaned_data))  # order-preserving dedupe
            self.cleaning_log.append(
                f"Removed {before_count - len(cleaned_data)} duplicate items"
            )

        return cleaned_data

    def get_cleaning_log(self) -> List[str]:
        """Return a copy of the log from the most recent cleaning run."""
        return self.cleaning_log.copy()