# -*- coding: utf-8 -*-
"""
统计分析模块

提供全面的统计分析功能
"""

import pandas as pd
import numpy as np
from typing import Dict, List, Any, Optional, Tuple
from scipy import stats
from scipy.stats import chi2_contingency, pearsonr, spearmanr
from sklearn.preprocessing import LabelEncoder
try:
    from ..core.config import Config
    from ..utils.logger import LoggerMixin
except ImportError:
    # 如果相对导入失败，尝试绝对导入
    import sys
    import os
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    from core.config import Config
    from utils.logger import LoggerMixin


class StatisticalAnalyzer(LoggerMixin):
    """Statistical analyzer.

    Provides descriptive statistics, correlation analysis, normality
    testing and classical hypothesis tests over pandas DataFrames.
    """
    
    def __init__(self, config: Optional[Config] = None):
        """Initialize the analyzer.

        Args:
            config: Optional configuration object; a fresh default Config
                is created when none is supplied.
        """
        super().__init__()
        self.config = config or Config()
        # Significance decisions elsewhere use alpha = 1 - confidence_level.
        self.confidence_level = self.config.get('analysis.confidence_level', 0.95)
    
    def analyze(self, data: pd.DataFrame, features: Optional[List[str]] = None) -> Dict[str, Any]:
        """Unified analysis entry point.

        Runs descriptive statistics, correlation analysis and normality
        testing over the given data and bundles the three results.

        Args:
            data: Input DataFrame.
            features: Optional subset of feature names to analyze.

        Returns:
            Combined statistical analysis results, or an error payload
            when any stage raises.
        """
        try:
            # Each section is produced by its dedicated analysis method.
            sections = (
                ("descriptive_statistics", self.descriptive_statistics),
                ("correlation_analysis", self.correlation_analysis),
                ("normality_testing", self.normality_testing),
            )
            result: Dict[str, Any] = {"success": True}
            for section_name, runner in sections:
                result[section_name] = runner(data, features)
            
            self.log_info(f"统计分析完成，分析了 {len(data.columns)} 个特征")
            return result
            
        except Exception as e:
            self.log_error(f"统计分析失败: {str(e)}")
            return {"error": f"统计分析失败: {str(e)}"}
    
    def descriptive_statistics(self, data: pd.DataFrame, 
                             features: Optional[List[str]] = None) -> Dict[str, Any]:
        """Descriptive statistical analysis.

        Args:
            data: Input DataFrame.
            features: Optional subset of feature names to analyze; all
                columns are used when omitted.

        Returns:
            Overview, numeric/categorical summaries, missing-value and
            outlier reports, or an error payload.
        """
        try:
            # Validate the requested feature subset up front.
            if features is not None:
                missing_features = [f for f in features if f not in data.columns]
                if missing_features:
                    return {"error": f"特征不存在: {missing_features}"}
                analysis_data = data[features]
            else:
                analysis_data = data
            
            result = {
                "success": True,
                "data_overview": self._get_data_overview(analysis_data),
                "numeric_statistics": self._analyze_numeric_features(analysis_data),
                "categorical_statistics": self._analyze_categorical_features(analysis_data),
                "missing_value_analysis": self._analyze_missing_values(analysis_data),
                "outlier_analysis": self._analyze_outliers(analysis_data)
            }
            
            self.log_info(f"描述性统计分析完成，分析了 {len(analysis_data.columns)} 个特征")
            return result
            
        except Exception as e:
            self.log_error(f"描述性统计分析失败: {str(e)}")
            return {"error": f"描述性统计分析失败: {str(e)}"}
    
    def correlation_analysis(self, data: pd.DataFrame, 
                           features: Optional[List[str]] = None,
                           method: str = 'pearson') -> Dict[str, Any]:
        """Correlation analysis over the numeric features.

        Args:
            data: Input DataFrame.
            features: Optional subset of feature names to analyze.
            method: Correlation method ('pearson', 'spearman', 'kendall').

        Returns:
            Correlation matrix, pairwise significance, significant pairs
            and summary statistics, or an error payload.
        """
        try:
            # Keep only numeric columns — correlation is undefined otherwise.
            if features is None:
                numeric_data = data.select_dtypes(include=[np.number])
            else:
                numeric_data = data[features].select_dtypes(include=[np.number])
            
            if numeric_data.empty:
                return {"error": "没有可用于相关性分析的数值特征"}
            
            if len(numeric_data.columns) < 2:
                return {"error": "至少需要2个数值特征进行相关性分析"}
            
            # Correlation matrix (pandas drops NaN pairwise internally).
            correlation_matrix = numeric_data.corr(method=method)
            
            # Pairwise significance (p-values).
            significance_matrix = self._calculate_correlation_significance(numeric_data, method)
            
            # Pairs that are both strong and statistically significant.
            strong_correlations = self._find_significant_correlations(
                correlation_matrix, significance_matrix
            )
            
            # BUGFIX: exclude the diagonal (self-correlation is always 1.0),
            # otherwise max_correlation is trivially 1.0 and the mean is
            # inflated; the summary should describe distinct feature pairs.
            abs_corr = correlation_matrix.abs().to_numpy()
            off_diagonal = abs_corr[~np.eye(abs_corr.shape[0], dtype=bool)]
            
            result = {
                "success": True,
                "method": method,
                "correlation_matrix": correlation_matrix.to_dict(),
                "significance_matrix": significance_matrix,
                "strong_correlations": strong_correlations,
                "summary": {
                    "features_analyzed": list(numeric_data.columns),
                    "total_pairs": len(strong_correlations),
                    "max_correlation": float(np.nanmax(off_diagonal)),
                    "mean_correlation": float(np.nanmean(off_diagonal))
                }
            }
            
            self.log_info(f"相关性分析完成，发现 {len(strong_correlations)} 对显著相关")
            return result
            
        except Exception as e:
            self.log_error(f"相关性分析失败: {str(e)}")
            return {"error": f"相关性分析失败: {str(e)}"}
    
    def hypothesis_testing(self, data: pd.DataFrame, 
                         test_type: str,
                         **kwargs) -> Dict[str, Any]:
        """Dispatch to a specific hypothesis test.

        Args:
            data: Input DataFrame.
            test_type: One of 'ttest_1samp', 'ttest_ind', 'chi2', 'anova'.
            **kwargs: Forwarded to the selected test implementation.

        Returns:
            The selected test's result, or an error payload.
        """
        try:
            # Map test identifiers onto their implementations.
            runners = {
                'ttest_1samp': self._one_sample_ttest,
                'ttest_ind': self._independent_ttest,
                'chi2': self._chi_square_test,
                'anova': self._anova_test,
            }
            runner = runners.get(test_type)
            if runner is None:
                return {"error": f"不支持的检验类型: {test_type}"}
            return runner(data, **kwargs)
                
        except Exception as e:
            self.log_error(f"假设检验失败: {str(e)}")
            return {"error": f"假设检验失败: {str(e)}"}
    
    def normality_testing(self, data: pd.DataFrame, 
                        features: Optional[List[str]] = None) -> Dict[str, Any]:
        """Run Shapiro-Wilk, Kolmogorov-Smirnov and Anderson-Darling
        normality tests on every numeric feature.

        Args:
            data: Input DataFrame.
            features: Optional subset of columns to test; defaults to all
                numeric columns.

        Returns:
            Per-feature test results plus a summary, or an error payload.
        """
        try:
            # Restrict to numeric columns; the tests are undefined otherwise.
            if features is None:
                numeric_data = data.select_dtypes(include=[np.number])
            else:
                numeric_data = data[features].select_dtypes(include=[np.number])
            
            if numeric_data.empty:
                return {"error": "没有可用于正态性检验的数值特征"}
            
            alpha = 1 - self.confidence_level  # significance level for p-value tests
            results = {}
            
            for column in numeric_data.columns:
                feature_data = numeric_data[column].dropna()
                
                if len(feature_data) < 3:
                    results[column] = {"error": "样本量太小，无法进行正态性检验"}
                    continue
                
                # Shapiro-Wilk: only run for samples up to 5000 observations,
                # where it is accurate and cheap.
                if len(feature_data) <= 5000:
                    shapiro_stat, shapiro_p = stats.shapiro(feature_data)
                    shapiro_result = {
                        "statistic": float(shapiro_stat),
                        "p_value": float(shapiro_p),
                        "is_normal": shapiro_p > alpha
                    }
                else:
                    shapiro_result = {"note": "样本量过大，跳过Shapiro-Wilk检验"}
                
                # Kolmogorov-Smirnov against a normal parameterized by the
                # sample mean/std. NOTE(review): estimating the parameters
                # from the same sample makes this test conservative (a
                # Lilliefors correction would be stricter) — confirm this is
                # acceptable for the use case.
                ks_stat, ks_p = stats.kstest(feature_data, 'norm', 
                                            args=(feature_data.mean(), feature_data.std()))
                ks_result = {
                    "statistic": float(ks_stat),
                    "p_value": float(ks_p),
                    "is_normal": ks_p > alpha
                }
                
                # Anderson-Darling, judged at the 5% significance level
                # (index 2 of scipy's critical-value table).
                ad_result = stats.anderson(feature_data, dist='norm')
                ad_critical_value = ad_result.critical_values[2]
                
                anderson_result = {
                    "statistic": float(ad_result.statistic),
                    "critical_value_5pct": float(ad_critical_value),
                    "is_normal": ad_result.statistic < ad_critical_value
                }
                
                # BUGFIX: only count tests that actually ran. Previously a
                # skipped Shapiro-Wilk defaulted to True and was counted as
                # a pass, inflating the assessment for large samples.
                verdicts = [r["is_normal"]
                            for r in (shapiro_result, ks_result, anderson_result)
                            if "is_normal" in r]
                tests_passed = int(sum(verdicts))
                total_tests = len(verdicts)
                
                results[column] = {
                    "shapiro_wilk": shapiro_result,
                    "kolmogorov_smirnov": ks_result,
                    "anderson_darling": anderson_result,
                    "overall_assessment": {
                        "tests_passed": tests_passed,
                        "total_tests": total_tests,
                        # Majority rule over the tests that were executed
                        # (equivalent to the old ">= 2 of 3" when all ran).
                        "likely_normal": tests_passed * 2 > total_tests,
                        "recommendation": self._get_normality_recommendation(tests_passed)
                    }
                }
            
            summary = {
                "features_tested": list(results.keys()),
                "normal_features": [k for k, v in results.items() 
                                  if v.get("overall_assessment", {}).get("likely_normal", False)],
                "non_normal_features": [k for k, v in results.items() 
                                      if not v.get("overall_assessment", {}).get("likely_normal", True)]
            }
            
            result = {
                "success": True,
                "confidence_level": self.confidence_level,
                "results": results,
                "summary": summary
            }
            
            self.log_info(f"正态性检验完成，{len(summary['normal_features'])} 个特征符合正态分布")
            return result
            
        except Exception as e:
            self.log_error(f"正态性检验失败: {str(e)}")
            return {"error": f"正态性检验失败: {str(e)}"}
    
    def _get_data_overview(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Summarize shape, dtype mix, memory footprint and duplicate rows."""
        numeric_columns = data.select_dtypes(include=[np.number]).columns
        categorical_columns = data.select_dtypes(include=['object', 'category']).columns
        bytes_used = data.memory_usage(deep=True).sum()
        
        return {
            "shape": list(data.shape),
            "total_features": len(data.columns),
            "numeric_features": len(numeric_columns),
            "categorical_features": len(categorical_columns),
            "memory_usage_mb": float(bytes_used / 1024 / 1024),
            "duplicate_rows": int(data.duplicated().sum())
        }
    
    def _analyze_numeric_features(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Per-column summary statistics for every numeric feature."""
        numeric_data = data.select_dtypes(include=[np.number])
        
        if numeric_data.empty:
            return {"note": "没有数值特征"}
        
        summaries: Dict[str, Any] = {}
        
        for column in numeric_data.columns:
            series = numeric_data[column]
            # Compute shared quantities once instead of repeating them
            # inside the dict literal.
            q1 = series.quantile(0.25)
            q3 = series.quantile(0.75)
            lo = series.min()
            hi = series.max()
            mean_value = series.mean()
            std_value = series.std()
            
            summaries[column] = {
                "count": int(series.count()),
                "mean": float(mean_value),
                "std": float(std_value),
                "min": float(lo),
                "25%": float(q1),
                "50%": float(series.median()),
                "75%": float(q3),
                "max": float(hi),
                "skewness": float(series.skew()),
                "kurtosis": float(series.kurtosis()),
                "variance": float(series.var()),
                "range": float(hi - lo),
                "iqr": float(q3 - q1),
                # Coefficient of variation; undefined for a zero mean.
                "cv": float(std_value / mean_value) if mean_value != 0 else float('inf')
            }
        
        return summaries
    
    def _analyze_categorical_features(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Per-column frequency profile for every categorical feature."""
        categorical_data = data.select_dtypes(include=['object', 'category'])
        
        if categorical_data.empty:
            return {"note": "没有分类特征"}
        
        profiles: Dict[str, Any] = {}
        
        for column in categorical_data.columns:
            series = categorical_data[column]
            counts = series.value_counts()
            has_values = len(counts) > 0
            
            # Mode-related fields degrade gracefully when the column holds
            # no observed values.
            top_value = str(counts.index[0]) if has_values else None
            top_count = int(counts.iloc[0]) if has_values else 0
            top_share = float(counts.iloc[0] / len(series) * 100) if has_values else 0
            
            profiles[column] = {
                "count": int(series.count()),
                "unique_count": int(series.nunique()),
                "most_frequent": top_value,
                "most_frequent_count": top_count,
                "most_frequent_percentage": top_share,
                "entropy": float(stats.entropy(counts.values)),
                "top_5_values": {str(k): int(v) for k, v in counts.head(5).to_dict().items()}
            }
        
        return profiles
    
    def _analyze_missing_values(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Missing-value audit: totals, per-feature counts and complete rows."""
        row_count = len(data)
        missing_counts = data.isnull().sum()
        
        # BUGFIX: guard against an empty frame — the percentage computations
        # below would otherwise raise ZeroDivisionError.
        if row_count == 0:
            return {
                "total_missing": 0,
                "features_with_missing": 0,
                "missing_by_feature": {},
                "complete_rows": 0,
                "complete_rows_percentage": 0.0
            }
        
        missing_percentages = (missing_counts / row_count) * 100
        missing_features = missing_counts[missing_counts > 0]
        # A row is incomplete when any of its cells is null.
        incomplete_rows = int(data.isnull().any(axis=1).sum())
        complete_rows = row_count - incomplete_rows
        
        return {
            "total_missing": int(missing_counts.sum()),
            "features_with_missing": len(missing_features),
            "missing_by_feature": {
                str(k): {
                    "count": int(v),
                    "percentage": float(missing_percentages[k])
                } for k, v in missing_features.to_dict().items()
            },
            "complete_rows": complete_rows,
            "complete_rows_percentage": float(complete_rows / row_count * 100)
        }
    
    def _analyze_outliers(self, data: pd.DataFrame) -> Dict[str, Any]:
        """Flag outliers per numeric column via the IQR and z-score rules."""
        numeric_data = data.select_dtypes(include=[np.number])
        
        if numeric_data.empty:
            return {"note": "没有数值特征用于异常值分析"}
        
        findings: Dict[str, Any] = {}
        
        for column in numeric_data.columns:
            values = numeric_data[column].dropna()
            if values.empty:
                continue
            
            n = len(values)
            
            # IQR rule: outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
            q1 = values.quantile(0.25)
            q3 = values.quantile(0.75)
            spread = q3 - q1
            low_cut = q1 - 1.5 * spread
            high_cut = q3 + 1.5 * spread
            iqr_mask = (values < low_cut) | (values > high_cut)
            
            # Z-score rule: |z| > 3.
            z_mask = np.abs(stats.zscore(values)) > 3
            
            findings[column] = {
                "iqr_method": {
                    "outlier_count": int(iqr_mask.sum()),
                    "outlier_percentage": float(iqr_mask.sum() / n * 100),
                    "lower_bound": float(low_cut),
                    "upper_bound": float(high_cut)
                },
                "zscore_method": {
                    "outlier_count": int(z_mask.sum()),
                    "outlier_percentage": float(z_mask.sum() / n * 100),
                    "threshold": 3.0
                }
            }
        
        return findings
    
    def _calculate_correlation_significance(self, data: pd.DataFrame, method: str) -> Dict[str, Dict[str, float]]:
        """Compute p-values for every feature pair.

        Fixes over the previous version:
          * NaN rows are dropped pairwise. Dropping each column
            independently could misalign the samples (and give them
            different lengths), which made scipy raise and every affected
            pair silently fall back to p=1.0.
          * 'kendall' is now supported via scipy.stats.kendalltau
            (previously it always yielded p=1.0).
          * The bare `except:` is narrowed to the concrete errors scipy
            raises for degenerate input.

        Args:
            data: Numeric DataFrame.
            method: 'pearson', 'spearman' or 'kendall'.

        Returns:
            Nested mapping col1 -> col2 -> p-value; 0.0 on the diagonal,
            1.0 when a pair cannot be tested.
        """
        test_funcs = {
            'pearson': pearsonr,
            'spearman': spearmanr,
            'kendall': stats.kendalltau,
        }
        test = test_funcs.get(method)
        
        significance_matrix: Dict[str, Dict[str, float]] = {}
        
        for col1 in data.columns:
            significance_matrix[col1] = {}
            for col2 in data.columns:
                if col1 == col2:
                    significance_matrix[col1][col2] = 0.0
                    continue
                
                p_value = 1.0
                if test is not None:
                    # Drop rows where either value is missing so the two
                    # samples stay aligned and equally long.
                    pair = data[[col1, col2]].dropna()
                    # Need a few aligned observations for a meaningful p-value.
                    if len(pair) >= 3:
                        try:
                            _, p_value = test(pair[col1], pair[col2])
                        except (ValueError, TypeError):
                            p_value = 1.0
                
                significance_matrix[col1][col2] = float(p_value)
        
        return significance_matrix
    
    def _find_significant_correlations(self, correlation_matrix: pd.DataFrame, 
                                     significance_matrix: Dict[str, Dict[str, float]],
                                     corr_threshold: float = 0.3,
                                     p_threshold: float = 0.05) -> List[Dict[str, Any]]:
        """Collect feature pairs whose correlation is both strong and significant."""
        hits: List[Dict[str, Any]] = []
        columns = list(correlation_matrix.columns)
        
        for idx, col1 in enumerate(columns):
            # Walk the upper triangle only so each pair appears once.
            for col2 in columns[idx + 1:]:
                corr_value = correlation_matrix.loc[col1, col2]
                p_value = significance_matrix[col1][col2]
                
                if abs(corr_value) < corr_threshold or p_value > p_threshold:
                    continue
                
                hits.append({
                    "feature1": col1,
                    "feature2": col2,
                    "correlation": float(corr_value),
                    "p_value": float(p_value),
                    "strength": self._get_correlation_strength(abs(corr_value)),
                    "direction": "正相关" if corr_value > 0 else "负相关"
                })
        
        hits.sort(key=lambda item: abs(item['correlation']), reverse=True)
        return hits
    
    def _get_correlation_strength(self, abs_corr: float) -> str:
        """Map an absolute correlation value onto a descriptive label."""
        # (lower bound, label) pairs, strongest first.
        scale = (
            (0.8, "很强"),
            (0.6, "强"),
            (0.4, "中等"),
            (0.2, "弱"),
        )
        for lower_bound, label in scale:
            if abs_corr >= lower_bound:
                return label
        return "很弱"
    
    def _get_normality_recommendation(self, tests_passed: int) -> str:
        """Translate the number of passed normality tests into advice."""
        if tests_passed == 0:
            return "数据不符合正态分布，建议使用非参数检验"
        if tests_passed == 1:
            return "数据正态性存疑，建议使用非参数检验或数据转换"
        return "数据可能符合正态分布，可以使用参数检验"
    
    def _one_sample_ttest(self, data: pd.DataFrame, feature: str, 
                         population_mean: float) -> Dict[str, Any]:
        """One-sample t-test of *feature* against a hypothesized mean."""
        if feature not in data.columns:
            return {"error": f"特征 {feature} 不存在"}
        
        sample = data[feature].dropna()
        if len(sample) < 2:
            return {"error": "样本量太小，无法进行t检验"}
        
        # Significance is judged at alpha = 1 - confidence_level.
        alpha = 1 - self.confidence_level
        t_stat, p_value = stats.ttest_1samp(sample, population_mean)
        
        return {
            "success": True,
            "test_type": "one_sample_ttest",
            "feature": feature,
            "sample_mean": float(sample.mean()),
            "population_mean": population_mean,
            "t_statistic": float(t_stat),
            "p_value": float(p_value),
            "significant": p_value < alpha,
            "confidence_level": self.confidence_level
        }
    
    def _independent_ttest(self, data: pd.DataFrame, feature: str, 
                          group_column: str) -> Dict[str, Any]:
        """Independent two-sample t-test across exactly two groups."""
        if feature not in data.columns or group_column not in data.columns:
            return {"error": "指定的特征或分组列不存在"}
        
        groups = data[group_column].unique()
        if len(groups) != 2:
            return {"error": "分组列必须恰好包含2个组别"}
        
        first_label, second_label = groups[0], groups[1]
        sample_a = data.loc[data[group_column] == first_label, feature].dropna()
        sample_b = data.loc[data[group_column] == second_label, feature].dropna()
        
        if min(len(sample_a), len(sample_b)) < 2:
            return {"error": "每组样本量至少需要2个"}
        
        t_stat, p_value = stats.ttest_ind(sample_a, sample_b)
        
        return {
            "success": True,
            "test_type": "independent_ttest",
            "feature": feature,
            "group_column": group_column,
            "group1": str(first_label),
            "group2": str(second_label),
            "group1_mean": float(sample_a.mean()),
            "group2_mean": float(sample_b.mean()),
            "t_statistic": float(t_stat),
            "p_value": float(p_value),
            "significant": p_value < (1 - self.confidence_level),
            "confidence_level": self.confidence_level
        }
    
    def _chi_square_test(self, data: pd.DataFrame, feature1: str, 
                        feature2: str) -> Dict[str, Any]:
        """Chi-square test of independence between two categorical features."""
        absent = [f for f in (feature1, feature2) if f not in data.columns]
        if absent:
            return {"error": "指定的特征不存在"}
        
        # Cross-tabulate the observed frequencies.
        contingency_table = pd.crosstab(data[feature1], data[feature2])
        if contingency_table.size == 0:
            return {"error": "交叉表为空"}
        
        chi2_stat, p_value, dof, _expected = chi2_contingency(contingency_table)
        
        return {
            "success": True,
            "test_type": "chi_square",
            "feature1": feature1,
            "feature2": feature2,
            "chi2_statistic": float(chi2_stat),
            "p_value": float(p_value),
            "degrees_of_freedom": int(dof),
            "significant": p_value < (1 - self.confidence_level),
            "confidence_level": self.confidence_level,
            "contingency_table": contingency_table.to_dict()
        }
    
    def _anova_test(self, data: pd.DataFrame, feature: str, 
                   group_column: str) -> Dict[str, Any]:
        """One-way ANOVA of *feature* across the groups in *group_column*."""
        if feature not in data.columns or group_column not in data.columns:
            return {"error": "指定的特征或分组列不存在"}
        
        # Collect each group's non-null observations, keeping the labels
        # in a parallel list.
        samples = []
        labels = []
        for label in data[group_column].unique():
            observations = data.loc[data[group_column] == label, feature].dropna()
            if not observations.empty:
                samples.append(observations)
                labels.append(str(label))
        
        if len(samples) < 2:
            return {"error": "至少需要2个组进行方差分析"}
        
        f_stat, p_value = stats.f_oneway(*samples)
        
        group_stats = {
            label: {
                "count": len(observations),
                "mean": float(observations.mean()),
                "std": float(observations.std())
            }
            for label, observations in zip(labels, samples)
        }
        
        return {
            "success": True,
            "test_type": "anova",
            "feature": feature,
            "group_column": group_column,
            "f_statistic": float(f_stat),
            "p_value": float(p_value),
            "significant": p_value < (1 - self.confidence_level),
            "confidence_level": self.confidence_level,
            "group_statistics": group_stats
        }