"""
数据分析模块
对标注后的数据进行统计分析和挖掘
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Dict, List, Any, Optional, Tuple
import logging
from datetime import datetime
import json
from collections import Counter
import warnings
# NOTE(review): this suppresses ALL warnings process-wide (pandas/numpy
# deprecations included) for anyone importing this module — consider
# narrowing to specific warning categories.
warnings.filterwarnings('ignore')

# Configure fonts so matplotlib can render CJK text in the generated charts.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS']
# Keep the minus sign renderable when a non-default font is active.
plt.rcParams['axes.unicode_minus'] = False

logger = logging.getLogger(__name__)

class DataAnalyzer:
    """数据分析器"""
    
    def __init__(self, labeled_data: pd.DataFrame):
        """
        初始化数据分析器
        
        Args:
            labeled_data: 标注后的数据
        """
        self.data = labeled_data
        self.analysis_results = {}
        self.figures = {}
        
    def basic_statistics(self) -> Dict[str, Any]:
        """
        基础统计分析
        
        Returns:
            基础统计结果
        """
        logger.info("开始基础统计分析...")
        
        stats = {
            'total_records': len(self.data),
            'column_count': len(self.data.columns),
            'data_types': self.data.dtypes.to_dict(),
            'missing_values': self.data.isnull().sum().to_dict(),
            'basic_stats': {}
        }
        
        # 数值型列的统计
        numeric_cols = self.data.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            stats['numeric_summary'] = self.data[numeric_cols].describe().to_dict()
        
        # 分类型列的统计
        categorical_cols = self.data.select_dtypes(include=['object']).columns
        if len(categorical_cols) > 0:
            stats['categorical_summary'] = {}
            for col in categorical_cols:
                if col not in ['labeling_result', 'labels', 'reasoning']:
                    stats['categorical_summary'][col] = {
                        'unique_count': self.data[col].nunique(),
                        'top_values': self.data[col].value_counts().head(10).to_dict()
                    }
        
        self.analysis_results['basic_statistics'] = stats
        logger.info("基础统计分析完成")
        
        return stats
    
    def label_analysis(self) -> Dict[str, Any]:
        """
        标注结果分析
        
        Returns:
            标注结果分析
        """
        logger.info("开始标注结果分析...")
        
        if 'labels' not in self.data.columns:
            logger.warning("未找到标注结果列")
            return {}
        
        # 提取标注结果
        label_data = []
        for idx, row in self.data.iterrows():
            try:
                if isinstance(row['labels'], str):
                    labels = json.loads(row['labels'])
                else:
                    labels = row['labels']
                
                if isinstance(labels, dict):
                    label_data.append(labels)
                else:
                    logger.warning(f"第 {idx} 行标注结果格式异常")
                    
            except Exception as e:
                logger.warning(f"第 {idx} 行标注结果解析失败: {str(e)}")
                continue
        
        if not label_data:
            logger.warning("未找到有效的标注结果")
            return {}
        
        # 分析标注结果
        analysis = {
            'total_labeled': len(label_data),
            'label_distribution': {},
            'confidence_analysis': {},
            'category_analysis': {},
            'sentiment_analysis': {},
            'quality_analysis': {}
        }
        
        # 置信度分析
        if 'confidence' in self.data.columns:
            confidences = self.data['confidence'].dropna()
            if len(confidences) > 0:
                analysis['confidence_analysis'] = {
                    'mean': float(confidences.mean()),
                    'std': float(confidences.std()),
                    'min': float(confidences.min()),
                    'max': float(confidences.max()),
                    'distribution': {
                        'high_confidence': len(confidences[confidences >= 0.8]),
                        'medium_confidence': len(confidences[(confidences >= 0.5) & (confidences < 0.8)]),
                        'low_confidence': len(confidences[confidences < 0.5])
                    }
                }
        
        # 类别分析
        categories = []
        sentiments = []
        quality_scores = []
        all_tags = []
        
        for labels in label_data:
            if 'category' in labels:
                categories.append(labels['category'])
            if 'sentiment' in labels:
                sentiments.append(labels['sentiment'])
            if 'quality_score' in labels:
                quality_scores.append(labels['quality_score'])
            if 'tags' in labels and isinstance(labels['tags'], list):
                all_tags.extend(labels['tags'])
        
        # 类别分布
        if categories:
            category_counts = Counter(categories)
            analysis['category_analysis'] = {
                'total_categories': len(category_counts),
                'distribution': dict(category_counts),
                'most_common': category_counts.most_common(5)
            }
        
        # 情感分析
        if sentiments:
            sentiment_counts = Counter(sentiments)
            analysis['sentiment_analysis'] = {
                'distribution': dict(sentiment_counts),
                'most_common': sentiment_counts.most_common(3)
            }
        
        # 质量分析
        if quality_scores:
            quality_array = np.array(quality_scores)
            analysis['quality_analysis'] = {
                'mean_score': float(quality_array.mean()),
                'std_score': float(quality_array.std()),
                'score_distribution': {
                    'high_quality': len(quality_array[quality_array >= 8]),
                    'medium_quality': len(quality_array[(quality_array >= 5) & (quality_array < 8)]),
                    'low_quality': len(quality_array[quality_array < 5])
                }
            }
        
        # 标签分析
        if all_tags:
            tag_counts = Counter(all_tags)
            analysis['tag_analysis'] = {
                'total_tags': len(tag_counts),
                'distribution': dict(tag_counts),
                'top_tags': tag_counts.most_common(10)
            }
        
        self.analysis_results['label_analysis'] = analysis
        logger.info("标注结果分析完成")
        
        return analysis
    
    def correlation_analysis(self) -> Dict[str, Any]:
        """
        相关性分析
        
        Returns:
            相关性分析结果
        """
        logger.info("开始相关性分析...")
        
        # 选择数值型列
        numeric_cols = self.data.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) < 2:
            logger.warning("数值型列不足，无法进行相关性分析")
            return {}
        
        # 计算相关系数
        correlation_matrix = self.data[numeric_cols].corr()
        
        # 找出强相关的特征对
        strong_correlations = []
        for i, col1 in enumerate(numeric_cols):
            for j, col2 in enumerate(numeric_cols):
                if i < j:  # 避免重复
                    corr_value = correlation_matrix.loc[col1, col2]
                    if abs(corr_value) > 0.5:  # 强相关阈值
                        strong_correlations.append({
                            'feature1': col1,
                            'feature2': col2,
                            'correlation': float(corr_value),
                            'strength': 'strong' if abs(corr_value) > 0.7 else 'moderate'
                        })
        
        analysis = {
            'correlation_matrix': correlation_matrix.to_dict(),
            'strong_correlations': strong_correlations,
            'highly_correlated_pairs': len([c for c in strong_correlations if abs(c['correlation']) > 0.7])
        }
        
        self.analysis_results['correlation_analysis'] = analysis
        logger.info("相关性分析完成")
        
        return analysis
    
    def trend_analysis(self) -> Dict[str, Any]:
        """
        趋势分析
        
        Returns:
            趋势分析结果
        """
        logger.info("开始趋势分析...")
        
        # 查找时间列
        time_columns = []
        for col in self.data.columns:
            if 'time' in col.lower() or 'date' in col.lower():
                time_columns.append(col)
        
        if not time_columns:
            logger.warning("未找到时间列，无法进行趋势分析")
            return {}
        
        analysis = {}
        
        for time_col in time_columns:
            try:
                # 尝试转换为时间格式
                self.data[time_col] = pd.to_datetime(self.data[time_col])
                
                # 按时间分组统计
                time_grouped = self.data.groupby(pd.Grouper(key=time_col, freq='D')).size()
                
                # 计算趋势
                if len(time_grouped) > 1:
                    trend_slope = np.polyfit(range(len(time_grouped)), time_grouped.values, 1)[0]
                    
                    analysis[time_col] = {
                        'daily_counts': time_grouped.to_dict(),
                        'trend_slope': float(trend_slope),
                        'trend_direction': 'increasing' if trend_slope > 0 else 'decreasing',
                        'date_range': {
                            'start': time_grouped.index.min().isoformat(),
                            'end': time_grouped.index.max().isoformat()
                        }
                    }
                
            except Exception as e:
                logger.warning(f"时间列 {time_col} 处理失败: {str(e)}")
                continue
        
        self.analysis_results['trend_analysis'] = analysis
        logger.info("趋势分析完成")
        
        return analysis
    
    def outlier_detection(self) -> Dict[str, Any]:
        """
        异常值检测
        
        Returns:
            异常值检测结果
        """
        logger.info("开始异常值检测...")
        
        numeric_cols = self.data.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) == 0:
            logger.warning("未找到数值型列，无法进行异常值检测")
            return {}
        
        outliers = {}
        
        for col in numeric_cols:
            # 使用IQR方法检测异常值
            Q1 = self.data[col].quantile(0.25)
            Q3 = self.data[col].quantile(0.75)
            IQR = Q3 - Q1
            
            lower_bound = Q1 - 1.5 * IQR
            upper_bound = Q3 + 1.5 * IQR
            
            outlier_mask = (self.data[col] < lower_bound) | (self.data[col] > upper_bound)
            outlier_count = outlier_mask.sum()
            
            if outlier_count > 0:
                outliers[col] = {
                    'count': int(outlier_count),
                    'percentage': float(outlier_count / len(self.data) * 100),
                    'bounds': {
                        'lower': float(lower_bound),
                        'upper': float(upper_bound)
                    },
                    'outlier_values': self.data[col][outlier_mask].tolist()
                }
        
        analysis = {
            'outlier_columns': list(outliers.keys()),
            'total_outlier_columns': len(outliers),
            'outlier_details': outliers
        }
        
        self.analysis_results['outlier_detection'] = analysis
        logger.info("异常值检测完成")
        
        return analysis
    
    def generate_visualizations(self, output_dir: str = 'visualizations') -> Dict[str, str]:
        """
        Render the collected analysis results as PNG charts.

        Charts are only produced for analyses whose results are already
        present in ``self.analysis_results``, so the analysis methods must
        run first.

        Args:
            output_dir: Directory to create (if needed) and write charts into.

        Returns:
            Mapping of chart name -> saved file path (also kept on
            ``self.figures``).
        """
        logger.info("开始生成可视化图表...")

        import os
        os.makedirs(output_dir, exist_ok=True)

        figure_paths = {}

        def save_current(name: str, filename: str) -> None:
            # Persist the active matplotlib figure and record its path.
            path = os.path.join(output_dir, filename)
            plt.savefig(path, dpi=300, bbox_inches='tight')
            plt.close()
            figure_paths[name] = path

        results = self.analysis_results

        # 1. Missing-value bar chart from the basic statistics.
        if 'basic_statistics' in results:
            missing_data = results['basic_statistics']['missing_values']
            if any(missing_data.values()):
                plt.figure(figsize=(12, 6))
                plt.bar(missing_data.keys(), missing_data.values())
                plt.title('缺失值统计', fontsize=14, fontweight='bold')
                plt.xlabel('列名')
                plt.ylabel('缺失值数量')
                plt.xticks(rotation=45)
                plt.tight_layout()
                save_current('missing_values', 'missing_values.png')

        # 2. Label-analysis charts: category pie and confidence bars.
        if 'label_analysis' in results:
            label_analysis = results['label_analysis']

            if label_analysis.get('category_analysis'):
                categories = label_analysis['category_analysis']['distribution']
                plt.figure(figsize=(10, 6))
                plt.pie(categories.values(), labels=categories.keys(), autopct='%1.1f%%')
                plt.title('类别分布', fontsize=14, fontweight='bold')
                save_current('category_distribution', 'category_distribution.png')

            if label_analysis.get('confidence_analysis'):
                conf_dist = label_analysis['confidence_analysis']['distribution']
                plt.figure(figsize=(8, 6))
                plt.bar(conf_dist.keys(), conf_dist.values())
                plt.title('置信度分布', fontsize=14, fontweight='bold')
                plt.xlabel('置信度等级')
                plt.ylabel('数量')
                save_current('confidence_distribution', 'confidence_distribution.png')

        # 3. Correlation heatmap over the numeric features.
        if 'correlation_analysis' in results:
            corr_matrix = pd.DataFrame(results['correlation_analysis']['correlation_matrix'])
            if len(corr_matrix) > 1:
                plt.figure(figsize=(10, 8))
                sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', center=0)
                plt.title('特征相关性热力图', fontsize=14, fontweight='bold')
                save_current('correlation_heatmap', 'correlation_heatmap.png')

        self.figures = figure_paths
        logger.info(f"可视化图表生成完成，共生成 {len(figure_paths)} 个图表")

        return figure_paths
    
    def run_complete_analysis(self, output_dir: str = 'analysis_output') -> Dict[str, Any]:
        """
        Run every analysis step plus visualization and bundle the results.

        Args:
            output_dir: Directory where generated charts are written.

        Returns:
            Dict with a timestamp, a small data summary, all analysis
            results, and the generated chart paths.
        """
        logger.info("开始完整数据分析流程...")

        # Each step stores its own output in self.analysis_results.
        for step in (
            self.basic_statistics,
            self.label_analysis,
            self.correlation_analysis,
            self.trend_analysis,
            self.outlier_detection,
        ):
            step()

        # Charts are derived from whatever the steps above produced.
        self.generate_visualizations(output_dir)

        complete_results = {
            'analysis_timestamp': datetime.now().isoformat(),
            'data_summary': {
                'total_records': len(self.data),
                'total_features': len(self.data.columns)
            },
            'analysis_results': self.analysis_results,
            'visualizations': self.figures
        }

        logger.info("完整数据分析流程结束")

        return complete_results

# Usage example
if __name__ == "__main__":
    # Configure logging so the analyzer's progress messages are visible
    logging.basicConfig(level=logging.INFO)
    
    # Build a tiny sample dataset; 'labels' holds JSON-encoded label dicts
    sample_data = pd.DataFrame({
        'text': ['这是一条正面评论', '这是一条负面评论', '这是一条中性评论'],
        'source': ['用户A', '用户B', '用户C'],
        'timestamp': pd.to_datetime(['2024-01-01', '2024-01-02', '2024-01-03']),
        'labels': [
            '{"category": "正面", "sentiment": "positive", "quality_score": 8}',
            '{"category": "负面", "sentiment": "negative", "quality_score": 6}',
            '{"category": "中性", "sentiment": "neutral", "quality_score": 7}'
        ],
        'confidence': [0.9, 0.8, 0.7]
    })
    
    # Create the analyzer over the labeled data
    analyzer = DataAnalyzer(sample_data)
    
    # Run the full pipeline (also writes charts under ./analysis_output)
    results = analyzer.run_complete_analysis()
    
    # default=str stringifies non-JSON values (dtypes, Timestamps) in results
    print(json.dumps(results, ensure_ascii=False, indent=2, default=str))
    
    print("数据分析完成!") 