import pandas as pd
import os
import logging
from typing import Dict, Any

# Module-wide logging configuration.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
logger = logging.getLogger('DataStatistics')


class DataStatistics:
    """
    Data-quality statistics helper: duplicate-row checks and blank-value
    audits for a DataFrame, with Excel reports written to a results directory.
    """

    def __init__(self, results_dir: str):
        """
        Initialize the statistics helper.

        Args:
            results_dir: Directory where report files are written; created
                on demand if it does not exist.
        """
        self.results_dir = results_dir
        os.makedirs(results_dir, exist_ok=True)

    def check_duplicates(self, df: pd.DataFrame, file_name: str) -> Dict[str, Any]:
        """
        Check row duplication, write a summary report, and (when duplicates
        exist) save the duplicated rows themselves for inspection.

        Args:
            df: DataFrame to inspect.
            file_name: Base name used for the generated report files.

        Returns:
            Dict with:
                total_rows: number of rows in ``df``;
                unique_rows: row count after dropping exact duplicates;
                duplicate_count_all: rows belonging to any group of fully
                    identical rows;
                duplicate_count_id: rows sharing a 笔记ID value, or None if
                    that column is absent;
                duplicate_details: the duplicated 笔记ID values and count.
        """
        logger.info(f"开始检查 {file_name} 的重复性...")

        result = {
            'total_rows': len(df),
            'unique_rows': 0,
            'duplicate_count_all': 0,
            'duplicate_count_id': None,
            'duplicate_details': {}
        }

        # keep=False marks every member of a fully-duplicated group (not
        # just the extra copies), so this mask means "rows involved in
        # duplication at all".
        duplicated_rows = df.duplicated(keep=False)
        result['duplicate_count_all'] = int(duplicated_rows.sum())
        # Bug fix: "去重后行数" is the row count after drop_duplicates(),
        # i.e. total minus the redundant copies (keep='first').  The old
        # code subtracted duplicate_count_all, which wrongly removed the
        # first occurrence of every duplicated group as well.
        result['unique_rows'] = int(len(df) - df.duplicated().sum())

        # Per-笔记ID (note ID) duplication, only when the column exists.
        if '笔记ID' in df.columns:
            id_duplicated = df.duplicated(subset=['笔记ID'], keep=False)
            result['duplicate_count_id'] = int(id_duplicated.sum())

            # Reuse the boolean mask instead of recomputing duplicated().
            duplicated_ids = df.loc[id_duplicated, '笔记ID'].unique()
            result['duplicate_details']['duplicated_note_ids'] = duplicated_ids.tolist()
            result['duplicate_details']['duplicated_note_ids_count'] = len(duplicated_ids)

        # Build and persist the duplication summary report.
        report_df = pd.DataFrame([{
            '统计项': '总行数',
            '数值': result['total_rows']
        }, {
            '统计项': '去重后行数',
            '数值': result['unique_rows']
        }, {
            '统计项': '完全重复行数',
            '数值': result['duplicate_count_all']
        }, {
            '统计项': '笔记ID重复行数',
            '数值': result['duplicate_count_id'] if result['duplicate_count_id'] is not None else 'N/A'
        }, {
            '统计项': '重复率',
            '数值': f"{(result['duplicate_count_all'] / result['total_rows'] * 100):.2f}%" if result['total_rows'] > 0 else '0%'
        }])

        report_path = os.path.join(self.results_dir, f'{file_name}_重复性统计.xlsx')
        report_df.to_excel(report_path, index=False, engine='openpyxl')
        logger.info(f"重复性统计报告已保存到 {report_path}")

        # Save the duplicated rows themselves for manual inspection.
        if result['duplicate_count_all'] > 0:
            duplicate_samples = df[duplicated_rows]
            sample_path = os.path.join(self.results_dir, f'{file_name}_重复样本.xlsx')
            duplicate_samples.to_excel(sample_path, index=False, engine='openpyxl')
            logger.info(f"重复样本已保存到 {sample_path}")

        return result

    def check_blank_values(self, df: pd.DataFrame, file_name: str) -> Dict[str, Any]:
        """
        Count blank values (NaN/None or empty string) per column and write a
        per-column report sorted by blank count, descending.

        Args:
            df: DataFrame to inspect.
            file_name: Base name used for the generated report file.

        Returns:
            Dict with 'total_columns', 'columns_with_blank' (columns having
            at least one blank value) and 'blank_statistics' (per-column
            records matching the report rows).
        """
        logger.info(f"开始检查 {file_name} 的空白值情况...")

        blank_stats = []
        total_count = len(df)  # invariant across columns; hoisted out of the loop

        for column in df.columns:
            # Blank == NaN/None or exactly the empty string.
            # NOTE(review): whitespace-only strings are NOT counted as
            # blank — confirm this is intended.
            blank_count = int(df[column].isna().sum() + (df[column] == '').sum())
            blank_percentage = (blank_count / total_count * 100) if total_count > 0 else 0

            blank_stats.append({
                '字段名': column,
                '非空值数量': total_count - blank_count,
                '空白值数量': blank_count,
                '空白值占比': f"{blank_percentage:.2f}%",
                '总记录数': total_count
            })

        # Sort by blank count; equivalent to sorting by percentage since
        # every column shares the same denominator.
        stats_df = pd.DataFrame(blank_stats)
        stats_df = stats_df.sort_values(by='空白值数量', ascending=False)

        report_path = os.path.join(self.results_dir, f'{file_name}_空白值统计.xlsx')
        stats_df.to_excel(report_path, index=False, engine='openpyxl')
        logger.info(f"空白值统计报告已保存到 {report_path}")

        return {
            'total_columns': len(df.columns),
            'columns_with_blank': sum(1 for s in blank_stats if s['空白值数量'] > 0),
            'blank_statistics': blank_stats
        }

    def run_complete_analysis(self, df: pd.DataFrame, file_name: str) -> Dict[str, Any]:
        """
        Run the duplicate check and the blank-value check back to back.

        Args:
            df: DataFrame to analyze.
            file_name: Base name used for all generated report files.

        Returns:
            Dict combining both detailed results plus a flat 'summary'
            section with the headline numbers.
        """
        logger.info(f"开始对 {file_name} 进行完整数据分析...")

        duplicate_result = self.check_duplicates(df, file_name)
        blank_result = self.check_blank_values(df, file_name)

        complete_result = {
            'file_name': file_name,
            'duplicate_analysis': duplicate_result,
            'blank_analysis': blank_result,
            'summary': {
                'total_rows': duplicate_result['total_rows'],
                'unique_rows': duplicate_result['unique_rows'],
                'duplicate_count': duplicate_result['duplicate_count_all'],
                'columns_with_blank': blank_result['columns_with_blank'],
                'total_columns': blank_result['total_columns']
            }
        }

        logger.info(f"{file_name} 数据分析完成！")
        return complete_result