import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.font_manager import fontManager
import seaborn as sns
from datetime import datetime
import time
import warnings
import pyarrow.parquet as pq
warnings.filterwarnings('ignore')

# Display options and Chinese (CJK) font configuration.
pd.set_option('display.max_columns', None)

# Reset to matplotlib defaults FIRST, then layer the style and font on top.
# (The original reset AFTER applying the style, which silently discarded it.)
mpl.rcParams.update(mpl.rcParamsDefault)

# The 'seaborn' style name was removed in matplotlib >= 3.6 (renamed to
# 'seaborn-v0_8'); probe the available styles so the script runs on both
# old and new matplotlib versions instead of raising at startup.
for _style in ('seaborn', 'seaborn-v0_8'):
    if _style in plt.style.available:
        plt.style.use(_style)
        break

# Register the WenQuanYi Zen Hei font for CJK axis labels/titles.
font_path = "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc"  # adjust to the actual path
try:
    fontManager.addfont(font_path)
    plt.rcParams['font.family'] = 'WenQuanYi Zen Hei'
except FileNotFoundError:
    # Font not installed at this path: keep the default font rather than crash.
    # CJK glyphs may render as boxes in that case.
    pass

plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

class DataAnalyzer:
    """Batch-oriented analyzer for a sharded parquet user dataset.

    Reads a fixed list of parquet part files in bounded-size batches,
    cleans and enriches each batch, accumulates global statistics, and
    finally writes a text report plus a set of PNG charts.
    """

    def __init__(self, data_dir, batch_size=500_000):
        """
        Args:
            data_dir: directory containing the parquet part files.
            batch_size: rows per batch read from each parquet file.
                Default 500,000 keeps per-batch memory modest (the
                previous default of 100,000,000 contradicted its own
                comment and effectively disabled batching).
        """
        self.data_dir = data_dir
        self.batch_size = batch_size
        # Fixed list of expected input shards: part-00000 .. part-00007.
        self.parquet_files = [f'part-0000{i}.parquet' for i in range(8)]

        # Processed batches retained for the final whole-dataset report
        # and visualizations.
        self.all_data = []

        # Global statistics accumulated across all batches.
        self.stats = {
            'missing_values': pd.DataFrame(),
            'outliers': pd.DataFrame(),
            'user_profiles': pd.DataFrame(),
            'processing_times': []
        }

    def parse_json_field(self, json_str):
        """Parse a JSON-ish string field into a dict.

        The source data uses single quotes, so they are converted to
        double quotes before ``json.loads``. Returns {} for malformed
        or non-string values (e.g. NaN) so batch processing never
        aborts on a single bad row.
        """
        try:
            return json.loads(json_str.replace("'", '"'))
        except (AttributeError, TypeError, json.JSONDecodeError):
            # AttributeError/TypeError: value is not a string (e.g. NaN/None);
            # JSONDecodeError: string is not valid JSON after quote swap.
            return {}

    def process_batch(self, df):
        """Clean, enrich and analyze a single batch.

        Returns:
            (processed_df, batch_stats) where batch_stats contains the
            per-batch missing-value counts, 3-sigma outlier counts,
            wall-clock processing time and a high-value-user summary.
        """
        start_time = time.time()

        # 1. Parse the JSON-encoded history fields into dicts.
        df['purchase_history'] = df['purchase_history'].apply(self.parse_json_field)
        df['login_history'] = df['login_history'].apply(self.parse_json_field)

        # 2. Extract flat features from the parsed JSON.
        df['purchase_amount'] = df['purchase_history'].apply(
            lambda x: x.get('avg_price', np.nan))
        df['purchase_category'] = df['purchase_history'].apply(
            lambda x: x.get('categories', ''))
        df['login_count'] = df['login_history'].apply(
            lambda x: x.get('login_count', 0))
        df['payment_method'] = df['purchase_history'].apply(
            lambda x: x.get('payment_method', ''))

        # 3. Pre-processing.
        # Record missing values BEFORE imputation so the report reflects
        # raw data quality.
        missing_values = df.isnull().sum()

        # Impute numeric gaps with the batch median, THEN downcast.
        # NOTE: imputation must precede the int cast — casting a column
        # containing NaN to int32 raises, and fillna on an int column is
        # a no-op. Assign-back form avoids the deprecated chained
        # ``inplace=True`` pattern.
        df['income'] = df['income'].fillna(df['income'].median())
        df['age'] = df['age'].fillna(df['age'].median())
        df = df.astype({
            'age': 'int32',
            'income': 'float32',
            'is_active': 'bool'
        })

        # Outlier handling.
        # Age clamped to [0, 100].
        df['age'] = np.where(df['age'] < 0, 0,
                             np.where(df['age'] > 100, 100, df['age']))
        # Income must be non-negative.
        df['income'] = np.where(df['income'] < 0, 0, df['income'])

        # Winsorize income/age to the 1.5*IQR fences.
        for col in ['income', 'age']:
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr
            df[col] = np.where(df[col] < lower_bound, lower_bound,
                               np.where(df[col] > upper_bound, upper_bound, df[col]))

        # 4. Data quality: count 3-sigma outliers per numeric column.
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        outliers = df[numeric_cols].apply(
            lambda x: np.sum((x - x.mean()).abs() > 3 * x.std()))

        # 5. User profiling.
        df['age_group'] = pd.cut(df['age'],
            bins=[0, 18, 30, 45, 60, 100],
            labels=['未成年', '青年', '中年', '中老年', '老年'])

        # Percentile ranks feed the composite value score.
        df['income_rank'] = df['income'].rank(pct=True)
        df['purchase_rank'] = df['purchase_amount'].rank(pct=True)
        df['login_rank'] = df['login_count'].rank(pct=True)

        # Weighted composite: income 40%, purchases 30%, logins 30%.
        df['user_value'] = (df['income_rank'] * 0.4 +
                            df['purchase_rank'] * 0.3 +
                            df['login_rank'] * 0.3)

        # High-value users = top 20% by composite score (within batch).
        high_value_threshold = df['user_value'].quantile(0.8)
        df['is_high_value'] = df['user_value'] >= high_value_threshold

        # Summarize the high-value slice once instead of re-filtering
        # the frame five times.
        high_value = df[df['is_high_value']]
        high_value_stats = {
            'count': df['is_high_value'].sum(),
            'avg_income': high_value['income'].mean(),
            'avg_purchase': high_value['purchase_amount'].mean(),
            'top_countries': high_value['country'].value_counts().head(3).to_dict(),
            'top_categories': high_value['purchase_category'].value_counts().head(3).to_dict()
        }

        batch_stats = {
            'missing_values': missing_values,
            'outliers': outliers,
            'processing_time': time.time() - start_time,
            'high_value_stats': high_value_stats
        }

        return df, batch_stats

    def analyze_data(self):
        """Run the full pipeline over every shard, then report/visualize."""
        for file in self.parquet_files:
            file_path = f"{self.data_dir}/{file}"
            print(f"Processing {file_path}...")

            # Stream each parquet file in batches to bound memory use.
            parquet_file = pq.ParquetFile(file_path)
            for batch in parquet_file.iter_batches(batch_size=self.batch_size):
                df = batch.to_pandas()
                processed_batch, batch_stats = self.process_batch(df)

                # Fold per-batch stats into the global accumulators.
                self._update_stats(batch_stats)

                # Keep the processed batch for whole-dataset charts.
                # NOTE(review): this retains the entire dataset in memory,
                # which partially defeats the batching above.
                self._save_batch_data(processed_batch)

        # Final report and visualizations over the accumulated data.
        self._generate_report()
        self._visualize_all_data()

    @staticmethod
    def _accumulate_counts(existing, new_series):
        """Add a per-batch count Series into an accumulated one-column frame."""
        if existing.empty:
            return new_series.to_frame()
        # concat + row-wise sum sidesteps index-alignment/fill_value issues
        # that a plain .add() can hit when columns differ between batches.
        combined = pd.concat([existing, new_series.to_frame()], axis=1)
        return combined.sum(axis=1).to_frame()

    def _update_stats(self, batch_stats):
        """Fold one batch's statistics into the global accumulators."""
        self.stats['missing_values'] = self._accumulate_counts(
            self.stats['missing_values'], batch_stats['missing_values'])
        self.stats['outliers'] = self._accumulate_counts(
            self.stats['outliers'], batch_stats['outliers'])

        # Lazily create the high-value accumulator on first batch.
        hv = self.stats.setdefault('high_value_users', {
            'total': 0,
            'incomes': [],
            'purchases': [],
            'countries': {},
            'categories': {}
        })

        hv_batch = batch_stats['high_value_stats']
        hv['total'] += hv_batch['count']
        hv['incomes'].append(hv_batch['avg_income'])
        hv['purchases'].append(hv_batch['avg_purchase'])

        # Merge per-batch country distribution.
        for country, count in hv_batch['top_countries'].items():
            hv['countries'][country] = hv['countries'].get(country, 0) + count

        # Merge per-batch category distribution.
        for category, count in hv_batch['top_categories'].items():
            hv['categories'][category] = hv['categories'].get(category, 0) + count

        # Track per-batch wall-clock time for the report.
        self.stats['processing_times'].append(batch_stats['processing_time'])

    def _save_batch_data(self, df):
        """Retain a processed batch for the final report/visualizations."""
        self.all_data.append(df)

    @staticmethod
    def _save_pie(series, title, filename):
        """Render a value-counts Series as a pie-chart PNG and close the figure."""
        plt.figure(figsize=(10, 6))
        series.plot(kind='pie', autopct='%1.1f%%')
        plt.title(title)
        plt.ylabel('')
        plt.savefig(filename)
        plt.close()

    def _visualize_all_data(self):
        """Generate the chart PNGs from the full accumulated dataset."""
        if not self.all_data:
            return

        df = pd.concat(self.all_data)

        # 1. Pie: overall gender split.
        self._save_pie(df['gender'].value_counts(),
                       '整体用户性别比例', 'gender_distribution.png')

        # 2. Pie: gender split among active users.
        self._save_pie(df[df['is_active']]['gender'].value_counts(),
                       '活跃用户性别比例', 'active_gender_distribution.png')

        # 3. Pie: payment-method split.
        self._save_pie(df['payment_method'].value_counts(),
                       '支付方式比例', 'payment_method_pie.png')

        # 4. Bar: age-group distribution.
        plt.figure(figsize=(12, 6))
        df['age_group'].value_counts().sort_index().plot(kind='bar')
        plt.title('年龄分布')
        plt.xlabel('年龄段')
        plt.ylabel('用户数量')
        plt.savefig('age_distribution_bar.png')
        plt.close()

        # 5. Boxplot: income by age group and gender.
        plt.figure(figsize=(14, 6))
        sns.boxplot(x='age_group', y='income', hue='gender', data=df)
        plt.title('各年龄段不同性别的收入分布')
        plt.xlabel('年龄段')
        plt.ylabel('收入')
        plt.savefig('age_gender_income_boxplot.png')
        plt.close()

        # 6. Line: monthly registrations (only if the column exists).
        if 'registration_date' in df.columns:
            plt.figure(figsize=(12, 6))
            df['registration_date'] = pd.to_datetime(df['registration_date'])
            monthly_reg = df.set_index('registration_date').resample('M').size()
            monthly_reg.plot(kind='line')
            plt.title('每月新增用户数量')
            plt.xlabel('日期')
            plt.ylabel('用户数量')
            plt.savefig('monthly_registration.png')
            plt.close()

        # 7. Pie: gender split among high-value users.
        self._save_pie(df[df['is_high_value']]['gender'].value_counts(),
                       '高价值用户性别比例', 'high_value_gender.png')

    def _generate_report(self):
        """Write the aggregated analysis report to data_analysis_report.txt."""
        report = []

        # Concatenate the accumulated batches ONCE and reuse the result
        # (the original concatenated the full dataset twice).
        df = pd.concat(self.all_data) if self.all_data else pd.DataFrame()
        total_records = len(df)
        if total_records == 0:
            # Nothing was processed: emit a stub report instead of
            # crashing on empty-frame operations / pd.concat([]).
            with open('data_analysis_report.txt', 'w', encoding='utf-8') as f:
                f.write("=== 数据质量分析报告 ===\n处理后记录数: 0")
            print("分析报告已生成: data_analysis_report.txt")
            return

        # 1. Data-quality section.
        missing_values = self.stats['missing_values']
        missing_rates = (missing_values / total_records * 100).round(2)

        outliers = self.stats['outliers']
        outlier_rates = (outliers / total_records * 100).round(2)

        # Deleted-record accounting ('deleted_records' is optional).
        deleted_count = self.stats.get('deleted_records', 0)
        initial_count = total_records + deleted_count
        deleted_rate = round(deleted_count / initial_count * 100, 2) if initial_count > 0 else 0

        report.append("=== 数据质量分析报告 ===")
        report.append(f"初始记录数: {initial_count:,}")
        report.append(f"处理后记录数: {total_records:,}")
        report.append(f"删除记录数: {deleted_count:,} (占比{deleted_rate}%)")

        report.append("\n缺失值统计:")
        report.append(missing_values.to_string())
        report.append("\n缺失值比例(%):")
        report.append(missing_rates.to_string())

        report.append("\n异常值统计:")
        report.append(outliers.to_string())
        report.append("\n异常值比例(%):")
        report.append(outlier_rates.to_string())

        # 2. Processing-time section.
        avg_time = np.mean(self.stats['processing_times'])
        report.append(f"\n平均每批次处理时间: {avg_time:.2f}秒")
        report.append(f"总处理时间: {sum(self.stats['processing_times']):.2f}秒")

        # 3. User-profile section.
        report.append("\n=== 用户画像分析 ===")
        report.append(f"总用户数: {len(df):,}")
        report.append(f"活跃用户比例: {(df['is_active'].mean() * 100).round(2)}%")
        report.append("\n性别分布:")
        report.append(df['gender'].value_counts().to_string())

        # 4. High-value-user section.
        high_value_df = df[df['is_high_value']]
        report.append("\n=== 高价值用户分析 ===")
        report.append(f"高价值用户数量: {len(high_value_df):,} (占比{(len(high_value_df)/len(df)*100):.2f}%)")
        report.append(f"平均收入: {high_value_df['income'].mean():,.2f}")
        report.append(f"平均消费金额: {high_value_df['purchase_amount'].mean():,.2f}")

        report.append("\n高价值用户国家分布(TOP3):")
        top_countries = high_value_df['country'].value_counts().head(3)
        report.append(top_countries.to_string())

        report.append("\n高价值用户消费品类(TOP3):")
        top_categories = high_value_df['purchase_category'].value_counts().head(3)
        report.append(top_categories.to_string())

        with open('data_analysis_report.txt', 'w', encoding='utf-8') as f:
            f.write("\n".join(report))

        print("分析报告已生成: data_analysis_report.txt")

if __name__ == "__main__":
    # Wall-clock timestamps bracket the run so total duration is visible
    # in the console output.
    ts_format = "%Y-%m-%d %H:%M:%S"
    analyzer = DataAnalyzer("data/10G_data_new")
    print("Starting data analysis...", time.strftime(ts_format, time.localtime()))
    analyzer.analyze_data()
    print("Data analysis completed.", time.strftime(ts_format, time.localtime()))