import pandas as pd
import numpy as np
import json
import glob
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.font_manager import fontManager
import seaborn as sns
import pyarrow.parquet as pq
import gc
import psutil
import time
from tempfile import TemporaryDirectory
from collections import defaultdict
from datetime import datetime

# Global plotting configuration.
# The 'seaborn' style alias was removed in matplotlib 3.8 (renamed to
# 'seaborn-v0_8'); try the legacy name first so older matplotlib versions
# keep working, and fall back to the new name when it raises.
try:
    plt.style.use('seaborn')
except OSError:
    plt.style.use('seaborn-v0_8')
mpl.rcParams['font.family'] = 'WenQuanYi Zen Hei'  # CJK-capable font for Chinese labels
mpl.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

class OptimizedDataAnalyzer:
    """Stream-process a directory of parquet files in memory-bounded batches.

    Each batch is JSON-parsed, cleaned, dtype-optimized, and folded into
    running (streaming) statistics; at the end a text report and a summary
    chart are written to disk.
    """

    def __init__(self, data_dir, initial_batch_size=1_000_000):
        """
        Args:
            data_dir: directory containing the *.parquet input files.
            initial_batch_size: starting row count per batch; adjusted
                dynamically from observed memory pressure in _process_batch.
        """
        self.data_dir = data_dir
        self.batch_size = initial_batch_size
        self.parquet_files = glob.glob(os.path.join(data_dir, "*.parquet"))
        # Holds per-batch intermediate feather dumps; removed in analyze().
        self.temp_dir = TemporaryDirectory(prefix="analytics_temp_")

        # Streaming accumulators, updated once per processed batch.
        self.stats = {
            'missing_values': defaultdict(int),
            'outlier_counts': defaultdict(int),
            'gender_dist': defaultdict(int),
            'active_gender': defaultdict(int),
            'payment_dist': defaultdict(int),
            'age_dist': defaultdict(int),
            'high_value': {
                'total': 0,
                'gender': defaultdict(int),
                'countries': defaultdict(int),
                'categories': defaultdict(int),
                'income_sum': 0.0,
                'purchase_sum': 0.0
            },
            'total_records': 0
        }

    def _optimize_dtypes(self, df):
        """Downcast columns to compact dtypes to reduce memory footprint.

        Only columns actually present in the frame are converted, so the
        mapping is safe against schema variations between files.
        """
        type_map = {
            'user_id': 'category',
            'gender': 'category',
            'country': 'category',
            'purchase_category': 'category',
            'payment_method': 'category',
            'age_group': 'category',
            'is_active': 'bool',
            'is_high_value': 'bool',
            'age': 'uint8',
            'income': 'float32',
            'purchase_amount': 'float32',
            'login_count': 'uint16'
        }
        return df.astype({col: dtype for col, dtype in type_map.items() if col in df})

    def _parse_json_fields(self, df):
        """Expand the embedded JSON columns into flat typed columns.

        NOTE(review): the blanket "'" -> '"' replacement assumes the stored
        dicts never contain apostrophes inside values — confirm against the
        data producer; an apostrophe in a category name would break parsing.
        On any parse failure the whole batch falls back to neutral defaults.
        """
        try:
            # Vectorized normalization of the quasi-JSON strings.
            df['purchase_history'] = df['purchase_history'].str.replace("'", '"')
            purchase_json = pd.json_normalize(df['purchase_history'].apply(json.loads))

            df['purchase_amount'] = purchase_json['avg_price'].astype('float32')
            df['purchase_category'] = purchase_json['categories'].astype('category')
            df['payment_method'] = purchase_json['payment_method'].astype('category')

            login_json = pd.json_normalize(df['login_history'].str.replace("'", '"').apply(json.loads))
            df['login_count'] = login_json['login_count'].astype('uint16')
        except Exception as e:
            # Best-effort fallback: keep the pipeline running with sentinel
            # values rather than aborting the entire batch.
            print(f"JSON解析错误: {str(e)}")
            df['purchase_amount'] = 0.0
            df['purchase_category'] = 'unknown'
            df['payment_method'] = 'unknown'
            df['login_count'] = 0

        # Raw JSON text columns are no longer needed once expanded.
        return df.drop(['purchase_history', 'login_history'], axis=1)

    def _clean_data(self, df):
        """Impute missing values, bound outliers, and derive features.

        Derived columns: age_group (bucketed), percentile ranks for
        income / purchase / logins, a weighted user_value score, and the
        is_high_value flag (top 20% of user_value within this batch).
        """
        # Impute with the batch median; the extra fillna(0) covers the
        # all-NaN-column edge case where median() itself is NaN.
        df['income'] = df['income'].fillna(df['income'].median()).fillna(0).astype('float32')
        # Clip BEFORE casting to uint8: casting a negative float to uint8
        # wraps around (e.g. -1 -> 255) instead of saturating, which would
        # defeat the subsequent clip.
        df['age'] = df['age'].fillna(df['age'].median()).fillna(0).clip(0, 100).astype('uint8')
        df['income'] = df['income'].clip(lower=0)

        # Age bucketing for the distribution report/chart.
        age_bins = [0, 18, 30, 45, 60, 100]
        df['age_group'] = pd.cut(df['age'], bins=age_bins,
                                labels=['未成年', '青年', '中年', '中老年', '老年'],
                                include_lowest=True).astype('category')

        # Percentile ranks are computed within the batch, so they are
        # relative to this chunk of data, not the full dataset.
        df['income_rank'] = df['income'].rank(pct=True)
        df['purchase_rank'] = df['purchase_amount'].rank(pct=True)
        df['login_rank'] = df['login_count'].rank(pct=True)

        # Composite user-value score: income weighted highest.
        df['user_value'] = (df['income_rank'] * 0.4 +
                           df['purchase_rank'] * 0.3 +
                           df['login_rank'] * 0.3)

        # NOTE(review): the 80th-percentile threshold is recomputed per
        # batch, so "high value" is relative to each batch rather than
        # global — confirm this is the intended semantics.
        high_value_threshold = df['user_value'].quantile(0.8)
        df['is_high_value'] = (df['user_value'] >= high_value_threshold).astype('bool')

        return df

    def _update_streaming_stats(self, df):
        """Fold one cleaned batch into the running accumulators."""
        self.stats['total_records'] += len(df)

        # Missing-value counts. NOTE(review): this runs after _clean_data
        # has already imputed income/age, so those columns will report ~0
        # missing here — move this before imputation if raw missingness
        # is what the report should show.
        missing = df.isnull().sum()
        for col, count in missing.items():
            self.stats['missing_values'][col] += count

        # IQR-based outlier detection, with per-batch quartiles.
        numeric_cols = df.select_dtypes(include=np.number).columns
        for col in numeric_cols:
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            lower = q1 - 1.5 * iqr
            upper = q3 + 1.5 * iqr
            outliers = ((df[col] < lower) | (df[col] > upper)).sum()
            self.stats['outlier_counts'][col] += outliers

        # Categorical distribution tallies.
        for gender, count in df['gender'].value_counts().items():
            self.stats['gender_dist'][gender] += count

        active_df = df[df['is_active']]
        for gender, count in active_df['gender'].value_counts().items():
            self.stats['active_gender'][gender] += count

        for method, count in df['payment_method'].value_counts().items():
            self.stats['payment_dist'][method] += count

        for age_group, count in df['age_group'].value_counts().items():
            self.stats['age_dist'][age_group] += count

        # High-value segment aggregates.
        high_value = df[df['is_high_value']]
        self.stats['high_value']['total'] += len(high_value)
        self.stats['high_value']['income_sum'] += high_value['income'].sum()
        self.stats['high_value']['purchase_sum'] += high_value['purchase_amount'].sum()

        for gender, count in high_value['gender'].value_counts().items():
            self.stats['high_value']['gender'][gender] += count
        for country, count in high_value['country'].value_counts().items():
            self.stats['high_value']['countries'][country] += count
        for category, count in high_value['purchase_category'].value_counts().items():
            self.stats['high_value']['categories'][category] += count

    def _process_batch(self, file_path):
        """Process one parquet file batch-by-batch with memory monitoring.

        The batch size adapts to memory pressure; note that the adjusted
        self.batch_size only takes effect for subsequently opened files,
        since iter_batches captures the size at call time.
        """
        parquet_file = pq.ParquetFile(file_path)
        mem_history = []  # per-batch DataFrame footprint, in MB

        for i, batch in enumerate(parquet_file.iter_batches(batch_size=self.batch_size)):
            # Adapt batch size from the trailing-3 average footprint.
            if len(mem_history) >= 3:
                avg_mem = np.mean(mem_history[-3:])
                # Convert to MB so both sides of the comparison share the
                # unit (psutil reports bytes, mem_history stores MB).
                free_mem = psutil.virtual_memory().available / 1024**2

                if avg_mem > 0.7 * free_mem:
                    self.batch_size = max(int(self.batch_size * 0.8), 100_000)
                    print(f"内存压力较大，调整批次大小为 {self.batch_size}")
                elif avg_mem < 0.3 * free_mem:
                    self.batch_size = min(int(self.batch_size * 1.2), 5_000_000)

            # Pre-bind so the finally block cannot hit an unbound name if
            # to_pandas() or any transform raises.
            df = None
            try:
                df = batch.to_pandas()
                df = self._parse_json_fields(df)
                df = self._clean_data(df)
                df = self._optimize_dtypes(df)

                self._update_streaming_stats(df)

                # Persist the cleaned batch for potential downstream use.
                temp_path = os.path.join(self.temp_dir.name,
                                       f"{os.path.basename(file_path)}_{i}.feather")
                df.to_feather(temp_path)

                mem_usage = df.memory_usage().sum() / 1024**2  # MB
                mem_history.append(mem_usage)

            finally:
                if df is not None:
                    del df
                gc.collect()

    def analyze(self):
        """Run the full pipeline: process every file, then report and plot.

        Temporary files are always cleaned up, even on failure.
        """
        try:
            for file in self.parquet_files:
                print(f"正在处理文件: {os.path.basename(file)}")
                self._process_batch(file)

            self._generate_report()
            self._generate_visualizations()
        finally:
            self.temp_dir.cleanup()
            print("临时文件已清理")

    def _generate_report(self):
        """Render the accumulated statistics as a UTF-8 text report."""
        report = []

        # --- Data-quality section ---
        report.append("="*40 + " 数据质量报告 " + "="*40)
        report.append(f"总处理记录数: {self.stats['total_records']:,}")
        report.append("\n缺失值统计:")
        for col, count in self.stats['missing_values'].items():
            rate = count / self.stats['total_records'] * 100
            report.append(f"{col}: {count:,} ({rate:.2f}%)")

        report.append("\n异常值统计:")
        for col, count in self.stats['outlier_counts'].items():
            rate = count / self.stats['total_records'] * 100
            report.append(f"{col}: {count:,} ({rate:.2f}%)")

        # --- User-profile section ---
        report.append("\n" + "="*40 + " 用户画像报告 " + "="*40)
        report.append("性别分布:")
        for gender, count in self.stats['gender_dist'].items():
            report.append(f"{gender}: {count:,} ({(count/self.stats['total_records']*100):.1f}%)")

        report.append("\n活跃用户性别分布:")
        total_active = sum(self.stats['active_gender'].values())
        for gender, count in self.stats['active_gender'].items():
            report.append(f"{gender}: {count:,} ({(count/total_active*100 if total_active>0 else 0):.1f}%)")

        # --- High-value-user section ---
        report.append("\n" + "="*40 + " 高价值用户分析 " + "="*40)
        hv_total = self.stats['high_value']['total']
        report.append(f"高价值用户总数: {hv_total:,} ({(hv_total/self.stats['total_records']*100):.1f}%)")
        if hv_total > 0:
            avg_income = self.stats['high_value']['income_sum'] / hv_total
            avg_purchase = self.stats['high_value']['purchase_sum'] / hv_total
            report.append(f"平均收入: {avg_income:.2f}")
            report.append(f"平均消费金额: {avg_purchase:.2f}")

            top_countries = sorted(self.stats['high_value']['countries'].items(),
                                 key=lambda x: x[1], reverse=True)[:3]
            report.append("\nTop 3国家分布:")
            for country, count in top_countries:
                report.append(f"{country}: {count:,}")

            top_categories = sorted(self.stats['high_value']['categories'].items(),
                                  key=lambda x: x[1], reverse=True)[:3]
            report.append("\nTop 3消费品类:")
            for category, count in top_categories:
                report.append(f"{category}: {count:,}")

        # Ensure the output directory exists before writing (a fresh
        # checkout would otherwise hit FileNotFoundError).
        os.makedirs('./30Greport', exist_ok=True)
        with open('./30Greport/analysis_report.txt', 'w', encoding='utf-8') as f:
            f.write("\n".join(report))

    def _generate_visualizations(self):
        """Plot a 2x2 grid of summary charts and save it as a PNG."""
        plt.figure(figsize=(12, 6))

        # Gender distribution pie chart.
        plt.subplot(2, 2, 1)
        plt.pie(self.stats['gender_dist'].values(),
                labels=self.stats['gender_dist'].keys(),
                autopct='%1.1f%%')
        plt.title('用户性别分布')

        # Top-5 payment methods.
        plt.subplot(2, 2, 2)
        payment_data = sorted(self.stats['payment_dist'].items(),
                            key=lambda x: x[1], reverse=True)[:5]
        plt.bar([x[0] for x in payment_data], [x[1] for x in payment_data])
        plt.title('支付方式TOP5')
        plt.xticks(rotation=45)

        # Age-group distribution (fixed bucket order).
        plt.subplot(2, 2, 3)
        age_groups = ['未成年', '青年', '中年', '中老年', '老年']
        age_counts = [self.stats['age_dist'].get(g, 0) for g in age_groups]
        plt.bar(age_groups, age_counts)
        plt.title('年龄分布')

        # Top-5 countries among high-value users.
        plt.subplot(2, 2, 4)
        countries = sorted(self.stats['high_value']['countries'].items(),
                         key=lambda x: x[1], reverse=True)[:5]
        plt.barh([x[0] for x in countries], [x[1] for x in countries])
        plt.title('高价值用户国家分布TOP5')

        plt.tight_layout()
        plt.savefig('data_analysis.png', dpi=300, bbox_inches='tight')
        plt.close()

if __name__ == "__main__":
    # Entry point: analyze the 30G dataset directory, printing wall-clock
    # timestamps before and after the run.
    timestamp_fmt = "%Y-%m-%d %H:%M:%S"
    analyzer = OptimizedDataAnalyzer("data/30G_data_new")
    print("开始分析数据...", time.strftime(timestamp_fmt, time.localtime()))
    analyzer.analyze()
    print("分析完成！", time.strftime(timestamp_fmt, time.localtime()))