"""
增强版模拟器类，支持MySQL和SQLite存储
"""
import json
import os
import random
import sqlite3
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

import numpy as np
import pandas as pd

# 数据库连接库
try:
    import pymysql
    MYSQL_AVAILABLE = True
except ImportError:
    MYSQL_AVAILABLE = False
    print("Warning: pymysql not available. MySQL functionality disabled.")

try:
    from .enhanced_models import (
        DistributionConfig, SimulatorConfig, UserGenerationParams, 
        UserBasicInfo, AgeBaseParams, GenderType, AgeGroup, TrendType,
        DatabaseConfig, UserRecord, UserRecordV2, GridInfo
    )
    from .enhanced_user import EnhancedUser
except ImportError:
    from enhanced_models import (
        DistributionConfig, SimulatorConfig, UserGenerationParams, 
        UserBasicInfo, AgeBaseParams, GenderType, AgeGroup, TrendType,
        DatabaseConfig, UserRecord, UserRecordV2, GridInfo
    )
    from enhanced_user import EnhancedUser


class EnhancedMobileDataSimulator:
    """增强版移动资费数据模拟器，支持完整的schema和数据库存储"""
    
    def __init__(self, config: SimulatorConfig):
        """
        Initialize the simulator.

        Args:
            config: Simulator configuration (user count, day count, seed,
                output path, distribution settings).
        """
        self.config = config
        self.users: List[EnhancedUser] = []

        # Seed the RNGs *before* generating any random data so that the
        # grid/base-station coordinates below are reproducible too.
        # (Previously the seed was applied after the mappings were built,
        # so they varied between runs despite a fixed seed.)
        if config.random_seed is not None:
            np.random.seed(config.random_seed)
            random.seed(config.random_seed)

        # Grid coordinates G001..G999, roughly covering the Beijing area.
        self.grid_mapping = {
            f"G{str(i).zfill(3)}": {
                "LAT": round(random.uniform(39.43, 41.05), 2),
                "LONG": round(random.uniform(115.25, 117.30), 2)
            } for i in range(1, 1000)
        }

        # Base-station coordinates B001..B999 (same geographic bounds).
        self.base_mapping = {
            f"B{str(i).zfill(3)}": {
                "LAT": round(random.uniform(39.43, 41.05), 2),
                "LONG": round(random.uniform(115.25, 117.30), 2)
            } for i in range(1, 1000)
        }
    
    @classmethod
    def create_default_config(cls) -> SimulatorConfig:
        """Assemble the default configuration (1000 users, 30 days, seed 42)."""
        # District/street data for Beijing (JSON file or built-in fallback).
        beijing_districts = cls._load_beijing_districts()

        # Per-age-bracket baselines: (traffic, call, growth, decline, package).
        baseline_table = {
            AgeGroup.UNDER_18: (30, 150, 0.07, 0.02, 88),
            AgeGroup.AGE_18_25: (60, 200, 0.06, 0.015, 128),
            AgeGroup.AGE_26_30: (50, 300, 0.05, 0.01, 158),
            AgeGroup.AGE_31_35: (45, 350, 0.04, 0.008, 198),
            AgeGroup.AGE_35_45: (40, 400, 0.03, 0.005, 198),
            AgeGroup.AGE_46_60: (25, 300, 0.02, 0.003, 128),
            AgeGroup.OVER_60: (15, 200, 0.01, 0.001, 88),
        }
        age_base_params = {
            group: AgeBaseParams(
                traffic=traffic,
                call=call,
                growth=growth,
                decline=decline,
                package=package,
            )
            for group, (traffic, call, growth, decline, package) in baseline_table.items()
        }

        # Categorical distributions used when sampling user attributes.
        distribution_config = DistributionConfig(
            age_probabilities={
                AgeGroup.UNDER_18: 0.1,
                AgeGroup.AGE_18_25: 0.15,
                AgeGroup.AGE_26_30: 0.2,
                AgeGroup.AGE_31_35: 0.15,
                AgeGroup.AGE_35_45: 0.2,
                AgeGroup.AGE_46_60: 0.15,
                AgeGroup.OVER_60: 0.05,
            },
            gender_probabilities={
                GenderType.MALE: 0.51,
                GenderType.FEMALE: 0.49,
            },
            trend_probabilities={
                TrendType.STABLE: 0.2,
                TrendType.GROWTH: 0.2,
                TrendType.DECLINE: 0.4,
                TrendType.FLUCTUATION: 0.2,
            },
            district_probabilities=cls._create_district_probabilities(beijing_districts),
            age_base_params=age_base_params,
            beijing_districts=beijing_districts,
        )

        return SimulatorConfig(
            num_users=1000,
            num_days=30,
            output_file="data/enhanced_user_data.csv",
            random_seed=42,
            start_date=datetime(2025, 7, 30),
            distribution_config=distribution_config,
        )
    
    @staticmethod
    def _load_beijing_districts() -> Dict[str, List[str]]:
        """Load Beijing district/street data from the bundled JSON file.

        Returns:
            Mapping of district name -> list of street names. Falls back to
            a small built-in sample when the file is missing *or* cannot be
            parsed (previously a corrupt file crashed instead of falling back).
        """
        json_path = os.path.join(os.path.dirname(__file__), '..', 'data', 'beijing_blocks.json')
        fallback = {
            "东城区": ["东华门街道", "景山街道", "交道口街道", "安定门街道"],
            "西城区": ["西长安街街道", "新街口街道", "月坛街道", "展览路街道"],
            "朝阳区": ["建外街道", "朝外街道", "呼家楼街道", "三里屯街道"]
        }
        try:
            with open(json_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            # Drop the bookkeeping entry; everything else is district data.
            return {k: v for k, v in data.items() if k != 'metadata'}
        except FileNotFoundError:
            print(f"警告: 未找到文件 {json_path}")
            print("使用默认的beijing_districts数据")
            return fallback
        except json.JSONDecodeError as exc:
            print(f"警告: 解析文件失败 {json_path}: {exc}")
            print("使用默认的beijing_districts数据")
            return fallback
    
    @staticmethod
    def _create_district_probabilities(beijing_districts: Dict[str, List[str]]) -> Dict[str, float]:
        """Assign a sampling probability to each district.

        The first 16 districts receive hand-tuned weights; any extras get a
        small flat weight of 0.01. With fewer than 16 districts, a uniform
        distribution is used. Weights are normalized to sum to 1.
        """
        names = list(beijing_districts)
        count = len(names)
        if not names:
            return {}

        # Hand-tuned weights for the first 16 districts.
        preset = [0.08, 0.08, 0.15, 0.1, 0.05, 0.15, 0.05, 0.05,
                  0.08, 0.05, 0.03, 0.08, 0.03, 0.02, 0.02, 0.02]
        if count >= 16:
            weights = np.array(preset + [0.01] * (count - 16))
        else:
            weights = np.full(count, 1.0 / count)

        # Normalize so the weights form a proper probability distribution.
        weights = weights / weights.sum()
        return dict(zip(names, weights))
    
    def generate_users(self) -> List[EnhancedUser]:
        """Create all configured users, cache them on ``self.users``, return them."""
        total = self.config.num_users
        print(f"开始生成 {total} 个用户...")

        generated = []
        for idx in range(total):
            generated.append(EnhancedUser(self._generate_user_params(idx)))
            # Progress report every 100 users.
            if (idx + 1) % 100 == 0:
                print(f"已生成 {idx + 1}/{total} 用户参数")

        self.users = generated
        return generated
    
    def _generate_user_params(self, user_index: int) -> UserGenerationParams:
        """Build the generation parameters for one user.

        Args:
            user_index: Zero-based user index; drives the generated
                USER_ID/PERSON_ID and the grid/base-station assignment.

        Returns:
            UserGenerationParams consumed by ``EnhancedUser``.
        """
        dist_config = self.config.distribution_config
        
        # Sequential identifiers starting at 100000.
        user_id = f"U{100000 + user_index}"
        person_id = f"P{100000 + user_index}"
        
        # Sample categorical attributes from the configured distributions.
        gender = self._sample_from_distribution(dist_config.gender_probabilities)
        age_group = self._sample_from_distribution(dist_config.age_probabilities)
        trend_type = self._sample_from_distribution(dist_config.trend_probabilities)
        district = self._sample_from_distribution(dist_config.district_probabilities)
        
        # Pick a street uniformly within the sampled district.
        street = random.choice(dist_config.beijing_districts[district])
        
        # Per-user multiplier applied to the age-group baselines (±20%).
        individual_factor = np.random.uniform(0.8, 1.2)
        
        # Static user profile.
        basic_info = UserBasicInfo(
            USER_ID=user_id,
            PERSON_ID=person_id,
            ATTR_GENDER=gender,
            age_group=age_group,
            ATTR_CITY=district,  # ATTR_CITY stores the district name
            district=district,   # district kept separately for internal logic
            ATTR_COUNTY=street,
            trend_type=trend_type
        )
        
        # Baseline traffic/call/fee parameters for the sampled age group.
        age_base_params = dist_config.age_base_params[age_group]

        
        # Grid/base IDs follow the user index. Only G001..G999 / B001..B999
        # exist in the mappings, so users beyond index 998 fall back to the
        # default coordinates (40.0, 116.0) below.
        grid_id = f"G{user_index+1:03d}"
        base_id = f"B{user_index+1:03d}"
        
        grid_info = GridInfo(
            GRID_ID=grid_id,
            GRID_LAT=self.grid_mapping.get(grid_id, {}).get("LAT", 40.0),
            GRID_LONG=self.grid_mapping.get(grid_id, {}).get("LONG", 116.0)
        )
        
        base_info = GridInfo(
            GRID_ID=base_id,
            GRID_LAT=self.base_mapping.get(base_id, {}).get("LAT", 40.0),
            GRID_LONG=self.base_mapping.get(base_id, {}).get("LONG", 116.0)
        )
        
        return UserGenerationParams(
            basic_info=basic_info,
            age_base_params=age_base_params,
            # NOTE(review): assumes config.num_months is always populated even
            # though the default config sets num_days — confirm in SimulatorConfig.
            num_months=self.config.num_months,
            start_date=self.config.start_date,
            individual_factor=individual_factor,
            grid_mapping=grid_info,
            base_mapping=base_info
        )
    
    def _sample_from_distribution(self, distribution: Dict) -> Any:
        """Draw one key from *distribution*, weighted by its values.

        Args:
            distribution: Mapping of item -> probability weight; weights need
                not sum to 1 (``random.choices`` normalizes internally).

        Returns:
            A single sampled key.
        """
        # Fixed return annotation: the builtin ``any`` was used where
        # ``typing.Any`` was intended.
        items = list(distribution.keys())
        probabilities = list(distribution.values())
        return random.choices(items, weights=probabilities, k=1)[0]
    
    def generate_all_data(self) -> pd.DataFrame:
        """Generate daily records for every user and return them as one DataFrame.

        Uses the generator classes from ``enhanced_models`` rather than the
        ``EnhancedUser`` objects produced by :meth:`generate_users`.
        """
        print("开始生成用户记录数据...")

        # Import lazily, mirroring the module-level package/script fallback:
        # the bare relative import previously used here crashed when this
        # file is executed outside the package.
        try:
            from .enhanced_models import UserInfoWithCalulateGenerator, DayUserGenerator
        except ImportError:
            from enhanced_models import UserInfoWithCalulateGenerator, DayUserGenerator

        # Generate the per-user profile objects.
        user_info_generator = UserInfoWithCalulateGenerator(nums=self.config.num_users)
        user_infos = list(user_info_generator.generator())

        # Build the ascending list of simulated dates ending at start_date.
        end_date = self.config.start_date
        if self.config.num_days is not None:
            span = self.config.num_days
        else:
            # Legacy month-based configuration: one day per configured "month".
            span = self.config.num_months
        days = [end_date - timedelta(days=i) for i in range(span - 1, -1, -1)]

        all_records = []
        for i, user_info in enumerate(user_infos):
            # One record per day for this user.
            day_generator = DayUserGenerator(days, user_info)
            all_records.extend(day_generator.generator())

            if (i + 1) % 100 == 0:
                print(f"已生成 {i + 1}/{len(user_infos)} 用户的记录数据")

        # Convert records (pydantic-style, via .dict()) to plain dicts,
        # unwrapping enum members to their raw values.
        data_dicts = []
        for record in all_records:
            data_dict = record.dict()
            for key, value in data_dict.items():
                if hasattr(value, 'value'):
                    data_dict[key] = value.value
            data_dicts.append(data_dict)

        final_df = pd.DataFrame(data_dicts)
        print(f"数据生成完成！总记录数: {len(final_df)}")
        return final_df
    
    def save_to_csv(self, df: pd.DataFrame = None) -> str:
        """Write the dataset to the configured CSV file and return its path."""
        if df is None:
            df = self.generate_all_data()

        target = self.config.output_file
        parent = os.path.dirname(target)

        # Create the parent directory on first use.
        if parent and not os.path.exists(parent):
            os.makedirs(parent, exist_ok=True)
            print(f"创建输出目录: {parent}")

        # utf-8-sig keeps Excel happy with the Chinese column contents.
        df.to_csv(target, index=False, encoding="utf-8-sig")
        print(f"数据已保存到CSV: {target}")
        return target
    
    def save_to_sqlite(self, df: pd.DataFrame = None, db_config: DatabaseConfig = None) -> str:
        """Persist the dataset into a SQLite database and return the db path."""
        if df is None:
            df = self.generate_all_data()

        # Resolve target file/table, falling back to built-in defaults.
        if db_config is None:
            db_file, table_name = "data/user_data.db", "user_records"
        else:
            db_file, table_name = db_config.database, db_config.table_name

        # Make sure the parent directory exists before connecting.
        parent = os.path.dirname(db_file)
        if parent and not os.path.exists(parent):
            os.makedirs(parent, exist_ok=True)

        conn = sqlite3.connect(db_file)
        try:
            # Replace any previous contents of the table.
            df.to_sql(table_name, conn, if_exists='replace', index=False)
            print(f"数据已保存到SQLite: {db_file}, 表: {table_name}")

            # Report how many rows landed in the table.
            count = conn.execute(f"SELECT COUNT(*) FROM {table_name}").fetchone()[0]
            print(f"插入记录数: {count}")
        finally:
            conn.close()

        return db_file
    
    def save_to_mysql(self, df: pd.DataFrame = None, db_config: DatabaseConfig = None):
        """Persist the dataset to a MySQL table, creating the table if needed.

        Args:
            df: Dataset to store; generated on demand when omitted.
            db_config: Required connection settings (host, port, username,
                password, database, table_name).

        Raises:
            ImportError: when pymysql is not installed.
            ValueError: when ``db_config`` is None.
        """
        if not MYSQL_AVAILABLE:
            raise ImportError("pymysql not available. Please install it: pip install pymysql")
        
        if df is None:
            df = self.generate_all_data()
        
        if db_config is None:
            raise ValueError("MySQL数据库配置不能为空")
        
        # Open the connection; fall back to the standard port when unset.
        connection = pymysql.connect(
            host=db_config.host,
            port=db_config.port or 3306,
            user=db_config.username,
            password=db_config.password,
            database=db_config.database,
            charset='utf8mb4'
        )
        
        try:
            cursor = connection.cursor()
            
            # Create the target table on first use (no-op if it exists).
            self._create_mysql_table(cursor, db_config.table_name)
            
            # Bulk-insert all records.
            self._insert_mysql_data(cursor, df, db_config.table_name)
            
            # Single commit after all rows are staged.
            connection.commit()
            print(f"数据已保存到MySQL: {db_config.host}:{db_config.port}/{db_config.database}")
            print(f"表: {db_config.table_name}, 插入记录数: {len(df)}")
            
        finally:
            connection.close()
    
    def _create_mysql_table(self, cursor, table_name: str):
        """Create the MySQL table for UserRecordV2 data if it does not exist.

        Args:
            cursor: An open pymysql cursor.
            table_name: Target table name. NOTE(review): interpolated into the
                DDL as an identifier (cannot be parameterized) — assumes it
                comes from trusted configuration.
        """
        # Check whether the table already exists in the current schema.
        check_table_query = f"""
        SELECT COUNT(*)
        FROM information_schema.tables 
        WHERE table_schema = DATABASE() AND table_name = %s
        """
        cursor.execute(check_table_query, (table_name,))
        result = cursor.fetchone()[0]
        
        if result == 0:
            # DDL based on the UserRecordV2 schema.
            create_table_query = f"""
            CREATE TABLE {table_name} (
                -- 基础标识字段
                STATIS_TIME VARCHAR(8) COMMENT '统计时间',
                PROV VARCHAR(10) COMMENT '省份名称',
                PERSON_ID VARCHAR(10) COMMENT '自然人编码',
                PHONE_NUM VARCHAR(11) COMMENT '手机号码',
                USER_ID VARCHAR(10) COMMENT '用户编码',
                CUST_ID VARCHAR(10) COMMENT '客户编码',
                
                -- 用户属性字段
                ATTR_AGE INT COMMENT '属性-年龄',
                ATTR_GENDER VARCHAR(2) COMMENT '属性-性别',
                ATTR_PROVINCE VARCHAR(20) DEFAULT '北京市' COMMENT '属性-省',
                ATTR_CITY VARCHAR(20) COMMENT '属性-市',
                ATTR_COUNTY VARCHAR(50) COMMENT '属性-区县街道',
                
                -- 网格和基站信息
                ATTR_GRID_ID VARCHAR(10) COMMENT '属性-所属网格编码',
                ATTR_GRID_LAT FLOAT COMMENT '属性-所属网格经度',
                ATTR_GRID_LONG FLOAT COMMENT '属性-所属网格纬度',
                ATTR_DAY_BASE_ID VARCHAR(10) COMMENT '属性-日间常驻基站编码',
                ATTR_DAY_BASE_LAT FLOAT COMMENT '属性-日间常驻基站经度',
                ATTR_DAY_BASE_LONG FLOAT COMMENT '属性-日间常驻基站纬度',
                ATTR_NIGHT_BASE_ID VARCHAR(10) COMMENT '属性-夜间常驻基站编码',
                ATTR_NIGHT_BASE_LAT FLOAT COMMENT '属性-夜间常驻基站经度',
                ATTR_NIGHT_BASE_LONG FLOAT COMMENT '属性-夜间常驻基站纬度',
                
                -- 业务属性
                ATTR_FEE_FLUCTUATION VARCHAR(10) COMMENT '属性-资费波动标签',
                
                -- 标签字段
                LABEL_IS_GSM TINYINT COMMENT '标签-是否全球通用户',
                LABEL_IS_MID TINYINT COMMENT '标签-是否腰部用户',
                LABEL_IS_PREM TINYINT COMMENT '标签-是否中高端用户',
                LABEL_IS_DIFF TINYINT COMMENT '标签-是否异网用户',
                LABEL_IS_ELDER TINYINT COMMENT '标签-是否银发用户',
                LABEL_IS_INT TINYINT COMMENT '标签-是否国际用户',
                
                -- 社交圈信息
                ATTR_FAMILY_CIRCLE_ID VARCHAR(10) COMMENT '属性-家庭圈ID',
                ATTR_SOCIAL_CIRCLE_ID VARCHAR(10) COMMENT '属性-交往圈ID',
                ATTR_COMMUNITY_CIRCLE_ID VARCHAR(10) COMMENT '属性-社区圈ID',
                ATTR_CAMPUS_CIRCLE_ID VARCHAR(10) COMMENT '属性-校园圈ID',
                ATTR_COMMUTE_CIRCLE_ID VARCHAR(10) COMMENT '属性-通勤圈ID',
                ATTR_BUSINESS_CIRCLE_ID VARCHAR(10) COMMENT '属性-商业圈ID',
                ATTR_ONLINE_CIRCLE_ID VARCHAR(10) COMMENT '属性-网络圈ID',
                
                -- 指标字段
                METRIC_ARPU FLOAT COMMENT '指标-ARPU（元）',
                METRIC_FLUX_USED FLOAT COMMENT '指标-使用流量（MB）',
                
                -- 家宽指标
                METRIC_BROADBAND_COUNT INT DEFAULT 0 COMMENT '属性_家宽条数',
                METRIC_BROADBAND_TRAFFIC FLOAT DEFAULT 0.0 COMMENT '属性_家宽平均流量(KB)',
                METRIC_BROADBAND_ONILNETIME FLOAT DEFAULT 0.0 COMMENT '属性_家宽在线时长(秒)',
                
                -- 消费指标
                METRIC_MONTHLY_CONSUMPTION FLOAT COMMENT '指标-月消费金额(元)',
                METRIC_BASIC_PACKAGE_FEE FLOAT COMMENT '指标-基本套餐费(元)',
                METRIC_LOCAL_CALL_FEE FLOAT COMMENT '指标-本地通话费（元）',
                METRIC_LONG_DISTANCE_CALL_FEE FLOAT COMMENT '指标-异地通话费（元）',
                METRIC_DOMESTIC_DATA_FEE FLOAT COMMENT '指标-国内流量包业务费（元）',
                METRIC_INTERNATIONAL_DATA_FEE FLOAT COMMENT '指标-国际流量包业务费（元）',
                METRIC_ADDITIONAL_SERVICE_FEE FLOAT COMMENT '指标-附加业务费（元）',
                METRIC_VALUE_ADDED_SERVICE_FEE FLOAT COMMENT '指标-增值业务费（元）',
                METRIC_DATA_USAGE_FEE FLOAT COMMENT '指标-流量使用费（元）',
                METRIC_CALL_DURATION_MINUTES INT COMMENT '指标-通话时长（分钟）',
                
                -- V2版本扩展字段
                -- 套餐相关属性
                ATTR_NUMBER_ACTIVATION_DATE VARCHAR(8) COMMENT '属性-激活日期',
                ATTR_NUMBER_PLAN_NAME VARCHAR(50) COMMENT '属性-套餐名称',
                ATTR_NUMBER_TERMINATION_DATE VARCHAR(8) COMMENT '属性-离网日期',
                ATTR_NUMBER_REGISTERED_LOCATION VARCHAR(100) COMMENT '属性-入网地址',
                
                -- 套餐指标
                METRIC_NUMBER_DATA_ALLOWANCE_MB INT COMMENT '指标-套餐总流量（MB）',
                METRIC_NUMBER_FREE_CALL_MINUTES INT COMMENT '指标-套餐总通话时长（分钟）',
                
                -- 携号转网标签
                LABEL_NUMBER_PORTABILITY_STATUS VARCHAR(2) COMMENT '标签-是否携号转网',
                
                -- 基站使用统计
                METRIC_LAC_DAY_TOTAL_TIME_MINUTES INT COMMENT '指标-白天常驻基站时间（分钟）',
                METRIC_LAC_NIGHT_TOTAL_TIME_MINUTES INT COMMENT '指标-夜间常驻基站时间（分钟）',
                METRIC_LAC_DAY_LAC_COUNT INT COMMENT '指标-白天常驻基站连接次数',
                METRIC_LAC_NIGHT_LAC_COUNT INT COMMENT '指标-夜间常驻基站连接次数',
                
                -- 家宽信息
                ATTR_BRD_ADDR VARCHAR(200) COMMENT '属性-家宽地址',
                
                -- 产品使用信息
                ATTR_PRODUCT_NAME VARCHAR(100) COMMENT '属性-使用产品名称',
                
                -- 集团信息
                ATTR_GROUP_NAME VARCHAR(100) COMMENT '属性-所属集团名称',
                
                -- 信用分
                ATTR_CREDIT_SCORE INT COMMENT '属性-信用分总得分'
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='用户资费数据表V2'
            """
            cursor.execute(create_table_query)
            print(f"表 {table_name} 创建成功")
        else:
            print(f"表 {table_name} 已存在")
    
    def _insert_mysql_data(self, cursor, df: pd.DataFrame, table_name: str):
        """Bulk-insert *df* into *table_name* through the given MySQL cursor."""
        # Replace NaN with None so the driver sends SQL NULLs.
        df_clean = df.copy()
        df_clean = df_clean.where(pd.notna(df_clean), None)

        columns = df_clean.columns.tolist()
        placeholders = ', '.join(['%s'] * len(columns))
        columns_str = ', '.join(columns)

        insert_query = f"""
        INSERT INTO {table_name} ({columns_str}) VALUES ({placeholders})
        """

        def _to_native(value):
            # NaN/None -> NULL; numpy scalars -> plain Python scalars.
            if pd.isna(value) or value is None:
                return None
            if hasattr(value, 'item'):
                return value.item()
            return value

        # Convert every cell, then insert all rows in one batch.
        data_rows = [
            tuple(_to_native(cell) for cell in row)
            for _, row in df_clean.iterrows()
        ]
        cursor.executemany(insert_query, data_rows)
    
    def get_statistics(self, df: pd.DataFrame = None) -> Dict:
        """Compute summary statistics for a generated dataset.

        Args:
            df: Dataset to summarize; generated on demand when omitted.

        Returns:
            Dict with record/user counts, the fluctuation-label distribution
            (computed over non-off-network users only), and age/gender
            summaries.
        """
        if df is None:
            df = self.generate_all_data()

        # Guard: an empty frame would otherwise raise (missing columns /
        # division by zero below).
        if df.empty:
            return {
                "total_records": 0,
                "total_users": 0,
                "non_diff_users": 0,
                "diff_users": 0,
                "months_per_user": 0,
                "fluctuation_label_distribution": {},
                "age_distribution": {},
                "gender_distribution": {},
            }

        # One row per user for the per-user statistics.
        unique_users = df.drop_duplicates('USER_ID')

        # Fluctuation labels are only meaningful for on-network users.
        non_diff_users = unique_users[unique_users['LABEL_IS_DIFF'] == 0]
        fluctuation_counts = non_diff_users['ATTR_FEE_FLUCTUATION'].value_counts()

        return {
            "total_records": len(df),
            "total_users": len(unique_users),
            "non_diff_users": len(non_diff_users),
            "diff_users": len(unique_users[unique_users['LABEL_IS_DIFF'] == 1]),
            # NOTE(review): despite the key name this is records-per-user;
            # records are daily in the V2 generator.
            "months_per_user": len(df) // len(unique_users),
            "fluctuation_label_distribution": fluctuation_counts.to_dict(),
            "age_distribution": unique_users['ATTR_AGE'].describe().to_dict(),
            "gender_distribution": unique_users['ATTR_GENDER'].value_counts().to_dict(),
        }
    
    def print_statistics(self, df: pd.DataFrame = None):
        """Print a human-readable statistics report to stdout."""
        stats = self.get_statistics(df)
        rule = "=" * 60

        print(rule)
        print("增强版数据统计信息")
        print(rule)
        print(f"总记录数: {stats['total_records']}")
        print(f"用户总数: {stats['total_users']}")
        print(f"非异网用户: {stats['non_diff_users']}")
        print(f"异网用户: {stats['diff_users']}")
        print(f"每用户月数: {stats['months_per_user']}")
        print()

        # Fluctuation-label share among on-network users.
        print("用户资费波动标签分布 (仅非异网用户):")
        for label, count in stats['fluctuation_label_distribution'].items():
            share = count / stats['non_diff_users'] * 100
            print(f"  {label}: {count} ({share:.1f}%)")
        print()

        # Gender share among all users.
        print("性别分布:")
        for gender, count in stats['gender_distribution'].items():
            share = count / stats['total_users'] * 100
            print(f"  {gender}: {count} ({share:.1f}%)")
        print(rule)
    
    def run_simulation(self, save_csv: bool = True, save_sqlite: bool = False, 
                      save_mysql: bool = False, db_config: DatabaseConfig = None) -> pd.DataFrame:
        """Generate the dataset, persist it to the selected sinks, report stats."""
        print("开始增强版移动资费数据模拟...")
        print(f"配置: {self.config.num_users}用户 x {self.config.num_months}个月")

        dataset = self.generate_all_data()

        # Persist to each requested backend, in order.
        if save_csv:
            self.save_to_csv(dataset)
        if save_sqlite:
            self.save_to_sqlite(dataset, db_config)
        if save_mysql and db_config is not None:
            self.save_to_mysql(dataset, db_config)
        elif save_mysql:
            print("警告: 未提供MySQL配置，跳过MySQL保存")

        # Summary report to stdout.
        self.print_statistics(dataset)

        return dataset
