#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量处理用户数据脚本
用于处理目录中的多个用户数据文件
"""
import os
import pandas as pd
import glob
import sys

# 添加项目根目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

def batch_process_user_data(input_dir, output_dir):
    """
    Batch-process every user-data CSV file in a directory.

    For each ``*.csv`` file in ``input_dir``, run the project's
    preprocessing, validate the result, and collect a per-file report;
    finally print a summary and save it as ``summary_report.txt`` in
    ``output_dir``.

    Parameters:
    input_dir: directory scanned (non-recursively) for ``*.csv`` files
    output_dir: directory that receives the processed files and the
        summary report (created if it does not exist)
    """
    # Make sure the output directory exists before writing anything.
    os.makedirs(output_dir, exist_ok=True)

    # Collect all CSV files via the module-level ``glob`` import
    # (the former function-local ``import glob`` was redundant).
    csv_files = glob.glob(os.path.join(input_dir, "*.csv"))

    if not csv_files:
        print(f"在目录 {input_dir} 中未找到CSV文件")
        return

    # Resolve the preprocessing entry point ONCE instead of re-importing
    # inside the per-file loop: prefer comprehensive_data_preprocessing,
    # fall back to preprocess_user_data when it is unavailable.
    try:
        from src.data_processor import comprehensive_data_preprocessing
    except ImportError:
        from src.data_processor import preprocess_user_data
        comprehensive_data_preprocessing = None

    from src.data_validator import validate_data_quality

    all_reports = []

    for csv_file in csv_files:
        print(f"\n处理文件: {os.path.basename(csv_file)}")

        # Output name: processed_<original name> inside output_dir.
        base_name = os.path.basename(csv_file)
        output_file = os.path.join(output_dir, f"processed_{base_name}")

        try:
            if comprehensive_data_preprocessing is not None:
                # Preferred path: the helper processes AND saves the file.
                df_processed = comprehensive_data_preprocessing(
                    input_file=csv_file,
                    output_file=output_file
                )
            else:
                # Fallback path does not save on its own, so persist here.
                df_processed = preprocess_user_data(csv_file)
                df_processed.to_csv(output_file, sep=';', index=False, encoding='utf-8')

            # Rename columns to the names the validation function expects.
            validation_df = df_processed.copy()
            if 'phone_no' in validation_df.columns and 'distinct_usermsg.phone_no' not in validation_df.columns:
                validation_df.rename(columns={
                    'phone_no': 'distinct_usermsg.phone_no',
                    'sm_name': 'distinct_usermsg.sm_name',
                    'run_time': 'distinct_usermsg.run_time'
                }, inplace=True)

            validation_report = validate_data_quality(validation_df)

            # Row count of the raw input file, for before/after comparison.
            original_rows = len(pd.read_csv(csv_file, sep=';', encoding='utf-8'))

            all_reports.append({
                'file': base_name,
                'original_rows': original_rows,
                'processed_rows': len(df_processed),
                'validation': validation_report
            })

        except Exception as e:
            # Best-effort batch: report the failure and keep processing
            # the remaining files.
            print(f"处理文件 {csv_file} 时出错: {e}")
            import traceback
            traceback.print_exc()

    # Print the aggregated report to stdout ...
    print_summary_report(all_reports)

    # ... and persist it next to the processed files.
    report_file = os.path.join(output_dir, "summary_report.txt")
    save_summary_report(all_reports, report_file)
    print(f"\n汇总报告已保存到: {report_file}")

def print_summary_report(reports):
    """
    Print an aggregated summary of the per-file processing reports.

    Parameters:
    reports: list of report dicts with keys 'file', 'original_rows',
        'processed_rows' and (optionally) 'validation'
    """
    print("\n" + "="*60)
    print("批量处理汇总报告")
    print("="*60)

    if not reports:
        print("\n没有成功处理的文件")
        return

    total_original = 0
    total_processed = 0

    for report in reports:
        original = report['original_rows']
        processed = report['processed_rows']

        print(f"\n文件: {report['file']}")
        print(f"  原始行数: {original}")
        print(f"  处理后行数: {processed}")
        print(f"  删除行数: {original - processed}")
        # Guard against empty input files: original == 0 previously raised
        # ZeroDivisionError; report 0.0% removed instead.
        removed_pct = round((1 - processed/original)*100, 2) if original else 0.0
        print(f"  删除比例: {removed_pct}%")

        # Optional validation details, when the validator produced any.
        if 'validation' in report and report['validation']:
            validation = report['validation']
            if 'phone_no_unique' in validation:
                print(f"  用户编号唯一性: {'✓' if validation['phone_no_unique'] else '✗'}")
            if 'has_simulate_tv' in validation and validation['has_simulate_tv']:
                print(f"  模拟有线电视记录: {validation['simulate_tv_count']} 条")

        total_original += original
        total_processed += processed

    print(f"\n总计:")
    print(f"  总原始行数: {total_original}")
    print(f"  总处理后行数: {total_processed}")
    print(f"  总删除行数: {total_original - total_processed}")
    # Same zero-guard for the aggregate ratio.
    avg_pct = round((1 - total_processed/total_original)*100, 2) if total_original else 0.0
    print(f"  平均删除比例: {avg_pct}%")

def save_summary_report(reports, output_file):
    """
    Save an aggregated summary of the per-file processing reports.

    Parameters:
    reports: list of report dicts with keys 'file', 'original_rows'
        and 'processed_rows'
    output_file: path of the UTF-8 text file to write (overwritten)
    """
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write("="*60 + "\n")
        f.write("批量处理汇总报告\n")
        f.write("="*60 + "\n\n")

        if not reports:
            f.write("没有成功处理的文件\n")
            return

        total_original = 0
        total_processed = 0

        for report in reports:
            original = report['original_rows']
            processed = report['processed_rows']

            f.write(f"文件: {report['file']}\n")
            f.write(f"  原始行数: {original}\n")
            f.write(f"  处理后行数: {processed}\n")
            f.write(f"  删除行数: {original - processed}\n")
            # Guard against empty input files: original == 0 previously
            # raised ZeroDivisionError; report 0.0% removed instead.
            removed_pct = round((1 - processed/original)*100, 2) if original else 0.0
            f.write(f"  删除比例: {removed_pct}%\n\n")

            total_original += original
            total_processed += processed

        f.write(f"总计:\n")
        f.write(f"  总原始行数: {total_original}\n")
        f.write(f"  总处理后行数: {total_processed}\n")
        f.write(f"  总删除行数: {total_original - total_processed}\n")
        # Same zero-guard for the aggregate ratio.
        avg_pct = round((1 - total_processed/total_original)*100, 2) if total_original else 0.0
        f.write(f"  平均删除比例: {avg_pct}%\n")

def main():
    """
    Command-line entry point: parse options and launch the batch job.
    """
    import argparse

    arg_parser = argparse.ArgumentParser(description='批量处理用户数据文件')
    # Both options are plain string paths with a sensible default.
    for flag, default_value, help_text in (
        ('--input-dir', '.', '输入目录路径'),
        ('--output-dir', 'output', '输出目录路径'),
    ):
        arg_parser.add_argument(flag, type=str, default=default_value, help=help_text)

    options = arg_parser.parse_args()

    # Echo the effective configuration before processing starts.
    print(f"输入目录: {options.input_dir}")
    print(f"输出目录: {options.output_dir}")

    batch_process_user_data(options.input_dir, options.output_dir)

if __name__ == "__main__":
    main()