#!/usr/bin/env python3
"""
Analysis.csv数据优化精简工具
用于优化和精简过于冗余的分析数据文件
"""

import pandas as pd
import numpy as np
import os
import json
import logging
from datetime import datetime
from utils import optimize_analysis_data, export_optimized_analysis, create_analysis_summary

# Configure module-wide logging: timestamped INFO-level messages
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def find_analysis_files(directory='.'):
    """
    Locate analysis CSV files in a directory.

    :param directory: directory to search (default: current directory)
    :return: sorted list of paths whose names end with '_analysis.csv'
    """
    matches = [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith('_analysis.csv')
    ]
    return sorted(matches)

def analyze_file_structure(filepath):
    """
    Inspect a CSV analysis file and summarize its structure.

    :param filepath: path to the CSV file
    :return: tuple (structure info dict, DataFrame), or (None, None)
             when the file cannot be read or measured
    """
    try:
        frame = pd.read_csv(filepath)
        size_kb = os.path.getsize(filepath) / 1024  # on-disk size in KB
        n_rows = len(frame)
        n_cols = len(frame.columns)

        info = {
            'filename': os.path.basename(filepath),
            'rows': n_rows,
            'columns': n_cols,
            'data_points': n_rows * n_cols,
            'file_size_kb': round(size_kb, 2),
            'memory_usage_mb': round(frame.memory_usage(deep=True).sum() / 1024 / 1024, 2),
        }
        return info, frame

    except Exception as e:
        logger.error(f"分析文件失败 {filepath}: {e}")
        return None, None

def optimize_single_file(filepath, optimization_level='balanced'):
    """
    Optimize a single analysis CSV file and export the reduced version.

    :param filepath: path to the *_analysis.csv file
    :param optimization_level: level forwarded to the exporter
                               ('minimal' / 'balanced' / 'comprehensive')
    :return: dict with original/optimized structure info, compression
             statistics and output paths, or None on failure
    """
    logger.info(f"开始优化文件: {filepath}")

    # Inspect the original file first; bail out if it is unreadable.
    structure_info, df = analyze_file_structure(filepath)
    if df is None:
        return None

    # Derive symbol and period from a "<symbol>_<period>_analysis.csv" name.
    filename = os.path.basename(filepath)
    parts = filename.replace('_analysis.csv', '').split('_')

    if len(parts) >= 2:
        symbol = parts[0]
        period = parts[1]
    else:
        # Fall back to defaults when the name doesn't follow the convention.
        symbol = 'UNKNOWN'
        period = '日线'

    logger.info(f"识别股票: {symbol}, 周期: {period}")
    logger.info(f"原始数据: {structure_info['rows']} 行 × {structure_info['columns']} 列")
    logger.info(f"文件大小: {structure_info['file_size_kb']} KB")

    # Run the export and measure the result.
    try:
        output_dir = os.path.dirname(filepath) or '.'
        optimized_filepath, summary_filepath = export_optimized_analysis(
            df, symbol, period, optimization_level, output_dir
        )

        # Re-analyze the exported file. Fail explicitly on a None result
        # instead of letting the ratio math below raise TypeError (which
        # the generic except would report as a misleading "优化失败").
        opt_structure_info, _ = analyze_file_structure(optimized_filepath)
        if opt_structure_info is None:
            logger.error(f"无法分析优化后的文件: {optimized_filepath}")
            return None

        # Guard the denominators: an empty original (0 data points or
        # 0 KB) would otherwise raise ZeroDivisionError.
        orig_points = structure_info['data_points']
        orig_size = structure_info['file_size_kb']
        compression_ratio = (
            round((1 - opt_structure_info['data_points'] / orig_points) * 100, 1)
            if orig_points else 0.0
        )
        size_reduction = (
            round((1 - opt_structure_info['file_size_kb'] / orig_size) * 100, 1)
            if orig_size else 0.0
        )

        optimization_result = {
            'original': structure_info,
            'optimized': opt_structure_info,
            'compression_ratio': compression_ratio,
            'size_reduction': size_reduction,
            'optimized_file': optimized_filepath,
            'summary_file': summary_filepath
        }

        logger.info(f"优化完成:")
        logger.info(f"  数据压缩: {optimization_result['compression_ratio']}%")
        logger.info(f"  文件减小: {optimization_result['size_reduction']}%")

        return optimization_result

    except Exception as e:
        logger.error(f"优化失败: {e}")
        return None

def batch_optimize_files(directory='.', optimization_level='balanced'):
    """
    Optimize every analysis file found in a directory.

    :param directory: directory to scan for *_analysis.csv files
    :param optimization_level: level forwarded to optimize_single_file
    :return: list of successful optimization result dicts
    """
    logger.info(f"开始批量优化，目录: {directory}, 级别: {optimization_level}")

    targets = find_analysis_files(directory)
    if not targets:
        logger.warning("未找到analysis.csv文件")
        return []

    logger.info(f"找到 {len(targets)} 个分析文件")

    # Keep only the files that were optimized successfully.
    outcomes = (optimize_single_file(path, optimization_level) for path in targets)
    return [outcome for outcome in outcomes if outcome]

def generate_optimization_report(results, output_file='optimization_report.json'):
    """
    Build a JSON optimization report and print a console summary.

    :param results: list of per-file optimization result dicts
    :param output_file: path for the JSON report
    """
    if not results:
        logger.warning("没有优化结果，无法生成报告")
        return

    # Aggregate before/after totals across all processed files.
    originals = [r['original'] for r in results]
    optimizeds = [r['optimized'] for r in results]
    size_before = sum(item['file_size_kb'] for item in originals)
    size_after = sum(item['file_size_kb'] for item in optimizeds)
    points_before = sum(item['data_points'] for item in originals)
    points_after = sum(item['data_points'] for item in optimizeds)

    summary = {
        'total_files': len(results),
        'total_size_reduction_kb': round(size_before - size_after, 2),
        'total_size_reduction_pct': round((1 - size_after / size_before) * 100, 1),
        'total_data_compression_pct': round((1 - points_after / points_before) * 100, 1),
        'average_compression_ratio': round(sum(r['compression_ratio'] for r in results) / len(results), 1),
    }
    report = {
        'optimization_summary': summary,
        'file_details': results,
        'generated_at': datetime.now().isoformat(),
    }

    # Persist the report; default=str handles non-JSON-native values.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(report, f, ensure_ascii=False, indent=2, default=str)

    logger.info(f"优化报告已保存: {output_file}")

    # Console summary.
    divider = "=" * 60
    print("\n" + divider)
    print("📊 数据优化摘要报告")
    print(divider)
    print(f"处理文件数量: {summary['total_files']}")
    print(f"总体数据压缩: {summary['total_data_compression_pct']}%")
    print(f"总体文件减小: {summary['total_size_reduction_pct']}%")
    print(f"节省空间: {summary['total_size_reduction_kb']} KB")
    print(f"平均压缩比: {summary['average_compression_ratio']}%")

    print(f"\n📁 详细文件信息:")
    for entry in results:
        orig, opt = entry['original'], entry['optimized']
        print(f"  {orig['filename']}:")
        print(f"    原始: {orig['columns']}列 × {orig['rows']}行 = {orig['file_size_kb']}KB")
        print(f"    优化: {opt['columns']}列 × {opt['rows']}行 = {opt['file_size_kb']}KB")
        print(f"    压缩: {entry['compression_ratio']}% | 减小: {entry['size_reduction']}%")

    print(divider)

def interactive_optimization():
    """
    Interactive console workflow: pick an optimization level and a
    processing mode, then run the optimization.
    """
    print("🚀 Analysis.csv数据优化工具")
    print("="*50)

    # Step 1: choose the optimization level.
    print("\n请选择优化级别:")
    print("1. minimal    - 最小化 (仅保留核心数据)")
    print("2. balanced   - 平衡化 (保留重要指标)")
    print("3. comprehensive - 全面化 (保留大部分指标)")

    level_by_choice = {'1': 'minimal', '2': 'balanced', '3': 'comprehensive'}
    while True:
        choice = input("\n请输入选择 (1-3): ").strip()
        if choice in level_by_choice:
            optimization_level = level_by_choice[choice]
            break
        print("无效选择，请重新输入")

    print(f"\n选择的优化级别: {optimization_level}")

    # Step 2: choose the processing mode.
    print("\n请选择处理模式:")
    print("1. 批量处理当前目录所有analysis.csv文件")
    print("2. 处理指定文件")

    while True:
        mode = input("\n请输入选择 (1-2): ").strip()
        if mode in ('1', '2'):
            break
        print("无效选择，请重新输入")

    if mode == '1':
        # Batch mode: process every analysis file in the current directory.
        results = batch_optimize_files('.', optimization_level)
        if results:
            generate_optimization_report(results)
        else:
            print("❌ 没有找到可处理的文件")
    else:
        # Single-file mode: ask for a path and process just that file.
        filepath = input("\n请输入文件路径: ").strip()
        if not os.path.exists(filepath):
            print("❌ 文件不存在")
        else:
            result = optimize_single_file(filepath, optimization_level)
            if result:
                generate_optimization_report([result])
            else:
                print("❌ 文件处理失败")

def main():
    """Entry point: dispatch between CLI and interactive modes."""
    import sys

    args = sys.argv[1:]
    if not args:
        # No CLI arguments → interactive mode.
        interactive_optimization()
        return

    command = args[0]
    if command == 'batch':
        # batch [level]: optimize every analysis file in the cwd.
        level = args[1] if len(args) > 1 else 'balanced'
        results = batch_optimize_files('.', level)
        if results:
            generate_optimization_report(results)
    elif command == 'single':
        # single <path> [level]: optimize one specific file.
        if len(args) < 2:
            print("用法: python optimize_analysis_data.py single <文件路径> [优化级别]")
            return
        filepath = args[1]
        level = args[2] if len(args) > 2 else 'balanced'
        result = optimize_single_file(filepath, level)
        if result:
            generate_optimization_report([result])
    else:
        # Unknown command → show usage.
        print("用法:")
        print("  python optimize_analysis_data.py batch [优化级别]")
        print("  python optimize_analysis_data.py single <文件路径> [优化级别]")
        print("  python optimize_analysis_data.py  # 交互模式")

if __name__ == "__main__":
    main()
