#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
快速特征处理脚本 - 自动增量处理模式

直接生成三个缓存文件：
1. fund_enhanced_features.csv
2. llm_features_by_fund.pkl  
3. market_features_cache.pkl
"""

import pandas as pd
import os
import time
import pickle
from feature_cache_manager import FeatureCacheManager
from func import add_market_features
from baseline import add_basic_features, generate_llm_features

def quick_process():
    """Run the fast incremental feature-processing pipeline.

    Steps: detect newly added rows/funds via FeatureCacheManager, enrich
    them with market and basic features, update the enhanced-feature
    cache, generate LLM features for new funds only, then verify that the
    three cache files exist and report timing statistics.

    Side effects (files written/updated in the working directory):
      - fund_enhanced_features.csv
      - llm_features_by_fund.pkl
      - market_features_cache.pkl

    Returns:
        None. Returns early when no new data is detected.
    """
    print("🚀 快速特征处理 - 增量模式")
    print("=" * 50)

    start_time = time.time()

    # 1. Initialize the cache manager.
    cache_manager = FeatureCacheManager()

    # 2. Detect data added since the last run.
    print("🔍 检测新增数据...")
    new_data_df, new_fund_codes = cache_manager.detect_new_data()

    print(f"📈 检测结果:")
    print(f"  新增数据: {len(new_data_df)} 行")
    print(f"  新增基金: {len(new_fund_codes)} 个")

    if len(new_data_df) == 0:
        print("✅ 没有新增数据，所有缓存都是最新的")
        return

    # 3. Market features (backed by a pickle cache to avoid repeated
    #    API requests; force_refresh=False reuses cached entries).
    print("\n🔧 处理市场特征...")
    enhanced_data_df = add_market_features(
        new_data_df,
        cache_path="market_features_cache.pkl",
        force_refresh=False
    )

    # 4. Basic (time/derived) features.
    print("🔧 添加基础特征...")
    enhanced_data_df = add_basic_features(enhanced_data_df)

    print(f"📊 增强后数据形状: {enhanced_data_df.shape}")

    # 5. Persist the enhanced-feature cache.
    print("💾 更新增强特征缓存...")
    cache_manager.update_enhanced_features(enhanced_data_df)

    # 6. LLM features — only for funds not seen before.
    if len(new_fund_codes) > 0:
        print(f"🤖 为 {len(new_fund_codes)} 个新基金生成LLM特征...")

        # Restrict LLM feature generation to the new funds' rows.
        new_funds_data = enhanced_data_df[enhanced_data_df['fund_code'].isin(new_fund_codes)].copy()

        try:
            generate_llm_features(new_funds_data, cache_path="llm_features.pkl")

            # Pull the freshly generated features and merge them into the
            # per-fund cache.
            if os.path.exists("llm_features.pkl"):
                with open("llm_features.pkl", 'rb') as f:
                    llm_data = pickle.load(f)
                    llm_features_dict = llm_data.get('features', {})

                cache_manager.update_llm_features(new_fund_codes, llm_features_dict)
                print(f"✅ 成功生成 {len(llm_features_dict)} 个基金的LLM特征")
        except Exception as e:
            # Best-effort: an LLM failure (API, parsing, ...) must not
            # invalidate the rest of the pipeline.
            print(f"⚠️ LLM特征生成失败: {e}")

    # 7. Verify the generated cache files.
    print("\n📊 验证生成的缓存文件:")

    files_to_check = [
        ("fund_enhanced_features.csv", "增强特征缓存"),
        ("llm_features_by_fund.pkl", "LLM特征缓存"),
        ("market_features_cache.pkl", "市场特征缓存")
    ]

    for filename, description in files_to_check:
        if os.path.exists(filename):
            file_size = os.path.getsize(filename) / 1024  # KB
            # FIX: previously printed the literal "(unknown)" instead of
            # the actual filename, making the report useless.
            print(f"✅ {description}: {filename} ({file_size:.1f} KB)")
        else:
            print(f"❌ {description}: {filename} 未生成")

    # 8. Summary statistics.
    total_time = time.time() - start_time
    print(f"\n⚡ 处理完成！")
    print(f"  总耗时: {total_time:.2f} 秒")
    print(f"  处理数据: {len(new_data_df)} 行")
    print(f"  涉及基金: {new_data_df['fund_code'].nunique()} 个")

def main():
    """Entry point: validate the raw input file, run the incremental
    feature pipeline, and print usage notes; any failure is reported
    with a full traceback instead of propagating."""
    print("🎯 快速特征处理系统")

    # Guard clause: nothing to do without the raw data file.
    if not os.path.exists('fund_apply_redeem_series.csv'):
        print("❌ 找不到原始数据文件: fund_apply_redeem_series.csv")
        return

    try:
        quick_process()
    except Exception as e:
        print(f"❌ 处理过程中发生错误: {e}")
        import traceback
        traceback.print_exc()
    else:
        # Success path: describe the generated files and how to use them.
        notes = (
            "\n📚 生成的文件说明:",
            "  fund_enhanced_features.csv - 包含市场特征和时间特征的完整数据",
            "  llm_features_by_fund.pkl - 按基金代码存储的LLM特征",
            "  market_features_cache.pkl - 市场数据缓存，避免重复API请求",
            "\n💡 后续使用:",
            "  from feature_cache_manager import FeatureCacheManager",
            "  cache_manager = FeatureCacheManager()",
            "  df = cache_manager.merge_features()  # 加载完整特征数据",
        )
        for note in notes:
            print(note)

if __name__ == "__main__":
    main()
