#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
完整的特征处理主函数

功能：
1. 生成 fund_enhanced_features.csv（基础+市场特征）
2. 生成 llm_features_by_fund.pkl（LLM特征）
3. 生成 market_features_cache.pkl（市场特征缓存）
4. 智能增量处理，避免重复计算
"""

import pandas as pd
import numpy as np
import os
import time
import pickle
from datetime import datetime
from feature_cache_manager import FeatureCacheManager
from func import add_market_features
from baseline import add_basic_features, generate_llm_features

def _load_raw_series():
    """Load the raw apply/redeem series CSV with normalized dtypes.

    ``fund_code`` is forced to str (preserves leading zeros) and
    ``transaction_date`` is parsed from YYYYMMDD. The column is cast to
    str before parsing so the format-based parse works whether pandas
    inferred the column as int or as str.
    """
    df = pd.read_csv('fund_apply_redeem_series.csv', dtype={'fund_code': str})
    df['transaction_date'] = pd.to_datetime(df['transaction_date'].astype(str), format='%Y%m%d')
    return df


def process_all_features(force_rebuild=False):
    """
    Process all features and generate the cache files.

    Steps:
        1. Detect data that still needs processing (everything, when
           ``force_rebuild`` is set).
        2. Add market features (cached in ``market_features_cache.pkl``).
        3. Add basic time features.
        4. Update the enhanced-feature cache (``fund_enhanced_features.csv``).
        5. Generate LLM features for new funds (``llm_features_by_fund.pkl``).
        6. Validate the generated cache files and report timings.

    Args:
        force_rebuild: when True, clear every cache and reprocess all data.

    Returns:
        FeatureCacheManager: the cache manager instance used for this run.
    """
    print("🚀 开始完整特征处理流程")
    print("=" * 80)

    start_time = time.time()

    # 1. Initialize the cache manager
    print("\n1️⃣ 初始化特征缓存管理器...")
    cache_manager = FeatureCacheManager()

    # Report the initial cache state
    print("\n📊 初始缓存状态:")
    initial_stats = cache_manager.get_statistics()
    for cache_type, info in initial_stats.items():
        print(f"  {cache_type}: {info}")

    # 2. Detect the data that needs processing
    print("\n2️⃣ 检测需要处理的数据...")
    if force_rebuild:
        print("🔄 强制重建模式，将处理所有数据")
        # Load all raw data and wipe every existing cache
        df = _load_raw_series()
        new_data_df = df.copy()
        new_fund_codes = df['fund_code'].unique().tolist()

        cache_manager.clear_cache('all')
    else:
        new_data_df, new_fund_codes = cache_manager.detect_new_data()

    print("📈 待处理数据统计:")
    print(f"  数据行数: {len(new_data_df)}")
    print(f"  基金数量: {len(new_fund_codes)}")
    print(f"  总基金数: {new_data_df['fund_code'].nunique()}")

    # Decide whether LLM features must be (re)generated
    llm_cache_exists = os.path.exists("llm_features_by_fund.pkl")
    need_llm_generation = not llm_cache_exists or force_rebuild

    if len(new_data_df) == 0 and not need_llm_generation:
        print("✅ 没有新增数据需要处理，所有缓存都是最新的")
        return cache_manager

    # No new rows, but the LLM cache is missing: load all data for LLM only
    skip_feature_processing = False
    if len(new_data_df) == 0 and need_llm_generation:
        print("⚠️ 检测到LLM特征缓存缺失，需要为所有基金生成LLM特征")
        df = _load_raw_series()
        new_data_df = df.copy()
        new_fund_codes = df['fund_code'].unique().tolist()
        print(f"📊 将为 {len(new_fund_codes)} 个基金生成LLM特征")

        # Only LLM features are needed, so reuse the enhanced-feature cache
        # when it exists; otherwise fall through to full feature processing.
        if os.path.exists("fund_enhanced_features.csv"):
            print("📖 使用现有增强特征缓存作为LLM特征生成的基础")
            enhanced_data_df = pd.read_csv("fund_enhanced_features.csv", dtype={'fund_code': str})
            enhanced_data_df['transaction_date'] = pd.to_datetime(enhanced_data_df['transaction_date'])
            skip_feature_processing = True

    if not skip_feature_processing:
        # 3. Market features
        print("\n3️⃣ 处理市场特征...")
        market_start_time = time.time()

        print("🔍 添加市场特征到新增数据...")
        enhanced_data_df = add_market_features(
            new_data_df,
            cache_path="market_features_cache.pkl",
            force_refresh=force_rebuild
        )

        market_time = time.time() - market_start_time
        print(f"⏱️ 市场特征处理耗时: {market_time:.2f} 秒")
        print(f"📊 市场特征后数据形状: {enhanced_data_df.shape}")

        # 4. Basic time features
        print("\n4️⃣ 添加基础时间特征...")
        basic_start_time = time.time()

        enhanced_data_df = add_basic_features(enhanced_data_df)

        basic_time = time.time() - basic_start_time
        print(f"⏱️ 基础特征处理耗时: {basic_time:.2f} 秒")
        print(f"📊 基础特征后数据形状: {enhanced_data_df.shape}")

        # Classify and count the generated feature columns
        feature_cols = [col for col in enhanced_data_df.columns
                        if col not in ['fund_code', 'transaction_date', 'apply_amt', 'redeem_amt']]
        print(f"📋 生成的特征列数: {len(feature_cols)}")

        market_cols = [col for col in feature_cols if 'market' in col.lower() or 'volume' in col.lower()]
        time_cols = [col for col in feature_cols if any(x in col.lower() for x in ['day', 'week', 'month', 'quarter', 'year'])]
        print(f"  市场特征: {len(market_cols)} 个")
        print(f"  时间特征: {len(time_cols)} 个")
        print(f"  其他特征: {len(feature_cols) - len(market_cols) - len(time_cols)} 个")

        # 5. Update the enhanced-feature cache
        print("\n5️⃣ 更新增强特征缓存...")
        cache_start_time = time.time()

        cache_manager.update_enhanced_features(enhanced_data_df, force_rebuild=force_rebuild)

        cache_time = time.time() - cache_start_time
        print(f"⏱️ 缓存更新耗时: {cache_time:.2f} 秒")
    else:
        print("\n3️⃣-5️⃣ 跳过特征处理，使用现有增强特征缓存")
        market_time = 0
        basic_time = 0
        cache_time = 0

    # 6. LLM features
    print("\n6️⃣ 处理LLM特征...")

    llm_time = None  # stays None when generation is skipped or fails
    if len(new_fund_codes) > 0:
        print(f"🤖 为 {len(new_fund_codes)} 个基金生成LLM特征...")
        llm_start_time = time.time()

        # Restrict the data set to the new funds only
        new_funds_data = enhanced_data_df[enhanced_data_df['fund_code'].isin(new_fund_codes)].copy()

        try:
            generate_llm_features(new_funds_data, cache_path="llm_features_by_fund.pkl")

            # Pull the per-fund feature dict back out of the pickle cache
            llm_features_dict = {}
            if os.path.exists("llm_features_by_fund.pkl"):
                with open("llm_features_by_fund.pkl", 'rb') as f:
                    llm_data = pickle.load(f)
                    llm_features_dict = llm_data.get('features', {})

            cache_manager.update_llm_features(new_fund_codes, llm_features_dict)

            llm_time = time.time() - llm_start_time
            print(f"⏱️ LLM特征处理耗时: {llm_time:.2f} 秒")
            print(f"📊 成功生成 {len(llm_features_dict)} 个基金的LLM特征")

        except Exception as e:
            # Best-effort: keep the run alive so the other caches stay usable
            print(f"❌ LLM特征生成失败: {e}")
            print("⚠️ 将继续处理，但LLM特征可能不完整")
    else:
        print("✅ 没有新基金需要生成LLM特征")

    # 7. Validate the generated cache files
    print("\n7️⃣ 验证生成的缓存文件...")

    # Enhanced-feature cache
    if os.path.exists("fund_enhanced_features.csv"):
        enhanced_df = pd.read_csv("fund_enhanced_features.csv")
        print(f"✅ fund_enhanced_features.csv: {enhanced_df.shape}")
        print(f"   基金数量: {enhanced_df['fund_code'].nunique()}")
        print(f"   日期范围: {enhanced_df['transaction_date'].min()} 到 {enhanced_df['transaction_date'].max()}")
        # 4 non-feature columns: fund_code, transaction_date, apply_amt, redeem_amt
        print(f"   特征列数: {len(enhanced_df.columns) - 4}")
    else:
        print("❌ fund_enhanced_features.csv 未生成")

    # LLM feature cache
    if os.path.exists("llm_features_by_fund.pkl"):
        with open("llm_features_by_fund.pkl", 'rb') as f:
            llm_cache = pickle.load(f)
        print(f"✅ llm_features_by_fund.pkl: {llm_cache.get('fund_count', 0)} 个基金")

        # Inspect the feature structure of one sample fund
        features = llm_cache.get('features', {})
        if features:
            sample_fund = list(features.keys())[0]
            sample_feature = features[sample_fund]
            embeddings = sample_feature.get('embeddings', [])
            labels = sample_feature.get('labels', {})
            # Length check guarded against None and against numpy truth-value ambiguity
            embed_len = len(embeddings) if embeddings is not None and hasattr(embeddings, '__len__') else 0
            print(f"   Embedding维度: {embed_len}")
            print(f"   标签数量: {len(labels)}")
    else:
        print("❌ llm_features_by_fund.pkl 未生成")

    # Market feature cache
    if os.path.exists("market_features_cache.pkl"):
        with open("market_features_cache.pkl", 'rb') as f:
            market_cache = pickle.load(f)
        total_dates = market_cache.get('total_dates', 0)
        print(f"✅ market_features_cache.pkl: {total_dates} 个交易日")
    else:
        print("❌ market_features_cache.pkl 未生成")

    # 8. Final cache statistics
    print("\n8️⃣ 最终缓存统计:")
    final_stats = cache_manager.get_statistics()
    for cache_type, info in final_stats.items():
        print(f"\n  📁 {cache_type}:")
        for key, value in info.items():
            print(f"    {key}: {value}")

    # 9. Performance summary
    total_time = time.time() - start_time

    def _pct(t):
        # Share of total run time in percent; 0 when total_time is degenerate
        return t / total_time * 100 if total_time > 0 else 0.0

    print("\n⚡ 性能总结:")
    print(f"  总耗时: {total_time:.2f} 秒")
    print(f"  市场特征: {market_time:.2f} 秒 ({_pct(market_time):.1f}%)")
    print(f"  基础特征: {basic_time:.2f} 秒 ({_pct(basic_time):.1f}%)")
    print(f"  缓存更新: {cache_time:.2f} 秒 ({_pct(cache_time):.1f}%)")
    if llm_time is not None:
        print(f"  LLM特征: {llm_time:.2f} 秒 ({_pct(llm_time):.1f}%)")

    print("\n✅ 特征处理完成！")
    print(f"📊 处理数据: {len(new_data_df)} 行")
    print(f"📊 涉及基金: {new_data_df['fund_code'].nunique()} 个")

    return cache_manager

def test_feature_loading():
    """Smoke-test loading features from the existing caches.

    Exercises ``FeatureCacheManager.merge_features`` twice — once with an
    explicit fund-code list and once with a date range — printing shapes,
    timings, and a breakdown of the returned feature columns. Failures are
    caught and reported rather than raised.
    """
    print("\n🧪 测试特征加载功能")
    print("-" * 40)

    cache_manager = FeatureCacheManager()

    # Case 1: load a fixed sample of funds
    sample_funds = ['001316', '000086', '000192']
    try:
        t0 = time.time()
        fund_data, llm_embeddings = cache_manager.merge_features(fund_codes=sample_funds)
        load_time = time.time() - t0

        print(f"✅ 指定基金数据加载: {fund_data.shape}")
        print(f"⏱️ 加载耗时: {load_time:.2f} 秒")
        print(f"🤖 LLM embeddings: {len(llm_embeddings)} 个基金")

        # Break the returned columns down by feature family
        columns = list(fund_data.columns)
        embed_cols = [c for c in columns if c.startswith('embed_')]
        label_cols = [c for c in columns
                      if c in {'fund_type', 'risk_level', 'main_industry', 'investment_style', 'liquidity'}]
        market_cols = [c for c in columns
                       if any(token in c.lower() for token in ('market', 'volume'))]

        print(f"📊 特征统计:")
        print(f"  Embedding特征: {len(embed_cols)} 个")
        print(f"  标签特征: {len(label_cols)} 个")
        print(f"  市场特征: {len(market_cols)} 个")
        # 4 base columns (fund_code, transaction_date, apply/redeem amounts)
        print(f"  总特征数: {len(columns) - 4}")

    except Exception as e:
        print(f"❌ 特征加载测试失败: {e}")

    # Case 2: load by date range
    try:
        t0 = time.time()
        date_data, date_llm_embeddings = cache_manager.merge_features(date_range=('2024-04-10', '2024-04-20'))
        date_load_time = time.time() - t0

        print(f"✅ 日期范围数据加载: {date_data.shape}")
        print(f"⏱️ 加载耗时: {date_load_time:.2f} 秒")
        print(f"🤖 LLM embeddings: {len(date_llm_embeddings)} 个基金")

    except Exception as e:
        print(f"❌ 日期范围加载测试失败: {e}")

def main():
    """Interactive entry point: pick a processing mode and run it.

    Modes: (1) incremental processing, (2) forced rebuild (with a
    confirmation prompt), (3) load-test only. Requires the raw data file
    ``fund_apply_redeem_series.csv`` to exist.
    """
    print("🎯 特征处理系统")
    print("=" * 80)

    # The raw series file is required for every mode
    if not os.path.exists('fund_apply_redeem_series.csv'):
        print("❌ 找不到原始数据文件: fund_apply_redeem_series.csv")
        return

    # Present the mode menu and read the choice
    print("\n请选择处理模式:")
    print("1. 增量处理 (推荐) - 只处理新增数据")
    print("2. 强制重建 - 重新处理所有数据")
    print("3. 仅测试加载 - 测试现有缓存的加载功能")

    choice = input("\n请输入选择 (1/2/3): ").strip()

    if choice == '1':
        print("\n🔄 开始增量处理...")
        process_all_features(force_rebuild=False)
        test_feature_loading()
    elif choice == '2':
        # Destructive mode: ask for explicit confirmation first
        print("\n⚠️ 强制重建将删除所有现有缓存，确定要继续吗？")
        if input("输入 'yes' 确认: ").strip().lower() == 'yes':
            print("\n🏗️ 开始强制重建...")
            process_all_features(force_rebuild=True)
            test_feature_loading()
        else:
            print("❌ 已取消操作")
    elif choice == '3':
        print("\n🧪 开始测试加载...")
        test_feature_loading()
    else:
        print("❌ 无效选择")
        return

    # Closing banner (also shown after a cancelled rebuild, as before)
    print("\n🎉 操作完成！")
    print("\n📚 生成的文件:")
    for generated in (
        "  - fund_enhanced_features.csv: 增强特征缓存",
        "  - llm_features_by_fund.pkl: LLM特征缓存",
        "  - market_features_cache.pkl: 市场特征缓存",
    ):
        print(generated)
    print("\n💡 使用建议:")
    for tip in (
        "  - 使用 FeatureCacheManager 类来管理和查询特征",
        "  - 定期运行增量处理以保持缓存最新",
        "  - 查看 FEATURE_CACHE_README.md 获取详细说明",
    ):
        print(tip)


if __name__ == "__main__":
    main()