#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
特征缓存管理系统

功能：
1. 基础+市场特征缓存到CSV文件，支持增量更新
2. LLM特征单独缓存，按基金代码存储
3. 智能增量处理，只处理新增数据
4. 特征合并和查询功能
"""

import pandas as pd
import pickle
import os
import json
from datetime import datetime
from typing import Dict, List, Optional, Tuple
import numpy as np

class FeatureCacheManager:
    """Manage the on-disk caches of engineered fund features.

    Three caches are maintained:
      * enhanced (base + market) features in a CSV file, updated incrementally
      * LLM-derived features, pickled per fund code
      * a separate market-feature pickle (only its existence is tracked here)
    """

    def __init__(self,
                 base_data_path: str = "fund_apply_redeem_series.csv",
                 enhanced_data_path: str = "fund_enhanced_features.csv",
                 llm_features_path: str = "llm_features_by_fund.pkl",
                 market_cache_path: str = "market_features_cache.pkl"):
        """Initialize the cache manager.

        Args:
            base_data_path: path of the raw apply/redeem series CSV.
            enhanced_data_path: path of the enhanced-feature cache CSV.
            llm_features_path: path of the per-fund LLM feature pickle.
            market_cache_path: path of the market-feature cache pickle.
        """
        self.base_data_path = base_data_path
        self.enhanced_data_path = enhanced_data_path
        self.llm_features_path = llm_features_path
        self.market_cache_path = market_cache_path

        # Snapshot of each cache's on-disk state; refreshed after every update.
        self.enhanced_cache_info = self._load_enhanced_cache_info()
        self.llm_cache_info = self._load_llm_cache_info()

    @staticmethod
    def _parse_transaction_dates(series: pd.Series) -> pd.Series:
        """Parse a transaction-date column, auto-detecting its format.

        Tries %Y%m%d first, then %Y-%m-%d, and finally falls back to
        pandas' automatic inference.
        """
        try:
            return pd.to_datetime(series, format='%Y%m%d')
        except ValueError:
            try:
                return pd.to_datetime(series, format='%Y-%m-%d')
            except ValueError:
                return pd.to_datetime(series)

    def _load_enhanced_cache_info(self) -> Dict:
        """Collect metadata about the enhanced-feature cache file.

        Returns:
            dict with 'exists' plus, when the file is readable, its size,
            mtime, column list and per-date statistics.
        """
        if not os.path.exists(self.enhanced_data_path):
            return {'exists': False}
        try:
            # Header-only read to obtain the column list cheaply.
            df = pd.read_csv(self.enhanced_data_path, nrows=1)
            file_stats = os.stat(self.enhanced_data_path)

            # Full read is still required for the date-range statistics.
            full_df = pd.read_csv(self.enhanced_data_path)
            if 'transaction_date' in full_df.columns:
                full_df['transaction_date'] = self._parse_transaction_dates(full_df['transaction_date'])
                date_range = {
                    'min_date': full_df['transaction_date'].min(),
                    'max_date': full_df['transaction_date'].max(),
                    'unique_dates': full_df['transaction_date'].nunique(),
                    'total_rows': len(full_df)
                }
            else:
                date_range = {}

            return {
                'exists': True,
                'file_size': file_stats.st_size,
                'last_modified': datetime.fromtimestamp(file_stats.st_mtime),
                'columns': list(df.columns),
                'date_range': date_range
            }
        except Exception as e:
            # Best-effort: a corrupt cache is reported as missing.
            print(f"⚠️ 加载增强特征缓存信息失败: {e}")
            return {'exists': False}

    def _load_llm_cache_info(self) -> Dict:
        """Collect metadata about the LLM-feature pickle.

        Returns:
            dict with 'exists' plus, when readable, file stats and the
            fund codes currently cached.
        """
        if not os.path.exists(self.llm_features_path):
            return {'exists': False}
        try:
            with open(self.llm_features_path, 'rb') as f:
                llm_data = pickle.load(f)

            file_stats = os.stat(self.llm_features_path)

            return {
                'exists': True,
                'file_size': file_stats.st_size,
                'last_modified': datetime.fromtimestamp(file_stats.st_mtime),
                'fund_count': len(llm_data.get('features', {})),
                'fund_codes': list(llm_data.get('features', {}).keys()),
                'timestamp': llm_data.get('timestamp', 'Unknown')
            }
        except Exception as e:
            # Best-effort: an unreadable pickle is reported as missing.
            print(f"⚠️ 加载LLM特征缓存信息失败: {e}")
            return {'exists': False}

    def get_cache_status(self) -> Dict:
        """Return the current status snapshot of all three caches."""
        return {
            'enhanced_features': self.enhanced_cache_info,
            'llm_features': self.llm_cache_info,
            'market_cache_exists': os.path.exists(self.market_cache_path)
        }

    def detect_new_data(self) -> Tuple[pd.DataFrame, List[str]]:
        """Detect rows and fund codes present in the raw data but absent
        from the enhanced-feature cache.

        Returns:
            (new_data_df, new_fund_codes): the rows not yet cached and the
            fund codes never seen in the cache.
        """
        print("🔍 检测新增数据...")

        # Load the raw series; the raw file always stores dates as YYYYMMDD.
        base_df = pd.read_csv(self.base_data_path, dtype={'fund_code': str})
        base_df['transaction_date'] = pd.to_datetime(base_df['transaction_date'], format='%Y%m%d')
        # Normalize fund codes to the 6-digit zero-padded form.
        base_df['fund_code'] = base_df['fund_code'].str.zfill(6)

        print(f"📊 原始数据: {len(base_df)} 行，{base_df['fund_code'].nunique()} 个基金")

        if not self.enhanced_cache_info['exists']:
            print("📁 增强特征缓存不存在，所有数据都是新数据")
            new_fund_codes = base_df['fund_code'].unique().tolist()
            return base_df, new_fund_codes

        try:
            cached_df = pd.read_csv(self.enhanced_data_path, dtype={'fund_code': str})
            cached_df['transaction_date'] = self._parse_transaction_dates(cached_df['transaction_date'])

            print(f"📦 缓存数据: {len(cached_df)} 行，{cached_df['fund_code'].nunique()} 个基金")

            # Compare (fund_code, date) keys as strings to avoid datetime
            # precision issues between the two files.
            base_keys = base_df[['fund_code']].copy()
            base_keys['date_str'] = base_df['transaction_date'].dt.strftime('%Y-%m-%d')
            cached_keys = cached_df[['fund_code']].copy()
            cached_keys['date_str'] = cached_df['transaction_date'].dt.strftime('%Y-%m-%d')

            # BUGFIX: de-duplicate the cached keys before the indicator merge.
            # Duplicate keys in the cache would otherwise expand the merge
            # result, making the mask longer than base_df. Converting the mask
            # to a positional numpy array also avoids relying on accidental
            # index alignment between `merged` and `base_df`.
            cached_keys = cached_keys.drop_duplicates()
            merged = base_keys.merge(
                cached_keys,
                on=['fund_code', 'date_str'],
                how='left', indicator=True
            )
            new_rows_mask = (merged['_merge'] == 'left_only').to_numpy()

            new_data_df = base_df.loc[new_rows_mask].copy()

            # Fund codes present in the raw data but never cached.
            cached_funds = set(cached_df['fund_code'].unique())
            base_funds = set(base_df['fund_code'].unique())
            new_fund_codes = list(base_funds - cached_funds)

            print(f"📈 新增数据: {len(new_data_df)} 行")
            print(f"📈 新增基金: {len(new_fund_codes)} 个")

            if len(new_data_df) > 0:
                date_range = f"{new_data_df['transaction_date'].min().strftime('%Y-%m-%d')} 到 {new_data_df['transaction_date'].max().strftime('%Y-%m-%d')}"
                print(f"📅 新增日期范围: {date_range}")

            return new_data_df, new_fund_codes

        except Exception as e:
            # On any failure fall back to treating everything as new.
            print(f"❌ 检测新增数据失败: {e}")
            print("🔄 将处理所有数据")
            new_fund_codes = base_df['fund_code'].unique().tolist()
            return base_df, new_fund_codes

    def update_enhanced_features(self, new_data_df: pd.DataFrame, force_rebuild: bool = False):
        """Append newly computed enhanced features to the CSV cache.

        Args:
            new_data_df: new rows (already containing the enhanced features).
            force_rebuild: rewrite the cache from scratch instead of appending.
        """
        if len(new_data_df) == 0 and not force_rebuild:
            print("✅ 没有新增数据需要更新")
            return

        print(f"🔄 更新增强特征缓存...")

        # Persist dates in the canonical YYYY-MM-DD string form.
        new_data_df_copy = new_data_df.copy()
        if 'transaction_date' in new_data_df_copy.columns:
            if new_data_df_copy['transaction_date'].dtype == 'datetime64[ns]':
                new_data_df_copy['transaction_date'] = new_data_df_copy['transaction_date'].dt.strftime('%Y-%m-%d')

        if force_rebuild or not self.enhanced_cache_info['exists']:
            print("🏗️ 重建增强特征缓存")
            new_data_df_copy.to_csv(self.enhanced_data_path, index=False)
        else:
            print("📈 增量更新增强特征缓存")
            # Merge the new rows into the existing cache.
            existing_df = pd.read_csv(self.enhanced_data_path, dtype={'fund_code': str})
            combined_df = pd.concat([existing_df, new_data_df_copy], ignore_index=True)

            # De-duplicate on (fund_code, transaction_date); keep='last' so a
            # re-submitted row replaces the previously cached version.
            combined_df['transaction_date_str'] = combined_df['transaction_date'].astype(str)
            combined_df = combined_df.drop_duplicates(subset=['fund_code', 'transaction_date_str'], keep='last')
            combined_df = combined_df.drop(columns=['transaction_date_str'])

            # Stable ordering makes downstream diffs and reads predictable.
            combined_df = combined_df.sort_values(['fund_code', 'transaction_date'])

            combined_df.to_csv(self.enhanced_data_path, index=False)

        # Refresh the cached metadata snapshot.
        self.enhanced_cache_info = self._load_enhanced_cache_info()

        print(f"✅ 增强特征缓存更新完成")
        if 'file_size' in self.enhanced_cache_info:
            print(f"📊 缓存文件大小: {self.enhanced_cache_info['file_size'] / 1024:.2f} KB")
        else:
            print(f"📊 缓存已更新")

    def update_llm_features(self, new_fund_codes: List[str], llm_features_dict: Dict):
        """Merge newly computed LLM features into the per-fund pickle.

        Args:
            new_fund_codes: fund codes whose features should be (re)stored.
            llm_features_dict: mapping of fund code -> feature payload.
        """
        if len(new_fund_codes) == 0:
            print("✅ 没有新增基金需要更新LLM特征")
            return

        print(f"🔄 更新LLM特征缓存，新增 {len(new_fund_codes)} 个基金")

        # Load the existing features, tolerating a missing/corrupt pickle.
        if self.llm_cache_info['exists']:
            try:
                with open(self.llm_features_path, 'rb') as f:
                    existing_llm_data = pickle.load(f)
                existing_features = existing_llm_data.get('features', {})
            except Exception as e:
                print(f"⚠️ 加载现有LLM特征失败: {e}")
                existing_features = {}
        else:
            existing_features = {}

        # Only funds actually present in the supplied dict are updated.
        for fund_code in new_fund_codes:
            if fund_code in llm_features_dict:
                existing_features[fund_code] = llm_features_dict[fund_code]
                print(f"📊 更新基金 {fund_code} 的LLM特征")

        llm_cache_data = {
            'features': existing_features,
            'timestamp': datetime.now().isoformat(),
            'fund_count': len(existing_features),
            'last_update': datetime.now().isoformat()
        }

        with open(self.llm_features_path, 'wb') as f:
            pickle.dump(llm_cache_data, f)

        # Refresh the cached metadata snapshot.
        self.llm_cache_info = self._load_llm_cache_info()

        print(f"✅ LLM特征缓存更新完成")
        print(f"📊 总计 {len(existing_features)} 个基金的LLM特征")
        # .get guards against a (rare) failed metadata reload above.
        print(f"📊 缓存文件大小: {self.llm_cache_info.get('file_size', 0) / 1024:.2f} KB")

    def load_enhanced_features(self, fund_codes: Optional[List[str]] = None,
                             date_range: Optional[Tuple[str, str]] = None) -> pd.DataFrame:
        """Load (and optionally filter) the enhanced-feature cache.

        Args:
            fund_codes: fund codes to keep; None keeps all funds.
            date_range: inclusive (start_date, end_date), 'YYYY-MM-DD' strings.

        Returns:
            The filtered enhanced-feature DataFrame.

        Raises:
            FileNotFoundError: if the enhanced cache does not exist.
        """
        if not self.enhanced_cache_info['exists']:
            raise FileNotFoundError(f"增强特征缓存文件不存在: {self.enhanced_data_path}")

        print(f"📖 加载增强特征数据...")

        df = pd.read_csv(self.enhanced_data_path, dtype={'fund_code': str})
        df['transaction_date'] = self._parse_transaction_dates(df['transaction_date'])

        if fund_codes:
            df = df[df['fund_code'].isin(fund_codes)]
            print(f"📊 筛选基金: {len(fund_codes)} 个")

        if date_range:
            start_date, end_date = date_range
            start_date = pd.to_datetime(start_date)
            end_date = pd.to_datetime(end_date)
            df = df[(df['transaction_date'] >= start_date) & (df['transaction_date'] <= end_date)]
            print(f"📅 筛选日期: {start_date.strftime('%Y-%m-%d')} 到 {end_date.strftime('%Y-%m-%d')}")

        print(f"📊 加载数据: {len(df)} 行，{df['fund_code'].nunique()} 个基金")

        return df

    def load_llm_features(self, fund_codes: Optional[List[str]] = None) -> Dict:
        """Load LLM features, optionally restricted to given fund codes.

        Args:
            fund_codes: fund codes to keep; None keeps all funds.

        Returns:
            Mapping of fund code -> feature payload (empty on any failure).
        """
        if not self.llm_cache_info['exists']:
            print("⚠️ LLM特征缓存文件不存在")
            return {}

        print(f"📖 加载LLM特征数据...")

        try:
            with open(self.llm_features_path, 'rb') as f:
                llm_data = pickle.load(f)

            features = llm_data.get('features', {})

            if fund_codes:
                features = {code: features[code] for code in fund_codes if code in features}
                print(f"📊 筛选基金: {len(fund_codes)} 个，实际有LLM特征: {len(features)} 个")
            else:
                print(f"📊 加载所有LLM特征: {len(features)} 个基金")

            return features

        except Exception as e:
            print(f"❌ 加载LLM特征失败: {e}")
            return {}

    def merge_features(self, fund_codes: Optional[List[str]] = None,
                      date_range: Optional[Tuple[str, str]] = None,
                      include_llm_embeddings: bool = False,
                      include_llm_labels: bool = True) -> Tuple[pd.DataFrame, Dict]:
        """Assemble enhanced features and (optionally) LLM features.

        Args:
            fund_codes: fund codes to keep; None keeps all funds.
            date_range: inclusive (start_date, end_date) filter.
            include_llm_embeddings: collect per-fund embedding arrays.
            include_llm_labels: left-join structured LLM labels onto rows.

        Returns:
            Tuple[enhanced_df, llm_embeddings_dict]:
            - enhanced_df: enhanced features, with label columns merged in
              when requested.
            - llm_embeddings_dict: {fund_code: embedding_array}, kept whole
              rather than split into per-dimension columns.
        """
        print("🔗 合并特征数据...")
        print(f"   🎯 LLM embeddings: {'✅' if include_llm_embeddings else '❌'}")
        print(f"   🎯 LLM labels: {'✅' if include_llm_labels else '❌'}")

        enhanced_df = self.load_enhanced_features(fund_codes, date_range)

        llm_embeddings_dict = {}

        if include_llm_embeddings or include_llm_labels:
            llm_features = self.load_llm_features(fund_codes)

            if not llm_features:
                print("⚠️ 没有LLM特征数据")
                return enhanced_df, llm_embeddings_dict

            # Collect embeddings intact (no per-dimension splitting).
            if include_llm_embeddings:
                for fund_code, features in llm_features.items():
                    embeddings = features.get('embeddings', [])
                    if isinstance(embeddings, list):
                        embeddings = np.array(embeddings)
                    if embeddings is not None and len(embeddings) > 0:
                        llm_embeddings_dict[fund_code] = embeddings

                print(f"📊 LLM embeddings: {len(llm_embeddings_dict)} 个基金")
                if llm_embeddings_dict:
                    sample_embed = next(iter(llm_embeddings_dict.values()))
                    print(f"   📐 Embedding维度: {sample_embed.shape}")

            # Flatten the structured labels (Chinese keys in the pickle) into
            # fixed English-named columns and left-join on fund_code.
            if include_llm_labels:
                llm_label_rows = []
                for fund_code, features in llm_features.items():
                    labels = features.get('labels', {})
                    if labels:
                        row = {'fund_code': fund_code}
                        row.update({
                            'fund_type': labels.get('基金类型', ''),
                            'risk_level': labels.get('风险等级', ''),
                            'main_industry': ','.join(labels.get('主要投资行业', [])) if isinstance(labels.get('主要投资行业'), list) else labels.get('主要投资行业', ''),
                            'investment_style': labels.get('投资风格', ''),
                            'liquidity': labels.get('流动性等级', ''),
                            'asset_scale_level': labels.get('资产规模等级', ''),
                            'fee_level': labels.get('费用水平', ''),
                            'dividend_feature': labels.get('分红特征', ''),
                            'strategy_type': labels.get('投资策略类型', ''),
                            'company_strength': labels.get('基金公司实力', '')
                        })
                        llm_label_rows.append(row)

                if llm_label_rows:
                    llm_labels_df = pd.DataFrame(llm_label_rows)
                    enhanced_df = enhanced_df.merge(llm_labels_df, on='fund_code', how='left')
                    print(f"📊 LLM labels: {len(llm_label_rows)} 个基金的标签已合并")

                    # Report how many rows actually received a label.
                    if 'fund_type' in enhanced_df.columns:
                        label_coverage = enhanced_df['fund_type'].notna().sum()
                        print(f"📊 标签覆盖: {label_coverage}/{len(enhanced_df)} 行 ({label_coverage/len(enhanced_df)*100:.1f}%)")

        print(f"✅ 特征合并完成")
        print(f"   📊 增强特征: {enhanced_df.shape}")
        print(f"   📊 LLM embeddings: {len(llm_embeddings_dict)} 个基金")

        return enhanced_df, llm_embeddings_dict

    def get_enhanced_feature_columns(self) -> List[str]:
        """Return the enhanced-feature column names (key/target columns
        excluded), used to determine the feature dimensionality."""
        if not self.enhanced_cache_info['exists']:
            return []

        try:
            # Header-only read is enough for the column names.
            enhanced_df = pd.read_csv(self.enhanced_data_path, nrows=1)
            # Exclude identifier and raw-amount columns.
            exclude_cols = ['fund_code', 'transaction_date', 'apply_amt', 'redeem_amt']
            feature_cols = [col for col in enhanced_df.columns if col not in exclude_cols]
            return feature_cols
        except Exception as e:
            print(f"❌ 获取特征列失败: {e}")
            return []

    def clear_cache(self, cache_type: str = 'all'):
        """Delete cache files of the given kind.

        Args:
            cache_type: one of 'enhanced', 'llm', 'market', 'all'.
        """
        print(f"🗑️ 清理缓存: {cache_type}")

        files_to_remove = []

        if cache_type in ['enhanced', 'all']:
            files_to_remove.append(self.enhanced_data_path)

        if cache_type in ['llm', 'all']:
            files_to_remove.append(self.llm_features_path)

        if cache_type in ['market', 'all']:
            files_to_remove.append(self.market_cache_path)

        for file_path in files_to_remove:
            try:
                if os.path.exists(file_path):
                    os.remove(file_path)
                    print(f"✅ 已删除: {file_path}")
                else:
                    print(f"📁 文件不存在: {file_path}")
            except Exception as e:
                print(f"❌ 删除失败 {file_path}: {e}")

        # Re-sync the metadata snapshots with the new on-disk state.
        self.enhanced_cache_info = self._load_enhanced_cache_info()
        self.llm_cache_info = self._load_llm_cache_info()

    def get_statistics(self) -> Dict:
        """Return a summary dict describing all three caches."""
        stats = {
            'enhanced_features': {
                'exists': self.enhanced_cache_info['exists'],
                'file_size_kb': self.enhanced_cache_info.get('file_size', 0) / 1024,
                'total_rows': self.enhanced_cache_info.get('date_range', {}).get('total_rows', 0),
                'unique_dates': self.enhanced_cache_info.get('date_range', {}).get('unique_dates', 0),
                'date_range': f"{self.enhanced_cache_info.get('date_range', {}).get('min_date', 'N/A')} 到 {self.enhanced_cache_info.get('date_range', {}).get('max_date', 'N/A')}"
            },
            'llm_features': {
                'exists': self.llm_cache_info['exists'],
                'file_size_kb': self.llm_cache_info.get('file_size', 0) / 1024,
                'fund_count': self.llm_cache_info.get('fund_count', 0),
                'timestamp': self.llm_cache_info.get('timestamp', 'N/A')
            },
            'market_cache': {
                'exists': os.path.exists(self.market_cache_path),
                'file_size_kb': os.path.getsize(self.market_cache_path) / 1024 if os.path.exists(self.market_cache_path) else 0
            }
        }

        return stats

def demo_usage():
    """演示使用方法"""
    print("🎯 特征缓存管理系统演示")
    print("=" * 60)
    
    # 初始化缓存管理器
    cache_manager = FeatureCacheManager()
    
    # 获取缓存状态
    print("\n📊 当前缓存状态:")
    stats = cache_manager.get_statistics()
    for cache_type, info in stats.items():
        print(f"  {cache_type}:")
        for key, value in info.items():
            print(f"    {key}: {value}")
    
    # 检测新增数据
    new_data, new_funds = cache_manager.detect_new_data()
    print(f"\n📈 检测结果:")
    print(f"  新增数据行数: {len(new_data)}")
    print(f"  新增基金数量: {len(new_funds)}")
    
    # 示例：加载特征数据
    if cache_manager.enhanced_cache_info['exists']:
        print(f"\n📖 加载特征数据示例:")
        
        # 加载指定基金的数据
        sample_funds = ['000086', '000192']
        enhanced_data = cache_manager.load_enhanced_features(fund_codes=sample_funds)
        print(f"  指定基金数据: {enhanced_data.shape}")
        
        # 加载指定日期范围的数据
        date_range = ('2024-04-10', '2024-04-15')
        date_filtered_data = cache_manager.load_enhanced_features(date_range=date_range)
        print(f"  指定日期数据: {date_filtered_data.shape}")
        
        # 合并所有特征
        merged_data, llm_embeddings = cache_manager.merge_features(fund_codes=sample_funds)
        print(f"  合并特征数据: {merged_data.shape}")
        print(f"  合并的LLM embeddings: {len(llm_embeddings)} 个基金")

if __name__ == "__main__":
    demo_usage() 