import pickle
import json
import os
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
from typing import Dict, Any, Optional

class ModelPersistenceManager:
    """Model persistence manager — saves and loads trained models.

    Models are pickled under per-type subdirectories of ``base_path``
    (``genetic_algorithm``, ``rule_forest``, ``ensemble``), with a JSON
    sidecar file mirroring the metadata for human inspection.
    """

    def __init__(self, base_path: str = "./models"):
        """Create the directory layout under *base_path* if it does not exist."""
        self.base_path = Path(base_path)
        # parents=True so a nested base path (e.g. "out/run1/models") also works;
        # the original mkdir(exist_ok=True) raised FileNotFoundError in that case.
        self.base_path.mkdir(parents=True, exist_ok=True)

        # Per-model-type subdirectories.
        self.genetic_path = self.base_path / "genetic_algorithm"
        self.rule_forest_path = self.base_path / "rule_forest"
        self.ensemble_path = self.base_path / "ensemble"

        for path in (self.genetic_path, self.rule_forest_path, self.ensemble_path):
            path.mkdir(exist_ok=True)

    def _dir_for(self, model_type: str) -> Path:
        """Return the storage directory for *model_type*.

        Raises:
            ValueError: if *model_type* is not a supported type.
        """
        if model_type == 'genetic_algorithm':
            return self.genetic_path
        if model_type == 'rule_forest':
            return self.rule_forest_path
        raise ValueError(f"不支持的模型类型: {model_type}")

    def _save(self, model: Any, model_name: str,
              metadata: Optional[Dict[str, Any]], model_type: str) -> Path:
        """Pickle *model* with its metadata and write a JSON metadata sidecar.

        Returns the path of the written ``.pkl`` file.
        """
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filepath = self._dir_for(model_type) / f"{model_name}_{timestamp}.pkl"

        meta = metadata or {}
        save_data = {
            'model': model,
            'metadata': meta,
            'save_timestamp': timestamp,
            'model_type': model_type,
        }

        with open(filepath, 'wb') as f:
            pickle.dump(save_data, f)

        # Mirror metadata as JSON for easy inspection. Bug fix: the original
        # dumped the raw `metadata` argument, writing literal JSON `null` when
        # it was None; now always a dict, consistent with the pickled payload.
        with open(filepath.with_suffix('.json'), 'w', encoding='utf-8') as f:
            json.dump(meta, f, indent=2, ensure_ascii=False, default=str)

        return filepath

    def save_genetic_model(self, model: Any, model_name: str,
                           metadata: Optional[Dict[str, Any]] = None) -> str:
        """Save a trained genetic-algorithm model.

        Args:
            model: trained genetic-algorithm model object (must be picklable).
            model_name: base name used in the timestamped filename.
            metadata: optional metadata (training parameters, metrics, ...).

        Returns:
            Path of the saved ``.pkl`` file as a string.
        """
        filepath = self._save(model, model_name, metadata, 'genetic_algorithm')
        print(f"遗传算法模型已保存: {filepath}")
        return str(filepath)

    def save_rule_forest_model(self, model: Any, model_name: str,
                               metadata: Optional[Dict[str, Any]] = None) -> str:
        """Save a trained rule-forest model.

        Args:
            model: trained rule-forest model object (must be picklable).
            model_name: base name used in the timestamped filename.
            metadata: optional metadata.

        Returns:
            Path of the saved ``.pkl`` file as a string.
        """
        filepath = self._save(model, model_name, metadata, 'rule_forest')
        print(f"规则森林模型已保存: {filepath}")
        return str(filepath)

    def _load_pickle(self, filepath: Path) -> Optional[Dict[str, Any]]:
        """Unpickle *filepath*; return the stored dict, or None on failure.

        NOTE(security): ``pickle.load`` executes arbitrary code from the file —
        only load model files written by this manager / a trusted source.
        """
        try:
            with open(filepath, 'rb') as f:
                model_data = pickle.load(f)
            print(f"已加载模型: {filepath}")
            return model_data
        except Exception as e:
            # Best-effort load: report and return None rather than crash callers.
            print(f"加载模型失败: {e}")
            return None

    def load_latest_model(self, model_type: str,
                          model_name: str = None) -> Optional[Dict[str, Any]]:
        """Load the most recently modified model of the given type.

        Args:
            model_type: 'genetic_algorithm' or 'rule_forest'.
            model_name: optional name filter (matches the filename prefix).

        Returns:
            The stored dict ({'model', 'metadata', 'save_timestamp',
            'model_type'}), or None if no file was found / loading failed.

        Raises:
            ValueError: for an unsupported *model_type*.
        """
        search_path = self._dir_for(model_type)

        pattern = f"{model_name}_*.pkl" if model_name else "*.pkl"
        model_files = list(search_path.glob(pattern))
        if not model_files:
            print(f"未找到{model_type}类型的模型文件")
            return None

        # Newest by filesystem modification time.
        latest_file = max(model_files, key=lambda p: p.stat().st_mtime)
        return self._load_pickle(latest_file)

    def load_model_by_date(self, model_type: str,
                           date_str: str) -> Optional[Dict[str, Any]]:
        """Load the newest model of the given type saved on a specific date.

        Args:
            model_type: 'genetic_algorithm' or 'rule_forest'.
            date_str: date string in YYYYMMDD format (matches the filename's
                timestamp date component).

        Returns:
            The stored dict, or None if no file was found / loading failed.

        Raises:
            ValueError: for an unsupported *model_type*.
        """
        search_path = self._dir_for(model_type)

        # Filenames are "{name}_{YYYYMMDD}_{HHMMSS}.pkl", so the date sits
        # between two underscores.
        model_files = list(search_path.glob(f"*_{date_str}_*.pkl"))
        if not model_files:
            print(f"未找到{date_str}的{model_type}模型")
            return None

        latest_file = max(model_files, key=lambda p: p.stat().st_mtime)
        return self._load_pickle(latest_file)

    def _scan_models(self, search_path: Path) -> list:
        """List model files in *search_path*, newest first, with JSON metadata."""
        entries = []
        for file in search_path.glob("*.pkl"):
            metadata_file = file.with_suffix('.json')
            metadata: Dict[str, Any] = {}
            if metadata_file.exists():
                with open(metadata_file, 'r', encoding='utf-8') as f:
                    metadata = json.load(f)

            stat = file.stat()
            entries.append({
                'filename': file.name,
                'filepath': str(file),
                'size': stat.st_size,
                'modified': datetime.datetime.fromtimestamp(stat.st_mtime),
                'metadata': metadata,
            })
        return sorted(entries, key=lambda x: x['modified'], reverse=True)

    def list_available_models(self, model_type: str = None) -> Dict[str, list]:
        """List all available models, newest first per type.

        Args:
            model_type: optional filter ('genetic_algorithm' or 'rule_forest');
                None lists both types.

        Returns:
            Mapping of model type to a list of per-file info dicts
            ('filename', 'filepath', 'size', 'modified', 'metadata').
        """
        models = {}
        if model_type is None or model_type == 'genetic_algorithm':
            models['genetic_algorithm'] = self._scan_models(self.genetic_path)
        if model_type is None or model_type == 'rule_forest':
            models['rule_forest'] = self._scan_models(self.rule_forest_path)
        return models

    def delete_old_models(self, days_to_keep: int = 30):
        """Delete model files older than *days_to_keep* days.

        Removes both the ``.pkl`` file and its JSON metadata sidecar.
        """
        cutoff_time = datetime.datetime.now() - datetime.timedelta(days=days_to_keep)

        for model_type in ('genetic_algorithm', 'rule_forest'):
            search_path = self._dir_for(model_type)

            deleted_count = 0
            for file in search_path.glob("*.pkl"):
                if datetime.datetime.fromtimestamp(file.stat().st_mtime) < cutoff_time:
                    file.unlink()
                    # Remove the matching JSON sidecar, if any.
                    json_file = file.with_suffix('.json')
                    if json_file.exists():
                        json_file.unlink()
                    deleted_count += 1

            print(f"已删除{deleted_count}个旧的{model_type}模型文件")
# 使用示例
if __name__ == "__main__":
    # Demo: persist a toy genetic-algorithm model and read it back.

    class GeneticModel:
        """Minimal stand-in for a trained genetic-algorithm model."""
        def __init__(self):
            self.population = []
            self.best_individual = None
            self.generation = 0

    persistence = ModelPersistenceManager()

    # Build a dummy trained model.
    demo_model = GeneticModel()
    demo_model.population = [1, 2, 3, 4, 5]
    demo_model.best_individual = 5
    demo_model.generation = 100

    run_metadata = {
        'accuracy': 0.85,
        'training_time': '2.5小时',
        'population_size': 1000,
        'generations': 500,
        'fitness_function': 'sharpe_ratio',
    }

    # Save, reload the newest copy, then enumerate everything on disk.
    saved_path = persistence.save_genetic_model(demo_model, "gold_trading_ga", run_metadata)
    loaded_data = persistence.load_latest_model('genetic_algorithm', 'gold_trading_ga')
    available_models = persistence.list_available_models()
    print("可用模型:", available_models)