import os
import sys
import argparse
from pathlib import Path
from typing import Dict, Any, Optional
import yaml
import optuna
from optuna.integration import PyTorchLightningPruningCallback
import torch
from lightning import Trainer
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
from lightning.pytorch.loggers import TensorBoardLogger
import tempfile
import shutil

from model.model_interface import MInterface
from data.data_interface import DInterface


class OptunaObjective:
    """Callable Optuna objective: trains one Lightning model per trial.

    For each trial, hyperparameters are sampled in
    ``_suggest_hyperparameters``, merged over ``base_config``, and a model
    is trained with early stopping plus Optuna pruning. The final value of
    ``metric_name`` is returned for the study to optimize.
    """

    def __init__(self,
                 data_config: Dict[str, Any],
                 base_config: Dict[str, Any],
                 max_epochs: int = 50,
                 min_epochs: int = 10,
                 patience: int = 15,
                 metric_name: str = "val_loss",
                 direction: str = "minimize",
                 log_dir: str = "optuna_logs"):
        """
        Args:
            data_config: Keyword arguments forwarded to ``DInterface``.
            base_config: Keyword arguments forwarded to ``MInterface``;
                per-trial suggestions override matching keys.
            max_epochs: Upper bound on training epochs per trial.
            min_epochs: Lower bound on training epochs per trial.
            patience: Early-stopping patience in epochs.
            metric_name: Metric monitored for pruning, early stopping,
                checkpointing, and the returned objective value.
            direction: "minimize" or "maximize"; determines the monitor
                mode and the worst-case value returned for failed trials.
            log_dir: Root directory for per-trial TensorBoard logs.
        """
        self.data_config = data_config
        self.base_config = base_config
        self.max_epochs = max_epochs
        self.min_epochs = min_epochs
        self.patience = patience
        self.metric_name = metric_name
        self.direction = direction
        self.log_dir = Path(log_dir)

        # Derive monitor mode and the "worst possible" fallback value from
        # the optimization direction. (Previously these were inferred from
        # `"loss" in metric_name`, which silently ignored `direction`.)
        self._mode = "min" if direction == "minimize" else "max"
        self._worst = float("inf") if direction == "minimize" else float("-inf")

        # Ensure the log directory exists before any trial starts.
        self.log_dir.mkdir(parents=True, exist_ok=True)

    def __call__(self, trial: optuna.Trial) -> float:
        """Run one training trial and return the monitored metric value."""

        # A temporary directory isolates per-trial artifacts (checkpoints)
        # so trials cannot clash with each other on disk.
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            # Sample this trial's hyperparameters.
            params = self._suggest_hyperparameters(trial)

            # Build the data module.
            datamodule = DInterface(**self.data_config)

            # Suggested hyperparameters override the base configuration.
            model_params = {**self.base_config, **params}
            model = MInterface(**model_params)

            # One TensorBoard run per trial, all under a common directory.
            logger = TensorBoardLogger(
                save_dir=self.log_dir,
                name="trials",
                version=f"trial_{trial.number}",
                default_hp_metric=False  # avoid logging an automatic hp metric
            )

            callbacks = [
                # Lets Optuna stop unpromising trials early.
                PyTorchLightningPruningCallback(trial, monitor=self.metric_name),
                # Early stopping on the monitored metric.
                EarlyStopping(
                    monitor=self.metric_name,
                    patience=self.patience,
                    mode=self._mode,
                    verbose=False
                ),
                # Best checkpoint lands in the trial's temp dir and is
                # discarded with it; it only tracks the best epoch.
                ModelCheckpoint(
                    dirpath=temp_path / "checkpoints",
                    filename=f"trial_{trial.number}_best",
                    monitor=self.metric_name,
                    save_top_k=1,
                    mode=self._mode,
                    verbose=False
                )
            ]

            trainer = Trainer(
                max_epochs=self.max_epochs,
                min_epochs=self.min_epochs,
                logger=logger,
                callbacks=callbacks,
                enable_progress_bar=False,
                enable_model_summary=False,
                devices=1 if torch.cuda.is_available() else "auto",
                accelerator="gpu" if torch.cuda.is_available() else "cpu",
                deterministic=True,
                # Checkpointing must stay enabled for the ModelCheckpoint
                # callback above; files go to the temp dir, so disk usage
                # is still bounded per trial.
                enable_checkpointing=True,
            )

            try:
                # Train the model.
                trainer.fit(model, datamodule=datamodule)

                # Preferred source: the live callback metrics.
                metric_value = trainer.callback_metrics.get(self.metric_name)
                if metric_value is not None:
                    return float(metric_value)

                # Fallback: the epoch-aggregated logged metric.
                if hasattr(trainer, 'logged_metrics'):
                    metric_value = trainer.logged_metrics.get(f"{self.metric_name}_epoch")
                    if metric_value is not None:
                        return float(metric_value)

                # Still nothing: return the worst possible value.
                print(f"警告: 无法获取指标 {self.metric_name}，返回默认值")
                return self._worst

            except optuna.TrialPruned:
                # Must propagate so Optuna records the trial as PRUNED;
                # the broad handler below would otherwise swallow it and
                # record the trial as completed with a worst-case value.
                raise
            except Exception as e:
                print(f"试验 {trial.number} 失败: {str(e)}")
                # Report a worst-case value to mark the trial as failed.
                return self._worst

    def _suggest_hyperparameters(self, trial: optuna.Trial) -> Dict[str, Any]:
        """Define the search space and sample one configuration.

        Returns:
            Mapping of hyperparameter names to sampled values; keys mirror
            the ``MInterface`` constructor arguments they override.

        Raises:
            optuna.TrialPruned: For incompatible architecture combinations.
        """
        params: Dict[str, Any] = {}

        # Optimization hyperparameters (log scale spans orders of magnitude).
        params['learning_rate'] = trial.suggest_float('learning_rate', 1e-5, 1e-2, log=True)
        params['weight_decay'] = trial.suggest_float('weight_decay', 1e-6, 1e-2, log=True)

        # Loss-term weights.
        params['loss_l2'] = trial.suggest_float('loss_l2', 0.0, 1.0)
        params['loss_l1'] = trial.suggest_float('loss_l1', 0.0, 1.0)
        params['loss_perc'] = trial.suggest_float('loss_perc', 0.0, 0.5)
        params['loss_msssim'] = trial.suggest_float('loss_msssim', 0.0, 0.3)
        params['loss_focal'] = trial.suggest_float('loss_focal', 0.0, 0.1)

        # Segmentation-head parameters (only when enabled in the base config).
        if self.base_config.get('enable_segmentation', False):
            params['seg_loss_weight'] = trial.suggest_float('seg_loss_weight', 0.001, 0.1, log=True)
            params['seg_use_multiscale'] = trial.suggest_categorical('seg_use_multiscale', [True, False])
            params['seg_use_dropout'] = trial.suggest_categorical('seg_use_dropout', [True, False])
            if params['seg_use_dropout']:
                params['seg_dropout_rate'] = trial.suggest_float('seg_dropout_rate', 0.1, 0.5)

        # Optimizer / scheduler choices.
        params['optimizer_type'] = trial.suggest_categorical('optimizer_type', ['adamw', 'adam'])
        params['scheduler_type'] = trial.suggest_categorical('scheduler_type', ['cosine', 'plateau'])
        # Plateau-specific scheduler parameters.
        if params['scheduler_type'] == 'plateau':
            params['scheduler_patience'] = trial.suggest_int('scheduler_patience', 5, 20)
            params['scheduler_factor'] = trial.suggest_float('scheduler_factor', 0.3, 0.8)

        # Warmup and gradient clipping.
        params['warmup_epochs'] = trial.suggest_int('warmup_epochs', 0, 15)
        params['grad_clip_norm'] = trial.suggest_float('grad_clip_norm', 0.5, 2.0)

        # Regularization / robustness.
        params['dropout'] = trial.suggest_float('dropout', 0.0, 0.3)
        params['input_noise_std'] = trial.suggest_float('input_noise_std', 0.0, 0.05)

        # EMA: the decay is always suggested so the parameter space stays
        # consistent across trials; a value of 0.0 disables EMA.
        ema_decay = trial.suggest_float('ema_decay', 0.99, 0.9999)
        params['ema_decay'] = ema_decay if trial.suggest_categorical('use_ema', [True, False]) else 0.0

        # Architecture parameters — only search the ones the base config
        # actually exposes. (The original `if ... or ...` made the
        # merge_mode-only / up_mode-only branches unreachable and could
        # suggest a parameter the model does not accept.)
        has_merge = 'merge_mode' in self.base_config
        has_up = 'up_mode' in self.base_config
        if has_merge:
            params['merge_mode'] = trial.suggest_categorical('merge_mode', ['concat', 'add', 'adaptive'])
        if has_up:
            params['up_mode'] = trial.suggest_categorical('up_mode', ['transpose', 'upsample'])
        if has_merge and has_up and params['merge_mode'] == 'add' and params['up_mode'] == 'upsample':
            # Prune incompatible architecture combinations early.
            raise optuna.TrialPruned("Incompatible combination: merge_mode='add' with up_mode='upsample'")

        # Other optional architecture parameters.
        if 'depth' in self.base_config:
            params['depth'] = trial.suggest_int('depth', 3, 6)

        if 'start_filts' in self.base_config:
            params['start_filts'] = trial.suggest_categorical('start_filts', [16, 32, 64])

        if 'batch_norm' in self.base_config:
            params['batch_norm'] = trial.suggest_categorical('batch_norm', [True, False])

        return params


def load_base_config(config_path: str) -> Dict[str, Any]:
    """Load a YAML configuration file.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        The parsed configuration dict. ``{}`` when the file does not exist
        or is empty — ``yaml.safe_load`` returns ``None`` for an empty
        document, which would violate the declared return type and break
        callers that expect dict semantics.
    """
    if not os.path.exists(config_path):
        print(f"配置文件不存在: {config_path}")
        return {}

    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Normalize the empty-document case (None) to an empty dict.
    return config or {}


def create_study_name(model_type: str, data_name: Optional[str] = None) -> str:
    """Build a study name from model type, optional dataset, and device.

    Args:
        model_type: Identifier of the model architecture.
        data_name: Optional dataset identifier to embed in the name.

    Returns:
        A name like ``optuna_<model>[_<data>]_<device>``.
    """
    try:
        # Spaces in GPU names (e.g. "NVIDIA GeForce RTX 3090") would make
        # awkward study names / storage keys.
        device_name = torch.cuda.get_device_name().replace(' ', '_') if torch.cuda.is_available() else 'cpu'
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; fall back to a generic label.
        device_name = 'gpu' if torch.cuda.is_available() else 'cpu'

    if data_name:
        return f"optuna_{model_type}_{data_name}_{device_name}"
    else:
        return f"optuna_{model_type}_{device_name}"


def main() -> None:
    """CLI entry point: parse arguments, build the Optuna study, optimize.

    Loads the base YAML config, extracts the model/data sections (supports
    both a flat layout and the Lightning-CLI ``init_args`` layout), runs
    the study, then writes the best hyperparameters to a new YAML file.
    """
    parser = argparse.ArgumentParser(description="使用Optuna进行超参数优化")
    
    # Required arguments.
    parser.add_argument("--config", type=str, required=True, 
                       help="基础配置文件路径")
    
    # Optuna-specific options: study identity, sampling, pruning.
    parser.add_argument("--n_trials", type=int, default=100,
                       help="试验次数 (default: 100)")
    parser.add_argument("--study_name", type=str, default=None,
                       help="研究名称，如果不指定则自动生成")
    parser.add_argument("--storage", type=str, default=None,
                       help="Optuna存储URL，如sqlite:///optuna.db")
    parser.add_argument("--sampler", type=str, default="TPE", 
                       choices=["TPE", "Random", "CmaEs"],
                       help="采样器类型 (default: TPE)")
    parser.add_argument("--pruner", type=str, default="MedianPruner",
                       choices=["MedianPruner", "PercentilePruner", "HyperbandPruner"],
                       help="剪枝器类型 (default: MedianPruner)")
    
    # Per-trial training options.
    parser.add_argument("--max_epochs", type=int, default=50,
                       help="每个试验的最大训练轮数 (default: 50)")
    parser.add_argument("--min_epochs", type=int, default=10,
                       help="每个试验的最小训练轮数 (default: 10)")
    parser.add_argument("--patience", type=int, default=15,
                       help="早停耐心值 (default: 15)")
    parser.add_argument("--metric", type=str, default="val_loss",
                       help="优化目标指标 (default: val_loss)")
    parser.add_argument("--direction", type=str, default="minimize",
                       choices=["minimize", "maximize"],
                       help="优化方向 (default: minimize)")
    
    # Logging options.
    parser.add_argument("--log_dir", type=str, default="optuna_logs",
                       help="TensorBoard日志目录 (default: optuna_logs)")
    
    # Miscellaneous options.
    parser.add_argument("--seed", type=int, default=42,
                       help="随机种子 (default: 42)")
    parser.add_argument("--verbose", action="store_true",
                       help="显示详细输出")
    
    args = parser.parse_args()
    
    # Seed torch for reproducibility (samplers are seeded separately below).
    torch.manual_seed(args.seed)
    
    # Load the base configuration; abort when missing or empty.
    base_config = load_base_config(args.config)
    if not base_config:
        print("无法加载配置文件，退出")
        sys.exit(1)
    
    # Extract the model section. Two layouts are supported: a 'model' key
    # (optionally Lightning-CLI style with 'init_args'), or a flat layout
    # where model kwargs sit at the root of the file.
    if 'model' in base_config:
        model_config = base_config['model']
        # Lightning CLI nests the constructor kwargs under init_args.
        if 'init_args' in model_config:
            model_config = model_config['init_args']
    else:
        # Flat layout: everything except the known non-model top-level keys.
        model_config = {k: v for k, v in base_config.items() 
                       if k not in ['data', 'trainer', 'seed_everything', 'plots', 'ckpt_path', 'find_lr']}
    
    if 'data' in base_config:
        data_config = base_config['data'].copy()  # copy so base_config is not mutated
        # Lightning CLI nests the constructor kwargs under init_args.
        if 'init_args' in data_config:
            data_config = data_config['init_args'].copy()
    else:
        print("错误: 配置文件中未找到数据配置")
        sys.exit(1)
        
    # Warn (but do not abort) when referenced train/val files are missing.
    for file_key in ['train_file', 'val_file']:
        if file_key in data_config and data_config[file_key]:
            # Path may be absolute or relative to the working directory.
            file_value = data_config[file_key]
            file_path = Path(file_value)
            if not file_path.exists():
                print(f"警告: {file_key} 文件不存在: {file_path.absolute()}")
    
    # Derive a study name from model type + device unless one was given.
    if args.study_name is None:
        model_type = model_config.get('model_type', 'unknown')
        study_name = create_study_name(model_type, data_name=None)
    else:
        study_name = args.study_name
    
    print(f"开始Optuna优化，研究名称: {study_name}")
    print(f"目标指标: {args.metric} ({args.direction})")
    print(f"试验次数: {args.n_trials}")
    print(f"模型类型: {model_config.get('model_type', 'unknown')}")
    print(f"TensorBoard日志目录: {Path(args.log_dir).absolute()}")
    
    # Build the sampler (argparse `choices` guarantees one branch matches,
    # so `sampler` is always bound).
    if args.sampler == "TPE":
        sampler = optuna.samplers.TPESampler(seed=args.seed)
    elif args.sampler == "Random":
        sampler = optuna.samplers.RandomSampler(seed=args.seed)
    elif args.sampler == "CmaEs":
        sampler = optuna.samplers.CmaEsSampler(seed=args.seed)
    
    # Build the pruner (same `choices` guarantee as above).
    if args.pruner == "MedianPruner":
        pruner = optuna.pruners.MedianPruner(n_startup_trials=5, n_warmup_steps=10)
    elif args.pruner == "PercentilePruner":
        pruner = optuna.pruners.PercentilePruner(25.0, n_startup_trials=5, n_warmup_steps=10)
    elif args.pruner == "HyperbandPruner":
        pruner = optuna.pruners.HyperbandPruner(min_resource=5, max_resource=args.max_epochs)
    
    # Create the study, or resume an existing one when a storage URL is given.
    study = optuna.create_study(
        study_name=study_name,
        storage=args.storage,
        direction=args.direction,
        sampler=sampler,
        pruner=pruner,
        load_if_exists=True
    )
    
    # Objective that trains one model per trial.
    objective = OptunaObjective(
        data_config=data_config,
        base_config=model_config,
        max_epochs=args.max_epochs,
        min_epochs=args.min_epochs,
        patience=args.patience,
        metric_name=args.metric,
        direction=args.direction,
        log_dir=args.log_dir
    )
    
    # Run the optimization loop.
    try:
        study.optimize(
            objective, 
            n_trials=args.n_trials,
            show_progress_bar=args.verbose
        )
        
        # Report the best trial.
        print("\n" + "="*50)
        print("Optuna优化完成！")
        print("="*50)
        
        print(f"最佳试验号: {study.best_trial.number}")
        print(f"最佳指标值: {study.best_value:.6f}")
        print(f"最佳参数:")
        for key, value in study.best_params.items():
            print(f"  {key}: {value}")
        
        # Merge the best hyperparameters back into a copy of the config.
        # NOTE(review): .copy() is shallow, so the nested dicts updated
        # below are shared with base_config — acceptable here because the
        # process exits right after, but confirm if this is ever reused.
        best_config = base_config.copy()
        
        # Place the parameters wherever the model kwargs live in this layout.
        if 'model' in best_config:
            if 'init_args' in best_config['model']:
                best_config['model']['init_args'].update(study.best_params)
            else:
                best_config['model'].update(study.best_params)
        else:
            # No model section in the original config: create one.
            best_config['model'] = study.best_params
        
        # Persist the merged configuration to the working directory.
        output_config_path = f"best_config_{study_name}.yaml"
        with open(output_config_path, 'w', encoding='utf-8') as f:
            yaml.dump(best_config, f, default_flow_style=False, allow_unicode=True, indent=2)
        
        print(f"\n最佳配置已保存到: {output_config_path}")
        print(f"TensorBoard日志位置: {Path(args.log_dir).absolute()}")
        print(f"查看日志命令: tensorboard --logdir {args.log_dir}")
        
        # Summarize the most impactful parameters.
        print(f"\n关键参数摘要:")
        key_params = ['learning_rate', 'weight_decay', 'loss_l2', 'loss_l1', 'scheduler_type', 'optimizer_type']
        for param in key_params:
            if param in study.best_params:
                print(f"  {param}: {study.best_params[param]}")
        
        # Trial-state statistics for the whole study.
        print(f"\n优化统计:")
        print(f"  完成的试验数: {len(study.trials)}")
        print(f"  剪枝的试验数: {len([t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED])}")
        print(f"  失败的试验数: {len([t for t in study.trials if t.state == optuna.trial.TrialState.FAIL])}")
        
    except KeyboardInterrupt:
        # Ctrl-C: report progress so far instead of crashing.
        # NOTE(review): study.best_trial raises if no trial has completed
        # (e.g. all pruned/failed) — verify before relying on this path.
        print("\n优化被用户中断")
        if len(study.trials) > 0:
            print(f"当前最佳试验: {study.best_trial.number}")
            print(f"当前最佳值: {study.best_value:.6f}")
    except Exception as e:
        print(f"\n优化过程出错: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()
