import json
import os
import shutil
import traceback
import wandb
import torch
from datetime import datetime
from training.utils.common_util import CommonUtils, load_run_info, save_run_info, cleanup_run_info
from training.utils.ddp_util import DDPUtil
class ExperimentResumer:
    """Experiment resume helper: manages resuming and restarting experiments.

    Wraps the wandb run lifecycle (init / resume / finish), persistence of
    the run-info file used for resuming, and distributed awareness so that
    only the primary process (rank 0) talks to wandb or touches shared files.
    """

    def __init__(self, script_dir=None):
        """Initialize the experiment resumer.

        Args:
            script_dir: Script directory; when None, the directory of the
                current file is used (resolved by CommonUtils).
        """
        self.script_dir = CommonUtils.get_script_dir(script_dir)
        self.project_root = CommonUtils.ensure_project_root(self.script_dir)
        self.config_path = os.path.join(self.script_dir, 'config.yaml')
        self.config = None      # loaded config dict (set by load_config / init_wandb)
        self.run = None         # wandb run object (rank 0 only)
        self.run_name = None    # human-readable run name (timestamp-based)

        # Work relative to the project root from here on.
        os.chdir(self.project_root)

    @staticmethod
    def _detect_distributed():
        """Return ``(is_distributed, rank)`` for the current process.

        Checks the torchrun / torch.distributed.launch env var first, then an
        already-initialized process group. Non-distributed runs report rank 0.
        """
        # Launched via torch.distributed.launch or torchrun.
        if 'LOCAL_RANK' in os.environ:
            return True, int(os.environ.get('LOCAL_RANK', 0))
        # Process group already initialized by some other entry point.
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            return True, torch.distributed.get_rank()
        return False, 0

    def load_config(self, config_loader):
        """Load the config via *config_loader* and inject ``trainer.save_dir``.

        Args:
            config_loader: Callable taking the config path and returning a dict.

        Returns:
            The loaded config dict (also stored on ``self.config``).
        """
        print(f"加载配置文件: {self.config_path}")
        self.config = config_loader(self.config_path)
        if 'trainer' not in self.config:
            self.config['trainer'] = {}
        # Checkpoints/artifacts are saved next to the experiment script.
        self.config['trainer']['save_dir'] = self.script_dir
        return self.config

    def load_run_info(self, run_info_loader=None):
        """Load previous run info, preferring a custom loader when given.

        Args:
            run_info_loader: Optional callable taking the script dir and
                returning the stored run info (or a falsy value if none).

        Returns:
            The run info loaded by the custom or default loader.
        """
        if run_info_loader:
            print(f"使用自定义加载器检查运行信息")
            return run_info_loader(self.script_dir)

        print(f"检查是否存在之前的运行信息")
        return load_run_info(self.script_dir)

    def save_run_info(self, run_info_saver=None, run_id=None, run_name=None, config=None):
        """Persist run info so the experiment can be resumed later.

        Uses *run_info_saver* when all custom arguments are truthy; otherwise
        falls back to the default saver fed from this instance's state.

        Returns:
            The saver's result, or False when nothing could be saved.
        """
        if run_info_saver and run_id and run_name and config:
            print(f"使用自定义保存器保存运行信息")
            return run_info_saver(self.script_dir, run_id, run_name, config)

        # Default path: requires both an active wandb run and a loaded config.
        if self.run and self.config:
            print(f"保存运行信息")
            return save_run_info(self.script_dir, self.run.id, self.run_name, self.config)

        return False

    def init_wandb(self, resume=False, run_info_loader=None):
        """Initialize (or resume) the wandb run.

        Only rank 0 — or a non-distributed process — actually calls
        ``wandb.init``; other ranks merely populate ``run_name``/``config``.

        Args:
            resume: Whether to try resuming a previous run.
            run_info_loader: Run-info loading function (required for resume).

        Returns:
            The wandb run object on the primary process, None on other ranks.
        """
        is_distributed, rank = self._detect_distributed()

        if not is_distributed or rank == 0:
            if resume and run_info_loader:
                run_info = self.load_run_info(run_info_loader)
                if run_info:
                    print(f"恢复之前的运行: {run_info['run_name']}")
                    self.run = wandb.init(
                        project=run_info['config']['wandb']['project'],
                        dir=self.project_root,
                        id=run_info['run_id'],
                        resume="must",  # fail loudly if the run id cannot be resumed
                        config=run_info['config'],
                        mode=run_info['config']['wandb']['mode']
                    )
                    self.run_name = run_info['run_name']
                    self.config = run_info['config']
                    return self.run

            # Start a fresh wandb run named after the current timestamp.
            self.run_name = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
            print(f"初始化新的运行: {self.run_name}")
            self.run = wandb.init(
                project=self.config['wandb']['project'],
                dir=self.project_root,
                name=self.run_name,
                config=self.config,
                mode=self.config['wandb']['mode']
            )

            return self.run
        else:
            # Non-primary ranks skip wandb but still need run_name/config so
            # run info stays consistent across processes.
            print(f"进程 {rank}: 在分布式环境中，不初始化wandb")
            if resume and run_info_loader:
                run_info = self.load_run_info(run_info_loader)
                if run_info:
                    self.run_name = run_info['run_name']
                    self.config = run_info['config']
                    return None

            # Give non-primary ranks a distinguishable name as well.
            self.run_name = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}_rank{rank}"
            return None

    def cleanup(self):
        """Finish the wandb run and remove the run-info file (rank 0 only)."""
        is_distributed, rank = self._detect_distributed()

        # Only the primary process (or a non-distributed run) owns wandb and
        # the shared run-info file.
        if not is_distributed or rank == 0:
            # Finish wandb first so pending data is uploaded before we delete
            # the resume state.
            try:
                if hasattr(wandb, 'run') and wandb.run is not None:
                    print("结束wandb运行，正在完成数据上传...")
                    wandb.finish()
            except Exception as e:
                print(f"结束wandb运行时出错: {e}")
                traceback.print_exc()

            cleanup_run_info(self.script_dir)

    def run_experiment(self, main_func, config_loader, run_info_loader, run_info_saver, resume=True):
        """Run or resume an experiment.

        Args:
            main_func: Main training function, called with the config dict.
            config_loader: Config loading function.
            run_info_loader: Run-info loading function.
            run_info_saver: Run-info saving function.
            resume: Whether to try resuming a previous run.

        Returns:
            True on success.

        Raises:
            Exception: Re-raises whatever *main_func* (or setup) raised, after
                printing the traceback, so callers can observe the failure.
        """
        experiment_succeeded = False
        try:
            self.load_config(config_loader)

            # Only the primary process actually initializes wandb.
            self.init_wandb(resume=resume, run_info_loader=run_info_loader)

            # Set up DDP when launched via torch.distributed.launch / torchrun.
            if 'LOCAL_RANK' in os.environ:
                rank = int(os.environ.get('LOCAL_RANK', 0))
                world_size = int(os.environ.get('WORLD_SIZE', 1))
                print(f"初始化分布式训练环境，rank={rank}, world_size={world_size}")
                DDPUtil.setup(rank, world_size)

            is_distributed, rank = self._detect_distributed()

            # Only the primary process persists run info for later resume.
            if not is_distributed or rank == 0:
                if self.run and self.config:
                    self.save_run_info(run_info_saver, self.run.id, self.run_name, self.config)

            print(f"开始/恢复运行: {self.run_name}")
            main_func(self.config)

            experiment_succeeded = True
            return True

        except Exception as e:
            print(f"实验失败: {str(e)}")
            traceback.print_exc()
            # Re-raise so external callers can react to the error.
            raise
        finally:
            # Clean up only on success: on failure the run-info file is kept
            # on disk so the experiment can be resumed later.
            if experiment_succeeded:
                self.cleanup()

    def create_new_experiment(self, main_func, config_loader, run_info_saver):
        """Create a brand-new experiment without attempting to resume."""
        return self.run_experiment(main_func, config_loader, None, run_info_saver, resume=False)

    def force_resume(self, main_func, config_loader, run_info_loader, run_info_saver):
        """Force-resume a previous experiment; raise if none can be found.

        Raises:
            ValueError: When no resumable run info exists.
        """
        if run_info_loader:
            run_info = self.load_run_info(run_info_loader)
            if not run_info:
                raise ValueError("没有找到可恢复的运行信息")

        self.load_config(config_loader)
        self.init_wandb(resume=True, run_info_loader=run_info_loader)
        if self.run and self.config:
            self.save_run_info(run_info_saver, self.run.id, self.run_name, self.config)
        print(f"强制恢复运行: {self.run_name}")
        main_func(self.config)
        self.cleanup()

    def list_runs(self):
        """List run-info files in the script directory (including backups).

        Returns:
            A list of dicts with 'file', 'run_name', 'run_id' and 'project';
            unreadable files are skipped with a warning.
        """
        import glob
        run_info_path = CommonUtils.get_run_info_file_path(self.script_dir)
        run_files = glob.glob(os.path.join(self.script_dir, 'run_info_*.json'))
        # Include the canonical run-info file alongside any backups.
        if os.path.exists(run_info_path):
            run_files.append(run_info_path)

        if not run_files:
            print("没有找到运行信息文件")
            return []

        runs = []
        for file_path in run_files:
            try:
                with open(file_path, 'r') as f:
                    run_data = json.load(f)
                    runs.append({
                        'file': os.path.basename(file_path),
                        'run_name': run_data.get('run_name', 'Unknown'),
                        'run_id': run_data.get('run_id', 'Unknown'),
                        'project': run_data.get('config', {}).get('wandb', {}).get('project', 'Unknown')
                    })
            except Exception as e:
                print(f"警告: 读取运行信息文件失败 {file_path}: {str(e)}")

        return runs

    def backup_run_info(self, suffix=None):
        """Back up the current run-info file.

        Args:
            suffix: Backup-file suffix; a timestamp is used when None.

        Returns:
            The backup path on success, None when there is nothing to back up
            or the copy fails.
        """
        run_info_path = CommonUtils.get_run_info_file_path(self.script_dir)
        if not os.path.exists(run_info_path):
            print("警告: 没有找到运行信息文件，无法备份")
            return None

        suffix = suffix or datetime.now().strftime('%Y%m%d_%H%M%S')
        backup_path = f"{run_info_path}_{suffix}"

        try:
            # copy2 preserves metadata (mtime etc.) of the original file.
            shutil.copy2(run_info_path, backup_path)
            print(f"已备份运行信息到: {backup_path}")
            return backup_path
        except Exception as e:
            print(f"错误: 备份运行信息失败: {str(e)}")
            return None

