import json
import os
import torch
from transformers import AutoTokenizer


class DeepSpeedLauncherPlugin:
    """Plugin that wires a model/optimizer pair into DeepSpeed.

    Builds a DeepSpeed configuration either from a JSON file referenced in
    ``config`` or from defaults derived from the training settings, then
    hands the model and optimizer to ``deepspeed.initialize``.
    """

    def __init__(self, config):
        """
        Initialize the DeepSpeed launcher plugin.

        Args:
            config (dict): Configuration dictionary. Recognized keys include
                ``deepspeed_config_path``, ``deepspeed_zero_stage``,
                ``batch_size``, ``accumulation_steps``, ``dtype``,
                ``learning_rate``, ``warmup_steps``, ``total_steps``,
                ``scheduler_type``, ``deepspeed_offload_optimizer`` and
                ``deepspeed_offload_param``.

        Raises:
            ImportError: If the ``deepspeed`` package is not installed.
        """
        self.config = config
        self.deepspeed = None
        self._init_deepspeed()

    def _init_deepspeed(self):
        """Import DeepSpeed and keep the module handle on the instance.

        Raises:
            ImportError: If DeepSpeed is not installed.
        """
        try:
            import deepspeed
        except ImportError as err:
            # Chain the original error so the missing-module details survive.
            raise ImportError(
                "DeepSpeed is not installed. Please install it using 'pip install deepspeed'"
            ) from err
        self.deepspeed = deepspeed

    def prepare_model_and_optimizer(self, model, optimizer):
        """
        Wrap a model and optimizer with DeepSpeed.

        Args:
            model: Model instance to train.
            optimizer: Optimizer instance to wrap.

        Returns:
            tuple: ``(model_engine, optimizer)`` as returned by
            ``deepspeed.initialize`` (the lr-scheduler and dataloader slots
            of its 4-tuple are discarded).
        """
        ds_config = self._get_deepspeed_config()
        model_engine, optimizer, _, _ = self.deepspeed.initialize(
            model=model,
            optimizer=optimizer,
            config=ds_config
        )
        return model_engine, optimizer

    def _get_deepspeed_config(self):
        """
        Load the DeepSpeed configuration.

        Prefers the JSON file at ``config['deepspeed_config_path']``
        (default ``'../ds_config.json'``); falls back to the generated
        default configuration when that file does not exist.

        Returns:
            dict: DeepSpeed configuration dictionary.
        """
        ds_config_path = self.config.get('deepspeed_config_path', '../ds_config.json')
        if os.path.exists(ds_config_path):
            with open(ds_config_path, 'r') as f:
                return json.load(f)
        return self._get_default_deepspeed_config()

    @staticmethod
    def _resolve_world_size():
        """Best-effort count of participating processes/GPUs.

        Returns the initialized ``torch.distributed`` world size when
        available, otherwise the local CUDA device count (which may be 0 on
        CPU-only hosts — callers clamp it to at least 1).
        """
        try:
            if torch.distributed.is_initialized():
                return torch.distributed.get_world_size()
            return torch.cuda.device_count()
        except (AttributeError, RuntimeError):
            # torch built without distributed support, or the query failed
            # mid-initialization; fall back to the local device count.
            return torch.cuda.device_count()

    def _get_default_deepspeed_config(self):
        """
        Build the default DeepSpeed configuration from ``self.config``.

        Returns:
            dict: Default DeepSpeed configuration covering batch sizes,
            ZeRO-stage options, fp16/bf16 flags, and optimizer/scheduler
            sections.
        """
        zero_stage = self.config.get('deepspeed_zero_stage', 3)
        batch_size = self.config.get('batch_size', 32)
        accumulation_steps = self.config.get('accumulation_steps', 1)
        world_size = self._resolve_world_size()

        # Global batch = micro batch per GPU * grad accumulation * world size.
        train_batch_size = batch_size * accumulation_steps * max(1, world_size)

        # Exactly one of fp16/bf16 is enabled depending on the configured dtype
        # (neither for e.g. 'float32').
        dtype = self.config.get('dtype', 'bfloat16')
        ds_config = {
            "train_batch_size": train_batch_size,
            "train_micro_batch_size_per_gpu": batch_size,
            "gradient_accumulation_steps": accumulation_steps,
            "zero_optimization": {
                "stage": zero_stage,
            },
            "fp16": {
                "enabled": dtype == 'float16'
            },
            "bf16": {
                "enabled": dtype == 'bfloat16'
            },
            "wall_clock_breakdown": False,
            "zero_force_ds_cpu_optimizer": False
        }

        zero = ds_config["zero_optimization"]
        if zero_stage >= 1 and self.config.get('deepspeed_offload_optimizer', True):
            # Keep optimizer state in pinned host memory to save GPU RAM.
            zero["offload_optimizer"] = {
                "device": "cpu",
                "pin_memory": True
            }

        if zero_stage >= 2:
            # ZeRO-2: overlap gradient reduction with the backward pass.
            # Sizes are integers; DeepSpeed config sizes are element counts.
            zero["overlap_comm"] = True
            zero["reduce_scatter"] = True
            zero["reduce_bucket_size"] = 500_000_000
            zero["contiguous_gradients"] = True

        if zero_stage >= 3:
            if self.config.get('deepspeed_offload_param', True):
                # ZeRO-3 can additionally offload the parameters themselves.
                zero["offload_param"] = {
                    "device": "cpu",
                    "pin_memory": True
                }
            zero["overlap_comm"] = True
            zero["contiguous_gradients"] = True
            zero["sub_group_size"] = 1_000_000_000
            zero["reduce_bucket_size"] = 500_000_000
            zero["stage3_prefetch_bucket_size"] = 500_000_000
            zero["stage3_param_persistence_threshold"] = 1_000_000
            zero["stage3_max_live_parameters"] = 1_000_000_000
            zero["stage3_max_reuse_distance"] = 1_000_000_000

        # Optimizer section: DeepSpeed-managed AdamW.
        lr = self.config.get('learning_rate', 5e-4)
        ds_config["optimizer"] = {
            "type": "AdamW",
            "params": {
                "lr": lr,
                "betas": [0.9, 0.999],
                "eps": 1e-8,
                "weight_decay": 0.0
            }
        }

        # Scheduler section: only the two DeepSpeed warmup schedulers are
        # supported; any other scheduler_type leaves the section out.
        scheduler_type = self.config.get('scheduler_type', 'WarmupLR')
        if scheduler_type == 'WarmupLR':
            ds_config["scheduler"] = {
                "type": "WarmupLR",
                "params": {
                    "warmup_min_lr": 0,
                    "warmup_max_lr": lr,
                    "warmup_num_steps": self.config.get('warmup_steps', 1000)
                }
            }
        elif scheduler_type == 'WarmupDecayLR':
            ds_config["scheduler"] = {
                "type": "WarmupDecayLR",
                "params": {
                    "warmup_min_lr": 0,
                    "warmup_max_lr": lr,
                    "warmup_num_steps": self.config.get('warmup_steps', 1000),
                    "total_num_steps": self.config.get('total_steps', 10000)
                }
            }

        return ds_config

    def is_local_rank_0(self):
        """
        Check whether this process is local rank 0.

        Returns:
            bool: True when DeepSpeed is unavailable, its communication
            backend is not yet initialized, or the local rank is 0.
        """
        if not self.deepspeed:
            return True
        comm = getattr(self.deepspeed, "comm", None)
        # NOTE(review): querying the rank before deepspeed.comm is
        # initialized raises; treat that state as rank 0 (single process).
        if comm is None or not comm.is_initialized():
            return True
        return comm.get_local_rank() == 0