#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP


class TorchRunLauncherPlugin:
    """TorchRun training launcher plugin.

    Detects a torchrun-launched (DDP) run via the ``RANK`` environment
    variable and provides helpers to initialize the process group, wrap a
    model in DistributedDataParallel, and query rank information.
    """

    def __init__(self, config):
        """
        Initialize the TorchRun training launcher.

        Args:
            config (dict): Configuration dictionary. Recognized optional keys:
                - "backend": process-group backend name (default "nccl")
                - "ddp_ignore_params": iterable of parameter/buffer names that
                  DDP should not synchronize (default {"pos_cis"})
        """
        self.config = config
        # torchrun sets RANK for every worker; absence means a plain
        # single-process run.
        self.is_ddp = int(os.environ.get("RANK", -1)) != -1
        self.rank = 0
        self.local_rank = 0
        self.world_size = 1

    def _cfg(self, key, default):
        """Read *key* from the config with a fallback; tolerates non-dict configs."""
        if isinstance(self.config, dict):
            return self.config.get(key, default)
        return default

    def setup_distributed(self):
        """Initialize the distributed process group and bind the local GPU.

        Returns:
            str | None: the CUDA device string (e.g. "cuda:0") for this
            process, or None when not running under DDP.
        """
        if not self.is_ddp:
            return None

        # Backend is now configurable; "nccl" stays the default so existing
        # GPU-training callers are unaffected.
        dist.init_process_group(backend=self._cfg("backend", "nccl"))
        self.rank = int(os.environ["RANK"])
        self.local_rank = int(os.environ["LOCAL_RANK"])
        self.world_size = int(os.environ["WORLD_SIZE"])

        # Pin this process to its local GPU.
        device = f"cuda:{self.local_rank}"
        torch.cuda.set_device(device)

        return device

    def wrap_model(self, model):
        """
        Wrap the model for distributed training.

        Args:
            model: model instance

        Returns:
            The DDP-wrapped model when in a DDP environment, otherwise the
            model unchanged.
        """
        if self.is_ddp:
            # Names DDP must skip when broadcasting/syncing (presumably
            # rank-identical buffers such as rotary position tables —
            # confirm against the model definition). Default preserves the
            # original hard-coded {"pos_cis"}.
            model._ddp_params_and_buffers_to_ignore = set(
                self._cfg("ddp_ignore_params", {"pos_cis"})
            )
            model = DDP(model, device_ids=[self.local_rank])
        return model

    def cleanup(self):
        """Tear down the process group if one was initialized (safe no-op otherwise)."""
        if self.is_ddp and dist.is_initialized():
            dist.destroy_process_group()

    def is_available(self):
        """
        Check whether running in a DDP environment.

        Returns:
            bool: True when launched via torchrun (RANK is set).
        """
        return self.is_ddp

    def get_rank(self):
        """
        Get this process's global rank.

        Returns:
            int: the rank (0 before setup_distributed or outside DDP).
        """
        return self.rank

    def get_world_size(self):
        """
        Get the world size.

        Returns:
            int: the world size (1 before setup_distributed or outside DDP).
        """
        return self.world_size