#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import torch
from accelerate import Accelerator


class AccelerateLauncherPlugin:
    """Plugin that configures and launches training via HuggingFace Accelerate."""

    def __init__(self, config):
        """
        Initialize the Accelerate training launcher.

        Args:
            config (dict): Configuration dictionary. Recognized keys:
                'dtype' ('bfloat16' | 'float16', default 'bfloat16') and
                'accumulation_steps' (int, default 1).
        """
        self.config = config
        self.accelerator = None  # created lazily by prepare_accelerator()

    @staticmethod
    def _torch_supports_bf16():
        """
        Return True if the installed torch/CUDA stack supports bf16 mixed precision.

        Compares (major, minor) numerically. The previous check used a raw
        string comparison (torch.__version__ >= "1.10"), which is wrong
        lexicographically — e.g. "1.9" > "1.10" — and would enable bf16 on
        unsupported torch versions.

        Returns:
            bool: True if torch >= 1.10, CUDA is available, and the device
                reports bf16 support (when the query API exists).
        """
        try:
            # Strip local-version suffixes such as "+cu118"; tolerate
            # pre-release tails like "2.1.0a0" (only first two fields used).
            parts = torch.__version__.split("+")[0].split(".")
            version = (int(parts[0]), int(parts[1]))
        except (ValueError, IndexError):
            # Unparseable version string: be conservative and disable bf16.
            return False
        if version < (1, 10) or not torch.cuda.is_available():
            return False
        # CUDA being available does not imply bf16 hardware support
        # (requires Ampere or newer); query when the API exists (torch >= 1.10).
        if hasattr(torch.cuda, "is_bf16_supported"):
            return torch.cuda.is_bf16_supported()
        return True

    def prepare_accelerator(self):
        """
        Create the Accelerator instance with mixed precision chosen from config.

        Falls back to full precision ("no") when the requested 'bfloat16'
        dtype is not supported by the current torch/CUDA environment.

        Returns:
            Accelerator: the newly created (and stored) accelerator.
        """
        dtype = self.config.get('dtype', 'bfloat16')

        mixed_precision = "no"
        if dtype == "bfloat16":
            if self._torch_supports_bf16():
                mixed_precision = "bf16"
        elif dtype == "float16":
            mixed_precision = "fp16"

        self.accelerator = Accelerator(
            mixed_precision=mixed_precision,
            gradient_accumulation_steps=self.config.get('accumulation_steps', 1)
        )

        return self.accelerator

    def prepare_components(self, model, optimizer, train_loader):
        """
        Prepare the model, optimizer and data loader for distributed training.

        Args:
            model: Model instance.
            optimizer: Optimizer instance.
            train_loader: Training data loader.

        Returns:
            tuple: The prepared (model, optimizer, train_loader).

        Raises:
            RuntimeError: If prepare_accelerator() has not been called yet.
        """
        if self.accelerator is None:
            raise RuntimeError("Accelerator未初始化，请先调用prepare_accelerator方法")

        return self.accelerator.prepare(model, optimizer, train_loader)

    def is_available(self):
        """
        Check whether the accelerate package is importable.

        Returns:
            bool: True if accelerate can be imported, False otherwise.
        """
        try:
            import accelerate
            return True
        except ImportError:
            return False