from .optimizer import Optimizer
import torch
import cupy as cp

class DistrubutedSGD(Optimizer):
    """Data-parallel SGD: all-reduce each parameter's gradient across a
    ``torch.distributed`` process group, then take a plain SGD step.

    NOTE: the class name is misspelled but kept for backward
    compatibility; see the ``DistributedSGD`` alias below.
    """

    def __init__(self, params, group=None, lr=0.01):
        """
        Args:
            params: parameters to optimize (forwarded to ``Optimizer``).
            group: ``torch.distributed`` process group to reduce over;
                ``None`` means the default (world) group.
            lr: learning rate for the SGD step.
        """
        super().__init__(params)
        self.lr = lr
        self.group = group

    def update_one(self, param):
        """All-reduce ``param``'s gradient across the group, then update
        ``param.data`` in place: ``w <- w - lr * g``.

        ``all_reduce`` SUMS gradients across ranks; if an average is
        intended, fold the ``1/world_size`` factor into ``lr`` upstream.
        """
        # Zero-copy view of the CuPy gradient as a torch CUDA tensor via
        # DLPack (requires cupy>=10 and torch>=1.10). This replaces the
        # previous .get()/.to("cuda")/.cpu()/cp.asarray chain, which did
        # four device<->host copies per parameter.
        torch_grad = torch.from_dlpack(param.grad.data)
        # all_reduce is in-place and torch_grad shares memory with
        # param.grad.data, so the reduced values land directly in the
        # CuPy array -- no copy back is needed.
        # NOTE(review): assumes CuPy and torch operate on the same CUDA
        # stream (both default to the legacy default stream) -- confirm
        # if custom streams are in use.
        torch.distributed.all_reduce(torch_grad, group=self.group)
        param.data -= self.lr * param.grad.data


# Correctly-spelled, backward-compatible alias for the misspelled class.
DistributedSGD = DistrubutedSGD

