from torch import optim

class Adam(object):
    """Factory that builds a ``torch.optim.Adam`` optimizer on demand.

    Hyper-parameters are captured at construction time; each call to the
    instance creates a fresh optimizer bound to ``parameters``.
    """

    def __init__(self, parameters, lr=0.001, betas=(0.9, 0.999), eps=1e-8):
        # `parameters` is the iterable of tensors handed to the optimizer.
        self.parameters = parameters
        self.lr = lr
        self.betas = betas
        self.eps = eps

    def __call__(self):
        # Gather the stored hyper-parameters and hand them to torch.
        config = dict(lr=self.lr, betas=self.betas, eps=self.eps)
        return optim.Adam(params=self.parameters, **config)

class SGD(object):
    """Factory that builds a ``torch.optim.SGD`` optimizer on demand.

    Hyper-parameters are captured at construction time; each call to the
    instance creates a fresh optimizer bound to ``parameters``.
    """

    def __init__(self, parameters, lr=0.001, momentum=0.949, weight_decay=0.0005):
        # `parameters` is the iterable of tensors handed to the optimizer.
        self.parameters = parameters
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay

    def __call__(self):
        # Gather the stored hyper-parameters and hand them to torch.
        config = dict(lr=self.lr, momentum=self.momentum,
                      weight_decay=self.weight_decay)
        return optim.SGD(params=self.parameters, **config)

class AdamW(object):
    """Factory that builds a ``torch.optim.AdamW`` optimizer on demand.

    Hyper-parameters are captured at construction time; each call to the
    instance creates a fresh optimizer bound to ``parameters``.
    """

    def __init__(self, parameters, lr=0.001, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0.01, amsgrad=False):
        # `parameters` is the iterable of tensors handed to the optimizer.
        self.parameters = parameters
        self.lr = lr
        self.betas = betas
        self.eps = eps
        self.weight_decay = weight_decay
        self.amsgrad = amsgrad

    def __call__(self):
        # Gather the stored hyper-parameters and hand them to torch.
        config = dict(lr=self.lr, betas=self.betas, eps=self.eps,
                      weight_decay=self.weight_decay, amsgrad=self.amsgrad)
        return optim.AdamW(params=self.parameters, **config)

class Adadelta(object):
    """Factory that builds a ``torch.optim.Adadelta`` optimizer on demand.

    Unlike the sibling factories in this file, the original implementation
    called ``self.parameters()``, i.e. it required a zero-argument callable
    (such as ``model.parameters``) instead of an iterable.  For consistency
    with ``Adam``/``SGD``/``AdamW`` — and without breaking existing callers —
    ``parameters`` may now be either an iterable of tensors or a callable
    returning one.
    """

    def __init__(self, parameters, lr=1, rho=0.9, eps=1e-6, weight_decay=0):
        # `parameters`: iterable of tensors OR a zero-arg callable yielding them.
        self.parameters = parameters
        self.lr = lr
        self.rho = rho
        self.eps = eps
        self.weight_decay = weight_decay

    def __call__(self):
        # Backward-compatible: invoke `parameters` when it is a callable
        # (the only form the original code supported), otherwise pass it
        # through directly like the other factories do.
        params = self.parameters() if callable(self.parameters) else self.parameters
        return optim.Adadelta(params, lr=self.lr, rho=self.rho,
                              eps=self.eps, weight_decay=self.weight_decay)

class RMSprop(object):
    """Factory that builds a ``torch.optim.RMSprop`` optimizer on demand.

    The original implementation called ``self.parameters()``, i.e. it
    required a zero-argument callable instead of an iterable like the
    sibling factories.  ``parameters`` may now be either form, keeping
    existing callers working.

    NOTE(review): the ``lr=1`` default differs from torch's RMSprop
    default (0.01) and looks like a copy-paste from ``Adadelta`` — kept
    as-is to preserve caller-visible behavior, but worth confirming.
    """

    def __init__(self, parameters, lr=1, alpha=0.99, eps=1e-8,
                 weight_decay=0, momentum=0, centered=False):
        # `parameters`: iterable of tensors OR a zero-arg callable yielding them.
        self.parameters = parameters
        self.lr = lr
        self.alpha = alpha
        self.eps = eps
        self.weight_decay = weight_decay
        self.momentum = momentum
        self.centered = centered

    def __call__(self):
        # Backward-compatible: invoke `parameters` when it is a callable
        # (the only form the original code supported), otherwise pass it
        # through directly like the other factories do.
        params = self.parameters() if callable(self.parameters) else self.parameters
        return optim.RMSprop(params, lr=self.lr, alpha=self.alpha, eps=self.eps,
                             weight_decay=self.weight_decay,
                             momentum=self.momentum, centered=self.centered)


class Sdiffer_weight_decay_SGD(object):
    '''SGD factory where weights and biases decay at different rates.

    Biases keep the default (no decay); the decay applied to weights
    depends on ``batch_size`` — the larger the batch, the slower the
    decay, and at batch size 1 or 2 the weight decay is effectively the
    default value.  (Translated from the original Chinese docstring.)

    With ``use=True`` (default) the model's parameters are split into
    three groups: biases (no decay), conv/linear weights (scaled decay),
    and everything else including batch-norm weights (no decay).
    With ``use=False`` a plain ``optim.SGD`` over all parameters is
    returned.
    '''

    def __init__(self, model, lr=0.001, nbs=64, weight_decay_default=0.0005,
                 momentum=0.949, batch_size=3, use=True, logger=None):
        self.nbs = nbs  # nominal batch size the decay is calibrated for
        self.weight_decay_default = weight_decay_default
        self.momentum = momentum
        self.lr = lr
        self.batch_size = batch_size
        # Number of gradient-accumulation steps needed to reach nbs.
        self.accumulate = max(round(self.nbs / self.batch_size), 1)
        # Scale the default weight decay by the effective batch size.
        self.weight_decay = (self.batch_size * self.accumulate / self.nbs) * self.weight_decay_default
        self.use = use
        self.model = model
        self.logger = logger  # optional; may be None (see __call__)

    def __call__(self):
        if self.use:
            pg0, pg1, pg2 = [], [], []  # other / decayed weights / biases
            for k, v in self.model.named_parameters():
                v.requires_grad = True
                if '.bias' in k:
                    pg2.append(v)  # biases: no weight decay
                elif '.weight' in k and '.bn' not in k:
                    pg1.append(v)  # conv/linear weights: apply weight decay
                else:
                    pg0.append(v)  # all else (incl. batch-norm weights)
            optimizer = optim.SGD(pg0, lr=self.lr, momentum=self.momentum, nesterov=True)
            optimizer.add_param_group({'params': pg1, 'weight_decay': self.weight_decay})  # pg1 with weight_decay
            optimizer.add_param_group({'params': pg2})  # pg2 (biases)
            # Bug fix: logger defaults to None; the original crashed here
            # with AttributeError whenever no logger was supplied.
            if self.logger is not None:
                self.logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
            del pg0, pg1, pg2
            return optimizer
        else:
            # Bug fix: the original passed the bound method
            # `self.model.parameters` without calling it, which
            # optim.SGD rejects (params must be an iterable of tensors).
            return optim.SGD(params=self.model.parameters(), lr=self.lr,
                             momentum=self.momentum, weight_decay=self.weight_decay)