from math import log
import oneflow as torch
import logging

logger = logging.getLogger(__name__)


# class Optimizer(object):
#     def __init__(self, model, params, update_lr_stepwise=False, parallel_mode='dp'):

#         self.params = params
#         self.model = model
#         self.update_lr_stepwise = update_lr_stepwise
#         self.parralle_mode = parallel_mode

#         self.lr = 0.0
#         self.global_step = 1
#         self.global_epoch = 0

#         weight_decay = params['weight_decay'] if 'weight_decay' in params else 0.0
#         logging.info('Apply %s optimizer with weight decay %.6f' % (params['optimizer'], weight_decay))

#         if params['optimizer'] == 'adam':
#             self.optimizer = torch.optim.Adam(
#                 filter(lambda p: p.requires_grad, model.parameters()),
#                 lr=self.lr, betas=(0.9, 0.98), eps=1e-9,
#                 weight_decay=weight_decay)
#         elif params['optimizer'] == 'sgd':
#             self.optimizer = torch.optim.SGD(
#                 filter(lambda p: p.requires_grad, model.parameters()),
#                 lr=self.lr, momentum=0.9, weight_decay=weight_decay)
#         elif params['optimizer'] == 'adadelta':
#             self.optimizer = torch.optim.Adadelta(
#                 filter(lambda p: p.requires_grad, model.parameters()),
#                 weight_decay=weight_decay)
#         else:
#             raise NotImplementedError

#     def zero_grad(self):
#         self.optimizer.zero_grad()

#     def state_dict(self):
#         return self.optimizer.state_dict()

#     def load_state_dict(self, state_dict):
#         self.optimizer.load_state_dict(state_dict)

#     def get_lr(self):
#         raise NotImplementedError

#     def set_lr(self, lr=None):
#         new_lr = self.lr if lr is None else lr
#         for param_group in self.optimizer.param_groups:
#             param_group['lr'] = new_lr

#     def step(self):
#         self.optimizer.step()
#         if self.update_lr_stepwise:
#             self.lr = self.get_lr()
#             self.set_lr()
#         self.global_step += 1

#     def epoch(self):
#         if not self.update_lr_stepwise:
#             self.lr = self.get_lr()
#             self.set_lr()
#         self.global_epoch += 1


# class TransformerOptimizer(Optimizer):

#     def __init__(self, model, params, parallel_mode='dp'):
#         super(TransformerOptimizer, self).__init__(model, params, True, parallel_mode)

#         self.model_size = params['model_size']
#         self.factor = params['lr']
#         self.warmup_steps = params['warmup_steps']

#         logger.info('[TransformerOptimizer] Set model_size as %d' % self.model_size)
#         logger.info('[TransformerOptimizer] Set lr_factor as %.2f' % self.factor)
#         logger.info('[TransformerOptimizer] Set warmup_steps as %d' % self.warmup_steps)

#         self.lr = self.get_lr()
#         self.set_lr()

#     def get_lr(self):
#         return self.factor * self.model_size ** (-0.5) * min(self.global_step ** (-0.5), self.global_step * self.warmup_steps ** (-1.5))


# class WarmupAndDecayOptimizer(Optimizer):

#     def __init__(self, model, params, parallel_mode='dp'):
#         super(WarmupAndDecayOptimizer, self).__init__(model, params, True, parallel_mode)

#         self.init_lr = params['init_lr']
#         self.peak_lr = params['peak_lr']
#         self.warmup_steps = params['warmup_steps']

#         logger.info('[WarmupAndDecayOptimizer] Set init_lr as %f' % self.init_lr)
#         logger.info('[WarmupAndDecayOptimizer] Set peak_lr as %f' % self.peak_lr)
#         logger.info('[WarmupAndDecayOptimizer] Set warmup_steps as %d' % self.warmup_steps)

#         self.set_lr(lr=self.init_lr)

#     def get_lr(self):
#         # NOTE(review): this is copied from TransformerOptimizer and references
#         # self.factor / self.model_size, which WarmupAndDecayOptimizer never sets;
#         # it would raise AttributeError if uncommented — should be a warmup-then-decay
#         # schedule built from init_lr / peak_lr / warmup_steps.
#         return self.factor * self.model_size ** (-0.5) * min(self.global_step ** (-0.5), self.global_step * self.warmup_steps ** (-1.5))

# class FixedLROptimizer(Optimizer):
#     def __init__(self, model, params, parallel_mode='dp'):
#         super(FixedLROptimizer, self).__init__(model, params, update_lr_stepwise=False, parallel_mode=parallel_mode)

#         self.lr = self.params['lr']
#         logger.info('[FixedLROptimizer] Set lr as %f' % self.lr)

#     def get_lr(self):
#         return self.lr


# class LinearOptimizer(Optimizer):
#     def __init__(self, model, params, parallel_mode='dp'):
#         super(LinearOptimizer, self).__init__(model, params,
#               update_lr_stepwise=False, parallel_mode=parallel_mode)

#         self.init_lr = params['init_lr']
#         self.final_lr = params['final_lr']

#         logger.info('[LinearOptimizer] Set init_lr as %f' % self.init_lr)
#         logger.info('[LinearOptimizer] Set final_lr as %f' % self.final_lr)

#         self.epochs = params['epochs'] - 1
#         self.decay_factor = (self.final_lr - self.init_lr) / self.epochs
#         logger.info('[LinearOptimizer] The computed decay_factor is %f' % self.decay_factor)

#     def get_lr(self):
#         return self.decay_factor * self.global_epoch + self.init_lr


# KindsOfOptimizer = {
#     'fixed': FixedLROptimizer,
#     'linear': LinearOptimizer
# }


# def build_optimizer(args, params, model):
#     if params['train']['scheduler'] == 'stepwise':
#         return TransformerOptimizer(model, params['train'], parallel_mode=args.parallel_mode)
#     elif params['train']['scheduler'] == 'fixed':
#         return FixedLROptimizer(model, params['train'], parallel_mode=args.parallel_mode)
#     elif params['train']['scheduler'] == 'linear':
#         return LinearOptimizer(model, params['train'], parallel_mode=args.parallel_mode)
#     else:
#         raise NotImplementedError


# Registry mapping an optimizer name (as it appears in config files) to the
# corresponding constructor in torch.optim (here `torch` is aliased to oneflow).
# Usage: BuildOptimizer[name](model.parameters(), lr=..., ...)
BuildOptimizer = dict(
    sgd=torch.optim.SGD,
    adam=torch.optim.Adam,
)


# class BaseLRScheduler(object):
#     def __init__(self) -> None:

#     def state

# class TransformerScheduler(BaseLRScheduler):
#     def