"""
utils for training, including optimizers, learning rate adjustment, etc.
"""
import inspect
import math
import torch


def configure_optimizers(model, weight_decay, learning_rate, betas, device_type):
    """Build an AdamW optimizer with selective (decoupled) weight decay.

    Decoupled weight decay update:
        moving_avg = alpha * moving_avg + (1 - alpha) * w.grad
        w = w - lr * moving_avg - lr * weight_decay * w

    Tensors with dim >= 2 (matmul weights, embeddings) are decayed; 1-D
    tensors (biases, layernorm gains) are not.

    Args:
        model: module whose named_parameters() are optimized.
        weight_decay: decay coefficient applied to the >=2D parameter group.
        learning_rate: base learning rate for both groups.
        betas: (beta1, beta2) passed to AdamW.
        device_type: 'cuda' enables the fused kernel when available.

    Returns:
        A configured torch.optim.AdamW instance with two param groups.
    """
    # Only parameters that require gradients participate in optimization.
    trainable = {name: param for name, param in model.named_parameters() if param.requires_grad}
    # Partition by dimensionality: >=2D tensors get weight decay, <2D do not.
    decay_params, nodecay_params = [], []
    for param in trainable.values():
        (decay_params if param.dim() >= 2 else nodecay_params).append(param)
    optim_groups = [
        {'params': decay_params, 'weight_decay': weight_decay},
        {'params': nodecay_params, 'weight_decay': 0.0},
    ]
    num_decay_params = sum(p.numel() for p in decay_params)
    num_nodecay_params = sum(p.numel() for p in nodecay_params)
    print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
    print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
    # Use the fused CUDA implementation when this PyTorch build exposes it.
    use_fused = (device_type == 'cuda'
                 and 'fused' in inspect.signature(torch.optim.AdamW).parameters)
    extra_args = dict(fused=True) if use_fused else dict()
    optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
    print(f"using fused AdamW: {use_fused}")

    return optimizer


def get_lr(method, cur_iter, warmup_iters, lr_decay_iters, min_lr, max_lr, default_lr=6e-4):
    """Return the learning rate for the given training iteration.

    Args:
        method: 'cosine_with_warmup' for linear warmup followed by cosine
            decay, or None for a constant rate.
        cur_iter: current iteration number (0-based).
        warmup_iters: number of linear-warmup iterations.
        lr_decay_iters: iteration at which the cosine decay reaches min_lr.
        min_lr: floor learning rate after decay completes.
        max_lr: peak learning rate reached at the end of warmup.
        default_lr: constant rate used when method is None; if it is None,
            max_lr is used instead.

    Returns:
        The learning rate (float) for cur_iter.

    Raises:
        NotImplementedError: if method is not a supported schedule.
    """
    if method == 'cosine_with_warmup':
        # 1) linear warmup for warmup_iters steps (lr is 0 at cur_iter == 0)
        if cur_iter < warmup_iters:
            return max_lr * cur_iter / warmup_iters
        # 2) past the decay horizon: hold at the floor
        if cur_iter > lr_decay_iters:
            return min_lr
        # 3) degenerate schedule with no decay window: treat decay as
        # complete. This also avoids a ZeroDivisionError below when
        # lr_decay_iters == warmup_iters.
        if lr_decay_iters == warmup_iters:
            return min_lr
        # 4) in between, cosine decay from max_lr down to min_lr
        decay_ratio = (cur_iter - warmup_iters) / (lr_decay_iters - warmup_iters)
        assert 0 <= decay_ratio <= 1
        coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 1..0
        return min_lr + coeff * (max_lr - min_lr)
    elif method is None:
        # Constant learning rate; fall back to max_lr when no default given.
        if default_lr is None:
            return max_lr
        return default_lr
    else:
        raise NotImplementedError(f"unknown lr schedule method: {method!r}")
