

import torch
import parameters as p
from torch.optim.lr_scheduler import SequentialLR, LinearLR, ExponentialLR
from torch.optim import Optimizer
from torch.nn import Module


def to_one_hot(labels: torch.Tensor, num_classes: int = 10,
               smoothing=None) -> torch.Tensor:
    """Convert integer class labels to label-smoothed one-hot rows.

    Args:
        labels: 1-D tensor of integer class indices, one per sample.
        num_classes: width of each one-hot row (default 10, the original
            hard-coded value).
        smoothing: off-target probability mass; defaults to
            p.LABEL_SMOOTHING when None. The target entry gets
            ``1 - smoothing``.

    Returns:
        Float tensor of shape ``(len(labels), num_classes)``.
    """
    if smoothing is None:
        smoothing = p.LABEL_SMOOTHING
    out = torch.full((labels.size(0), num_classes), fill_value=smoothing)
    # Vectorized replacement for the original per-row Python loop:
    # write 1 - smoothing at each sample's label column in one call.
    # scatter_ requires int64 indices, hence .long().
    out.scatter_(1, labels.long().unsqueeze(1), 1 - smoothing)
    return out


def load() -> tuple[int, Module, Optimizer]:
    """Restore training state from ``p.CHECKPOINT_PATH + 'checkpoint.pt'``.

    Returns:
        ``(last_epoch, model, optimizer)``. The optimizer is a *fresh*
        Adam at ``p.LEARNING_RATE`` — NOTE(review): save() stores the old
        optimizer, but the original code discarded it here (it assigned
        ``cache['optimizer']`` and immediately overwrote it). If resuming
        momentum state is desired, return the cached optimizer instead;
        confirm intent with the author.
    """
    # weights_only=False keeps the original pickle-based behavior: the
    # checkpoint holds whole Module/Optimizer objects, which torch>=2.6
    # refuses under its new weights_only=True default.
    # SECURITY: pickle deserialization — only load trusted checkpoint files.
    cache = torch.load(p.CHECKPOINT_PATH + 'checkpoint.pt', weights_only=False)
    model = cache['model']
    last_epoch = cache['last_epoch']
    # Rebuild the optimizer from scratch (dead read of cache['optimizer']
    # removed — see NOTE above).
    optimizer = torch.optim.Adam(model.parameters(), lr=p.LEARNING_RATE)
    return last_epoch, model, optimizer


def save(model: Module, last_epoch: int, optimizer: Optimizer):
    """Persist the current training state to the checkpoint file.

    Stores the model, the optimizer, and the index of the last completed
    epoch under ``p.CHECKPOINT_PATH + 'checkpoint.pt'`` for load().
    """
    checkpoint = {
        'optimizer': optimizer,
        'last_epoch': last_epoch,
        'model': model,
    }
    torch.save(checkpoint, p.CHECKPOINT_PATH + 'checkpoint.pt')


def get_scheduler(optimizer: Optimizer, last_epoch: int = -1, warmup_steps=None):
    """Build a linear-warmup + exponential-decay learning-rate schedule.

    The LR ramps from ~0 up to the optimizer's base LR over
    ``warmup_steps`` steps, then decays by a factor of 0.997 per step.

    Args:
        optimizer: optimizer whose learning rate is scheduled.
        last_epoch: index of the last completed step; -1 (or 0, which is
            treated as -1) starts the schedule from scratch.
        warmup_steps: length of the warmup phase; defaults to
            p.WARM_UP_STEPS when None.

    Returns:
        A SequentialLR chaining the warmup and decay schedulers.
    """
    if warmup_steps is None:
        warmup_steps = p.WARM_UP_STEPS
    # Treat 0 as "fresh start": schedulers constructed with last_epoch >= 0
    # expect an 'initial_lr' already recorded in the param groups and raise
    # otherwise — presumably why this mapping exists; confirm when resuming.
    if last_epoch == 0:
        last_epoch = -1
    warmup_scheduler = LinearLR(
        optimizer=optimizer,
        start_factor=0.0000001,
        end_factor=1,
        total_iters=warmup_steps,
        last_epoch=last_epoch,
    )
    # Use the ExponentialLR name already imported at the top of the file
    # (original called torch.optim.lr_scheduler.ExponentialLR — same class).
    decay_scheduler = ExponentialLR(optimizer, gamma=0.997, last_epoch=last_epoch)
    # The deprecated `verbose=True` flag (removed in recent PyTorch) was
    # dropped; it only controlled stdout printing of LR changes.
    return SequentialLR(
        optimizer=optimizer,
        schedulers=[warmup_scheduler, decay_scheduler],
        milestones=[warmup_steps],
        last_epoch=last_epoch,
    )
