import torch
from tqdm import tqdm
from helper import Helper


def run(hlpr: Helper) -> None:
    """
    description:
        run the full training process: for each of hlpr.params.E epochs,
        train for one epoch then evaluate on the test set, reporting
        progress through a tqdm bar
    params:
        @hlpr: project Helper holding params (E, device) and task
               (model, dataloaders, optimizer, loss_fn)
    rets:
        -
    """
    epochs: int = hlpr.params.E
    device = hlpr.params.device
    pbar_info = {
        "test_acc": 0.0,
        "cur_epoch": 1,
    }
    # total=epochs: the bar is advanced manually via update(1), so there is
    # no need to wrap a range iterable that would never be iterated
    pbar = tqdm(total=epochs, desc="training_process", ncols=100, postfix=pbar_info)

    for epoch in range(epochs):
        train(model=hlpr.task.model,
              dataloader=hlpr.task.train_dataloader,
              optimizer=hlpr.task.optimizer,
              loss_fn=hlpr.task.loss_fn,
              device=device,
              cur_epoch=epoch)

        test_loss, test_acc = test(model=hlpr.task.model,
                                   dataloader=hlpr.task.test_dataloader,
                                   loss_fn=hlpr.task.loss_fn,
                                   device=device)

        # show test loss too instead of discarding it;
        # cur_epoch is the 1-indexed epoch about to run next (epoch + 2)
        pbar_info["test_acc"] = test_acc
        pbar_info["test_loss"] = test_loss
        pbar_info["cur_epoch"] = epoch + 2
        pbar.set_postfix(pbar_info)
        pbar.update(1)

    pbar.close()


def train(model, dataloader, optimizer, loss_fn, device, cur_epoch=0, info_iter=100):
    """
    description:
        train the model on the given dataset for one epoch
    params:
        @model: model to optimize (moved to device by the caller)
        @dataloader: training dataloader yielding (X, y) batches
        @optimizer: optimizer stepping the model's parameters
        @loss_fn: loss function applied to (pred, y)
        @device: device the batches are moved to
        @cur_epoch: epoch index shown in the progress-bar description
        @info_iter: refresh the displayed loss every info_iter batches
    rets:
        @loss: loss of the last batch as a float, or None if the
               dataloader yielded no batches
    """
    size: int = len(dataloader.dataset)
    pbar_info = {"loss": "nan"}
    pbar = tqdm(range(size), desc=f"execute epoch:{cur_epoch}", ncols=150,
                leave=False, postfix=pbar_info)
    model.train()

    # keep the tensor in a separate name; rebinding `loss` to a float inside
    # the logging branch made the return type depend on the last batch index
    last_loss = None
    for idx, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        # Compute prediction error
        pred = model(X)
        last_loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        last_loss.backward()
        optimizer.step()

        if idx % info_iter == 0:
            # .item() syncs with the device, so only do it every info_iter batches
            pbar_info["loss"] = f"{last_loss.item():>7f}"
            pbar.set_postfix(pbar_info)

        pbar.update(len(X))
    pbar.close()
    # None for an empty dataloader instead of raising NameError
    return last_loss.item() if last_loss is not None else None


def test(model, dataloader, loss_fn, device):
    """
    description:
        test function
    params:
        @model:
        @dataloader:
        @loss_fn:
        @param device:
    rets:
        @loss:each epochs' test loss
        @acc:each epochs' test accuracy
    """

    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size

    return test_loss, 100*correct