import torch
import torch.nn.functional as F
from tqdm import tqdm

def train_one_epoch(dataloader, model, device, num_classes, batch_size, optimizer, i):
    """Run one epoch of cross-entropy training over `dataloader`.

    Args:
        dataloader: iterable yielding (samples, targets) batches.
        model: network whose forward pass is assumed to emit per-class
            probabilities (not logits) — TODO confirm; `torch.log` is applied
            directly to its output.
        device: device to move each batch to.
        num_classes: number of classes for one-hot encoding of `targets`.
        batch_size: kept for backward compatibility with existing callers;
            normalization now uses the actual batch length (see below).
        optimizer: optimizer stepping `model`'s parameters.
        i: epoch index, shown in the progress-bar description only.
    """
    for samples, targets in tqdm(dataloader,
                                 desc=f'\033[1;32mtraining   epoch[{i}]\033[0m',
                                 ncols=100,
                                 bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}'):
        samples = samples.to(device)
        targets = targets.to(device)
        outputs = model(samples)  # expected shape: (batch, num_classes)
        one_hot_labels = F.one_hot(targets, num_classes=num_classes)
        # Clamp before log: a probability of exactly 0 would yield -inf loss
        # and NaN gradients.
        loss_matrix = -torch.log(outputs.clamp_min(1e-12)) * one_hot_labels
        # Normalize by the actual number of samples in this batch, not the
        # configured batch_size — otherwise the last (smaller) batch of an
        # epoch gets an incorrectly scaled gradient.
        loss = loss_matrix.sum() / samples.size(0)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def seed_every(seed=42):
    """Seed every torch RNG (CPU and all CUDA devices) with `seed` and
    switch cuDNN into deterministic, non-benchmarking mode for
    reproducible runs."""
    for seed_fn in (torch.manual_seed,
                    torch.cuda.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

def focal_loss(prob, one_hot_labels, alpha, gamma):
    """Element-wise focal loss (Lin et al., 2017) for probability inputs.

    Args:
        prob: predicted class probabilities, values in [0, 1].
        one_hot_labels: one-hot (0/1) ground-truth tensor, same shape as `prob`.
        alpha: weight on the positive-class term; (1 - alpha) weights negatives.
        gamma: focusing exponent that down-weights easy examples.

    Returns:
        Tensor of per-element losses (same shape as `prob`), always finite.
    """
    # Clamp away from 0 and 1: log(0) is -inf, which produced NaN losses and
    # gradients whenever a probability saturated.
    eps = 1e-7
    prob = prob.clamp(eps, 1.0 - eps)
    loss_pos_matrix = alpha * (1 - prob) ** gamma * torch.log(prob) * one_hot_labels
    loss_neg_matrix = (1 - alpha) * prob ** gamma * torch.log(1 - prob) * (1 - one_hot_labels)
    return -(loss_pos_matrix + loss_neg_matrix)


def lr_lambda(epoch):
    """Learning-rate multiplier schedule: 0.1 for epochs 0-49, 0.01 afterwards."""
    if epoch < 50:
        return 0.1
    return 0.01
