import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import input_data

def validation(model, valid_loader):
    """Evaluate `model` on `valid_loader` and print the accuracy.

    The model's first output is taken as capsule vectors — assumed shape
    (batch, n_classes, caps_dim); TODO confirm against the model class.
    The predicted class is the one whose capsule has the largest L2 norm.

    Returns the accuracy in percent (0.0 when the loader yields nothing,
    instead of raising ZeroDivisionError as the original did).
    """
    correct, wrong = 0, 0
    # Inference only: no gradients needed.
    with torch.no_grad():
        for X, y in valid_loader:
            r = model(X, y)[0]
            # Capsule length (L2 norm over the last axis) encodes confidence.
            norms = torch.norm(r, p=2, dim=2)
            hits = (torch.argmax(norms, dim=1) == y).sum().item()
            correct += hits
            wrong += y.size(0) - hits
    total = correct + wrong
    # Guard against an empty loader (original divided by zero here).
    accuracy = round(correct / total * 100, 2) if total else 0.0
    print('Val Accuracy: {}%'.format(accuracy))
    return accuracy

def FSL_testing(model, fsl_loader):
    """Few-shot evaluation: 5 rounds over `fsl_loader`, printing accuracy.

    `model.ZeroShotPrediction(X, y)` is expected to return capsule vectors
    of shape (batch, n_classes, caps_dim) — TODO confirm against the model.
    The predicted class is the one with the largest L2 capsule norm.

    Returns the accuracy in percent (0.0 when no samples were seen,
    instead of raising ZeroDivisionError as the original did).
    """
    correct, wrong = 0, 0
    # Evaluation only: disable autograd (the original tracked gradients,
    # wasting memory and compute while testing).
    with torch.no_grad():
        for _ in range(5):  # 5 evaluation rounds over the loader
            for X, y in fsl_loader:
                vec = model.ZeroShotPrediction(X, y)
                vec_norm = torch.norm(vec, p=2, dim=2)
                hits = (vec_norm.argmax(dim=1) == y).sum().item()
                correct += hits
                wrong += y.size(0) - hits
    total = correct + wrong
    accuracy = round(correct / total * 100, 2) if total else 0.0
    print('Accuracy: {}%'.format(accuracy))
    return accuracy

class MyDataset(Dataset):
    """Pairs sequences (with their lengths) and labels for a DataLoader.

    Each item is ``((sequence, sequence_length), label)``.
    """

    def __init__(self, x, x_len, y):
        super().__init__()
        self.src = x        # input sequences
        self.s_len = x_len  # per-sequence lengths
        self.trg = y        # target labels

    def __getitem__(self, index):
        sample = (self.src[index], self.s_len[index])
        label = self.trg[index]
        return sample, label

    def __len__(self):
        return len(self.src)

def Max_margin_loss(feat, atte_w, y, m_upp = 0.9, m_low = 0.1, down_coeff = 0.5, alpha = 1e-4):
    """Capsule max-margin loss plus an attention-diversity penalty.

    feat:   (batch, out_caps, out_caps_dim) capsule output vectors
    atte_w: (batch, head_num, seq_len) attention weights
    y:      (batch,) integer class labels

    The margin term pushes the target class's capsule norm above `m_upp`
    and every other class's norm below `m_low` (down-weighted by
    `down_coeff`). The penalty term ||A A^T - I||_F^2, scaled by `alpha`,
    encourages attention heads to attend to different positions.
    """
    # (batch, out_caps) capsule lengths.
    norms = torch.norm(feat, p=2, dim=2)
    K = feat.size(1)
    # One-hot mask of the target class, on feat's device/dtype.
    target = torch.eye(K, device=feat.device, dtype=feat.dtype)[y]
    # Hinge terms, vectorized over the whole batch (replaces the original
    # O(batch * K) Python double loop; clamp also keeps the result a
    # tensor, where the original's `max(0, ...)` could yield a plain int).
    upper = torch.clamp(m_upp - norms, min=0) ** 2
    lower = torch.clamp(norms - m_low, min=0) ** 2
    loss = (target * upper + (1 - target) * down_coeff * lower).sum()
    # Build the identity on atte_w's device: the original `torch.eye(...)`
    # defaulted to CPU and would fail for GPU tensors.
    eye = torch.eye(atte_w.size(1), device=atte_w.device, dtype=atte_w.dtype)
    loss_norm = torch.norm(torch.bmm(atte_w, atte_w.permute([0, 2, 1])) - eye)
    loss = loss + alpha * (loss_norm ** 2)
    return loss

def train(model, train_loader, epoch = 20, lr = 1e-4, verbose = False):
    """Train `model` with Adam on the max-margin loss.

    model:        callable returning (capsule_features, attention_weights)
                  for a (X, y) batch; must expose .parameters()
    train_loader: yields (X, y) batches
    epoch:        number of passes over the loader
    lr:           Adam learning rate
    verbose:      if True, print the loss of the first batch of each epoch
    """
    optimizer = optim.Adam(model.parameters(), lr = lr)
    if verbose:
        # Sentinel below any real epoch index so epoch 0 also prints
        # (the original started at 0 and silently skipped the first epoch).
        e_t = -1
    for e in range(epoch):
        for X, y in train_loader:
            result, atte = model(X, y)
            loss = Max_margin_loss(result, atte, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if verbose and e != e_t:
                e_t = e
                print('loss: ', loss.detach().item())
