import os
import torch
from torch import nn
from tqdm import tqdm
import sys
from config_hyperparam import cfg
import torch.nn.functional as F


def negative_pair_loss(batch_vector, labels, margin=0.4):
    """Hinge loss pushing apart embeddings of differently-labeled samples.

    For every unordered pair (i, j) with labels[i] != labels[j], penalizes
    cosine similarity above ``margin``:  mean over negative pairs of
    max(0, cos(x_i, x_j) - margin).

    Args:
        batch_vector: (B, D) float tensor of embeddings.
        labels: (B,) tensor of integer class labels.
        margin: cosine-similarity threshold below which negative pairs
            incur no penalty (default 0.4, the original hard-coded value).

    Returns:
        Scalar tensor; 0 when the batch contains no negative pair.
    """
    features = F.normalize(batch_vector, dim=1)
    cos_matrix = torch.matmul(features, features.t())

    # 1 where labels differ; keep only the strict upper triangle so each
    # unordered pair is counted once (diagonal is a self-pair, never negative).
    neg_label_matrix = (labels.unsqueeze(1) != labels.unsqueeze(0)).float()
    neg_label_matrix.fill_diagonal_(0)
    neg_label_matrix = torch.triu(neg_label_matrix, diagonal=1)
    neg_pairs = neg_label_matrix.sum().item()

    hinge = torch.clamp(cos_matrix - margin, min=0)
    loss = (hinge * neg_label_matrix).sum()
    if neg_pairs > 0:
        return loss / neg_pairs
    # No negative pair in the batch: contribute a constant zero.
    return torch.zeros((), device=batch_vector.device)



def train_one_epoch(model, optimizer, data_loader, device, epoch, center_loss, optimzer4center):
    """Train ``model`` for one epoch and return (mean loss, accuracy).

    Total loss = CrossEntropy(label_smoothing=0.1)
               + cfg.ct_weight  * center_loss
               + cfg.neg_weight * negative_pair_loss.

    Args:
        model: network returning (logits, embedding) per batch.
        optimizer: optimizer for the model parameters.
        data_loader: iterable of (images, labels) batches.
        device: device the batches are moved to.
        epoch: epoch index, used only for the progress-bar label.
        center_loss: callable center-loss module applied to the embeddings.
        optimzer4center: separate optimizer for the center-loss parameters.

    Returns:
        (cur_loss, cur_acc): running mean loss per step and accuracy over
        all samples seen this epoch (0.0, 0.0 for an empty loader).
    """
    model.train()

    cls_loss_func = torch.nn.CrossEntropyLoss(label_smoothing=0.1)

    accu_loss = torch.zeros(1).to(device)  # running sum of batch losses
    accu_num = torch.zeros(1).to(device)   # running count of correct predictions

    optimizer.zero_grad()
    optimzer4center.zero_grad()

    sample_num = 0
    cur_loss, cur_acc = 0.0, 0.0  # defined even if the loader yields nothing
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        sample_num += images.shape[0]
        pres, x = model(images)
        c_loss = center_loss(x, labels) * cfg.ct_weight
        neg_loss = negative_pair_loss(x, labels) * cfg.neg_weight
        cls_loss = cls_loss_func(pres, labels)
        loss = c_loss + cls_loss + neg_loss

        # Bail out BEFORE backward so NaN/Inf never reaches the gradients.
        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)

        loss.backward()

        # Softmax is monotonic, so argmax on the raw logits is equivalent.
        pred_classes = pres.argmax(dim=1)
        accu_num += torch.eq(pred_classes, labels).sum()
        accu_loss += loss.detach()

        cur_loss = accu_loss.item() / (step + 1)
        cur_acc = accu_num.item() / sample_num

        # NOTE: the old width=os.get_terminal_size() kwarg was unused by the
        # format string and raised OSError when stdout was not a tty.
        data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}, lr: {:.6f}".format(
            epoch,
            cur_loss,
            cur_acc,
            optimizer.param_groups[0]["lr"])

        optimizer.step()
        optimzer4center.step()
        optimizer.zero_grad()
        optimzer4center.zero_grad()

    return cur_loss, cur_acc


@torch.no_grad()
def evaluate(model, data_loader, device, epoch, center_loss):
    """Evaluate ``model`` on ``data_loader`` and return (mean loss, accuracy).

    Uses the same composite loss as training (cross-entropy + weighted
    center loss + weighted negative-pair loss) but without label smoothing
    and without any parameter updates.

    Returns:
        (cur_loss, cur_acc): running mean loss per step and overall
        accuracy (0.0, 0.0 for an empty loader).
    """
    model.eval()
    cls_loss_func = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)
    accu_num = torch.zeros(1).to(device)

    sample_num = 0
    cur_loss, cur_acc = 0.0, 0.0  # defined even if the loader yields nothing
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        sample_num += images.shape[0]
        pres, x = model(images)

        c_loss = center_loss(x, labels) * cfg.ct_weight
        neg_loss = negative_pair_loss(x, labels) * cfg.neg_weight
        cls_loss = cls_loss_func(pres, labels)

        loss = c_loss + cls_loss + neg_loss

        # Softmax is monotonic, so argmax on the raw logits is equivalent.
        pred_classes = pres.argmax(dim=1)
        accu_num += torch.eq(pred_classes, labels).sum()
        accu_loss += loss.detach()

        cur_loss = accu_loss.item() / (step + 1)
        cur_acc = accu_num.item() / sample_num

        # NOTE: the old width=os.get_terminal_size() kwarg was unused by the
        # format string and raised OSError when stdout was not a tty.
        data_loader.desc = "[test epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch, cur_loss, cur_acc)

    return cur_loss, cur_acc


@torch.no_grad()
def test(model, data_loader, device):
    """Load the best checkpoint and run a final test pass.

    Restores ``cfg.best_model_path`` into ``model``, then evaluates with
    plain cross-entropy (no center/negative-pair terms), printing a running
    loss/accuracy progress bar.

    Returns:
        (predictions, true_labels): flat Python lists of predicted and
        ground-truth class indices over the whole loader.
    """
    predictions = []
    true_labels = []

    assert os.path.isfile(cfg.best_model_path)
    # map_location keeps this working when a GPU-saved checkpoint is
    # restored on a CPU-only (or different-device) host.
    checkpoint = torch.load(cfg.best_model_path, map_location=device)
    print(model.load_state_dict(checkpoint["model"]))

    cls_loss_func = torch.nn.CrossEntropyLoss()

    model.eval()

    accu_num = torch.zeros(1).to(device)
    accu_loss = torch.zeros(1).to(device)

    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        sample_num += images.shape[0]
        pre, _ = model(images)

        cls_loss = cls_loss_func(pre, labels)

        # Softmax is monotonic, so argmax on the raw logits is equivalent.
        pred_classes = pre.argmax(dim=1)
        accu_num += torch.eq(pred_classes, labels).sum()
        accu_loss += cls_loss

        cur_loss = accu_loss.item() / (step + 1)
        cur_acc = accu_num.item() / sample_num

        data_loader.desc = "[final test] loss: {:.3f}, acc: {:.3f}".format(cur_loss, cur_acc)

        predictions.extend(pred_classes.tolist())
        true_labels.extend(labels.tolist())
    return predictions, true_labels