import os
import time
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as tv
import torchvision.transforms as transforms
import torchnet as tnt
from args import *
from tool import *
from wheel import *
import evaluate
from models import *


# Build a run tag from the hyper-parameters, then nest the log dir under it.
_tag_parts = [args.model_type]
if args.bias:
    _tag_parts.append("b")
if args.Lambda != 0:
    _tag_parts.append("{}lambda".format(args.Lambda))
if args.model_type in ("a-softmax", "am-softmax"):
    _tag_parts.append("{}m".format(args.margin))
if args.model_type in ("normface", "am-softmax"):
    # "dy-s" marks a trainable (dynamic) scale, plain "s" a fixed one.
    if args.train_scale:
        _tag_parts.append("{}dy-s".format(args.scale))
    else:
        _tag_parts.append("{}s".format(args.scale))
if args.norm_fea:
    _tag_parts.append("nf")
info_str = "_".join(_tag_parts)
args.log_path = os.path.join(args.log_path, info_str)
# Checkpoint file name pattern: "<epoch>e_<accuracy>acc.pth".
save_pattern = os.path.join(args.log_path, "{}e_{:.4g}acc.pth")

# Record the software/hardware environment for reproducibility.
logger = Logger(args)
for _env_msg in (
        "PyTorch version: {}".format(torch.__version__),
        "cuda version: {}".format(torch.version.cuda),
        "cuDNN version: {}".format(torch.backends.cudnn.version()),
        "GPU type: {}".format(torch.cuda.get_device_name(0))):
    logger.log(_env_msg)


# Shared MNIST preprocessing: tensor conversion + normalisation with the
# standard MNIST mean/std (0.1307 / 0.3081).
_mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

train_loader = torch.utils.data.DataLoader(
    tv.datasets.MNIST(args.data_path, train=True, download=True,
                      transform=_mnist_transform),
    batch_size=args.batch_size, shuffle=True)

# NOTE(review): the test loader is shuffled too, so the ~2000 samples
# collected for visualisation in test() form a random subset each epoch.
test_loader = torch.utils.data.DataLoader(
    tv.datasets.MNIST(args.data_path, train=False,
                      transform=_mnist_transform),
    batch_size=args.batch_size, shuffle=True)


# NOTE: the class-centre matrix C is initialised below (before the optimizer
# setup).  A duplicate `C = torch.randn(args.n_class, 2).cuda()` that used to
# live here was dead code — unconditionally overwritten before first use —
# and has been removed.

# Feature extractor: LeNet++ producing a 2-D embedding (easy to visualise).
model = LeNetPP().cuda()


# Select the base classification loss from the model type.
# model type: naive, l-/a-softmax, am-softmax, centre, normface
if args.model_type in ("naive", "centre", "normface"):
    criterion = nn.CrossEntropyLoss().cuda()
elif args.model_type == "a-softmax":
    # logit = (cos_theta, phi_theta)
    criterion = AngularSoftmaxLoss(
        lambda_min=5, lambda_max=1500, gamma=0).cuda()
elif args.model_type == "am-softmax":
    criterion = am_softmax_loss
elif args.model_type == "c-contrastive":
    criterion = C_contrastive_loss  # logit = D
elif args.model_type == "c-triplet":
    criterion = C_triplet_loss  # logit = D
else:
    # Previously an unrecognised model type silently left `criterion`
    # undefined and failed later with a NameError; fail fast instead.
    raise ValueError("unknown model type: {}".format(args.model_type))


# Class centres for the centre loss: one 2-D centre per class, drawn from a
# standard normal and refined by the running update in the training loop.
C = torch.randn(args.n_class, 2).cuda()
# Alternative initialisations kept for reference (zeros / data-driven mean):
# C = torch.zeros(args.n_class, 2).cuda()
# labels_C = torch.arange(args.n_class).cuda()  # [c]
# print("- init centres -")
# _cnt = torch.zeros(args.n_class).cuda()
# with torch.no_grad():
#     for image, label in train_loader:
#         image, label = image.cuda(), label.cuda()
#         _fea, _ = model(image)
#         for i in range(label.size(0)):
#             C[label[i]] += _fea[i]
#             _cnt[label[i]] += 1
#         if _cnt.min() > 2000:
#             break
# C = C / _cnt.unsqueeze(1)


# Group parameters so that only conv/fc weights get weight decay and biases
# get a doubled learning rate; everything else (e.g. PReLU slopes) gets
# neither.
var_list_w, var_list_b, var_list_else = [], [], []
for name, p in model.named_parameters():
    if "bias" in name:
        var_list_b.append(p)
    elif "weight" in name:
        # NOTE(review): the substring test assumes PReLU modules sit at
        # indices 1/3 of a Sequential ("...1.weight") — fragile if LeNetPP's
        # layer layout changes; verify against the model definition.
        if "1.weight" in name or "3.weight" in name:  # PReLU
            var_list_else.append(p)
        else:  # conv, fc
            var_list_w.append(p)
    else:
        var_list_else.append(p)

param_group = [
    {"params": var_list_else},
    {"params": var_list_w, "weight_decay": args.weight_decay},
    {"params": var_list_b, "lr": args.lr * 2}
]
if args.model_type == "am-softmax":
    # Adam with its default lr; the per-group "lr" above still overrides it
    # for the bias group.
    optimizer = torch.optim.Adam(param_group)#, lr=args.lr)
else:
    optimizer = torch.optim.SGD(
        param_group, lr=args.lr, momentum=args.momentum, nesterov=True)
    # Milestones are iteration counts: scheduler.step() is called once per
    # batch in the training loop below, not once per epoch.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[5000, 8000, 10000], gamma=0.8)
    # scheduler = torch.optim.lr_scheduler.MultiStepLR(
    #     optimizer, milestones=[15, 30, 45, 60, 75, 90, 105, 120], gamma=0.4)
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(
    #     optimizer, gamma=0.8)


def adjust_learning_rate(optimizer, epoch):
    """Step-decay schedule used for A-Softmax: divide the base learning rate
    by 10 every 5 epochs and write the result into every parameter group.

    ref: https://github.com/Joyako/SphereFace-pytorch/blob/master/train.py#L103
    """
    lr = args.lr * 0.1 ** (epoch // 5)
    for group in optimizer.param_groups:
        group['lr'] = lr


# Running averages accumulated over each test pass.
avg_loss = tnt.meter.AverageValueMeter()
avg_xent = tnt.meter.AverageValueMeter()
avg_centre = tnt.meter.AverageValueMeter()

# Metric history: "big" metrics are better when higher (accuracy),
# "small" metrics are better when lower (losses).
record = Record()
record.add_big("acc")
record.add_small("loss", "loss_xent", "loss_centre")


def test(epoch):
    """Run one evaluation pass over the test set.

    Accumulates cross-entropy / centre / total losses, computes accuracy,
    updates the global `record`, and dumps 2-D feature scatter plots of the
    first ~2000 samples against both classifier weights and class centres.

    Args:
        epoch: current epoch index, used only to name the output plots.

    Returns:
        Test-set classification accuracy in [0, 1].
    """
    model.eval()
    avg_loss.reset()
    avg_xent.reset()
    avg_centre.reset()
    n_correct = 0
    fea_list, lab_list = [], []
    _cnt = 0  # number of feature vectors collected for visualisation
    with torch.no_grad():
        for image, label in test_loader:
            _batch_sz = label.size(0)
            X, L = image.cuda(), label.cuda()
            # NOTE(review): the training loop calls model(image) with no
            # second argument — confirm forward() defaults norm_fea=False.
            fea, logit = model(X, args.norm_fea)

            # NOTE(review): for am-softmax / c-contrastive the training loop
            # passes extra scale/margin kwargs; here the criterion defaults
            # are used — verify they match.
            loss_xent = criterion(logit, L)
            loss_centre = centre_loss(fea, L, C)
            # BUG FIX: previously summed `loss_base`, a stale global leaked
            # from the training loop, instead of this batch's loss_xent.
            loss = loss_xent + args.Lambda * loss_centre
            avg_xent.add(loss_xent.cpu().item(), _batch_sz)
            avg_centre.add(loss_centre.cpu().item(), _batch_sz)
            avg_loss.add(loss.cpu().item(), _batch_sz)

            # Keep roughly the first 2000 samples for the scatter plots.
            if _cnt < 2000:
                fea_list.append(fea.cpu().numpy())
                lab_list.append(label.numpy())
                _cnt += fea.size(0)

            if args.model_type == "a-softmax":
                logit = logit[0]  # logit = (cos(x), phi(x))
            elif args.model_type in ("c-contrastive", "c-triplet"):
                logit = - logit  # euc dist -> smaller is more similar
            pred = logit.argmax(dim=1).cpu()
            n_correct += pred.eq(label.view(-1)).sum().item()

    acc = n_correct / len(test_loader.dataset)
    record.update("acc", acc)
    record.update("loss", avg_loss.value()[0])  # value() -> (mean, std)
    record.update("loss_xent", avg_xent.value()[0])
    record.update("loss_centre", avg_centre.value()[0])

    # Renamed from `F`/`L` to avoid shadowing torch.nn.functional and the
    # cuda label tensor above.
    feats = np.vstack(fea_list)
    labs = np.concatenate(lab_list)
    # evaluate.t_sne(feats, labs, "fea_{}".format(epoch))
    W = model.state_dict()["clf_layer.weight"].cpu().numpy()  # [n_class, 2]
    evaluate.vis_FW(feats, labs, W, np.arange(args.n_class), "fea_wv_{}".format(epoch))
    evaluate.vis_FW(feats, labs, C.cpu().numpy(), np.arange(args.n_class), "fea_c_{}".format(epoch))

    if args.model_type in ("normface", "am-softmax"):
        print("scale: {}".format(model.clf_layer.scale))

    return acc


n_it = 0  # global iteration counter (currently informational only)
best_acc = 0
for epoch in range(args.epoch):
    logger.log("--- {} ---".format(epoch))
    if args.model_type == "a-softmax":
        # A-Softmax uses its own epoch-based step decay instead of the
        # MultiStepLR scheduler created above.
        adjust_learning_rate(optimizer, epoch)
    model.train()
    for _b, (image, label) in enumerate(train_loader):
        # print("- batch {} -".format(_b))
        n_it += 1
        image, label = image.cuda(), label.cuda()
        fea, logit = model(image)

        # assert not check_nan_inf(fea)
        # assert not check_nan_inf(logit)
        # if args.model_type in ("c-contrastive", "c-triplet"):
        #     _D = logit.detach()
        #     _u, _m, _l = _D.max().cpu(), _D.mean().cpu(), _D.min().cpu()
        #     print("D:", _u.item(), _m.item(), _l.item())

        # Base classification loss; some criteria take extra hyper-params.
        if args.model_type == "c-contrastive":
            loss_base = criterion(logit, label,
                # weight_neg=1. / (args.n_class - 1), margin_neg=args.margin)
                weight_neg=1, margin_neg=args.margin)
        elif args.model_type == "am-softmax":
            loss_base = criterion(logit, label, scale=args.scale, margin=args.margin)
        else:
            loss_base = criterion(logit, label)
        loss_centre = centre_loss(fea, label, C)
        # Total objective: base loss + lambda-weighted centre loss.
        loss = loss_base + args.Lambda * loss_centre

        optimizer.zero_grad()
        loss.backward()
        # nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
        optimizer.step()
        if args.model_type not in ("a-softmax", "am-softmax"):
            # Scheduler is stepped per batch, consistent with the
            # iteration-count milestones used when it was built.
            scheduler.step()

        # Running class-centre update: move each centre toward the mean of
        # its class features in this batch, damped by 1/(1 + count).
        # NOTE(review): appears to follow the centre-loss update rule
        # (Wen et al., ECCV 2016) — confirm alpha scaling against the paper.
        with torch.no_grad():
            for i in range(args.n_class):
                _cnt = (label == i).sum().item()
                if _cnt > 0:
                    _diff = C[i].unsqueeze(0) - fea[label == i]
                    C[i] -= args.alpha * _diff.sum(0) / (1. + _cnt)

    acc = test(epoch)
    logger.log(record.log_new())
    if best_acc < acc:
        # Checkpoint the full model whenever test accuracy improves.
        torch.save(model, save_pattern.format(epoch, acc))
        best_acc = acc
    # Snapshot the class centres each epoch for offline inspection.
    _centres = C.cpu().numpy()
    np.save(os.path.join(args.log_path, "C.{}e.npy".format(epoch)), _centres)

# Final summary: best metrics plus a per-epoch curve for each tracked stat.
logger.log("--- best ---")
logger.log(record.log_best())
for _key in ("acc", "loss", "loss_xent", "loss_centre"):
    evaluate.vis_curve(record.seq[_key], title=_key)
torch.cuda.empty_cache()
