"""
I can write this if we need custom training loop etc.
I usually use this in PyTorch.
"""

__all__ = ["train_step", "val_step", "create_embedding"]

import torch
import torch.nn as nn
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
import matplotlib
import config_efficient_resnet18_capsule_oil as config
import torch.nn.functional as F
matplotlib.use('TkAgg')

def chunk_avg(x, n_chunks=2, normalize=False):
    """Mean over ``n_chunks`` equal groups stacked along dim 0 of ``x``.

    Splits ``x`` into ``n_chunks`` pieces along the batch dimension,
    averages them element-wise, and optionally L2-normalizes the result
    along dim 1.
    """
    grouped = torch.stack(x.chunk(n_chunks, dim=0), dim=0)
    mean = grouped.mean(dim=0)
    if normalize:
        return F.normalize(mean, dim=1)
    return mean

def block_avg(x, n_chunks=2, normalize=False):
    """Like ``chunk_avg`` but returns only the first row of the chunk mean.

    Splits ``x`` into ``n_chunks`` pieces along dim 0, averages them, then
    keeps element 0 of the result (after optional L2 normalization on dim 1).
    """
    pieces = x.chunk(n_chunks, dim=0)
    averaged = torch.stack(pieces, dim=0).mean(0)
    return F.normalize(averaged, dim=1)[0] if normalize else averaged[0]

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def topk_errors(preds, labels, ks):
    """Return the top-k error (as a percentage) for each k in ``ks``."""
    assert preds.size(0) == labels.size(0), (
        "Batch dim of predictions and labels must match"
    )
    batch_size = preds.size(0)
    # Top max(ks) predicted class indices per sample, best first.
    _, top_inds = torch.topk(preds, max(ks), dim=1, largest=True, sorted=True)
    # (batch_size, max_k) -> (max_k, batch_size): row i is the i-th best guess.
    top_inds = top_inds.t()
    # hits[i, j] is True iff the i-th prediction for sample j is the label.
    hits = top_inds.eq(labels.view(1, -1).expand_as(top_inds))
    errors = []
    for k in ks:
        n_correct = hits[:k, :].reshape(-1).float().sum()
        errors.append((1.0 - n_correct / batch_size) * 100.0)
    return errors

def train_step(encoder, decoder, train_loader, enc_loss_fn, dec_loss_fn, cls_loss_fn, optimizer, device):
    """
    Performs one training epoch over ``train_loader``.

    Args:
    encoder: A convolutional Encoder; returns (features, class probabilities).
    decoder: A convolutional Decoder reconstructing images from the features.
    train_loader: PyTorch dataloader yielding (images, labels).
    enc_loss_fn: loss over the encoder features (per-patch list vs. their average).
    dec_loss_fn: reconstruction loss between decoder output and input images.
    cls_loss_fn: classification loss on the encoder's probability output.
    optimizer: PyTorch optimizer.
    device: "cuda" or "cpu"

    Returns: (last batch's combined loss, mean top-1 accuracy over the epoch)

    Raises: ValueError if ``train_loader`` yields no batches.
    """
    encoder.train()
    decoder.train()

    top1_train_accuracy = 0
    batch_idx = -1  # sentinel: detects an empty loader after the loop
    for batch_idx, (train_img, target_label) in enumerate(train_loader):
        train_img = train_img.to(device)
        # Bug fix: labels must live on the same device as the model output,
        # otherwise cls_loss_fn crashes when training on CUDA.
        target_label = target_label.to(device)

        optimizer.zero_grad()

        enc_output, enc_prob_out = encoder(train_img)
        dec_output = decoder(enc_output)

        # The batch is assumed to stack num_patches views of each image along
        # dim 0; compare every patch embedding against the patch average.
        # NOTE(review): num_patches is hard-coded — confirm it matches the
        # dataloader's augmentation count.
        num_patches = 4
        z_list = [torch.squeeze(i, dim=0) for i in enc_output.chunk(num_patches, dim=0)]
        z_avg = block_avg(enc_output, num_patches)
        enc_loss = enc_loss_fn(z_list, z_avg)
        dec_loss = dec_loss_fn(dec_output, train_img)
        cls_loss = cls_loss_fn(enc_prob_out, target_label)

        loss = (config.ENC_LOSS_WEIGHT * enc_loss
                + config.DEC_LOSS_WEIGHT * dec_loss
                + config.CLS_LOSS_WEIGHT * cls_loss)

        # Evaluation: accumulate this batch's top-1 accuracy (computed on CPU).
        top1 = accuracy(enc_prob_out.detach().cpu(), target_label.detach().cpu(), topk=(1,))
        top1_train_accuracy += top1[0]

        loss.backward()
        optimizer.step()

    if batch_idx < 0:
        raise ValueError("train_loader yielded no batches")

    top1_train_accuracy /= (batch_idx + 1)

    # NOTE(review): only the final batch's loss is returned (original
    # behavior); callers wanting an epoch average must accumulate themselves.
    return loss.item(), top1_train_accuracy


def val_step(encoder, decoder, val_loader, enc_loss_fn, dec_loss_fn, cls_loss_fn, device):
    """
    Performs one validation epoch over ``val_loader`` (no gradient updates).

    Args:
    encoder: A convolutional Encoder; returns (features, class probabilities).
    decoder: A convolutional Decoder reconstructing images from the features.
    val_loader: PyTorch dataloader yielding (images, labels).
    enc_loss_fn: loss over the encoder features.
    dec_loss_fn: reconstruction loss between decoder output and input images.
    cls_loss_fn: classification loss on the encoder's probability output.
    device: "cuda" or "cpu"

    Returns: (last batch's combined loss, mean top-1 accuracy,
              mean top-5 accuracy, 1-element tensor holding the top-1 accuracy)

    Raises: ValueError if ``val_loader`` yields no batches.
    """
    encoder.eval()
    decoder.eval()

    top1_accuracy = 0
    top5_accuracy = 0
    test_acc_list = []

    batch_idx = -1  # sentinel: detects an empty loader after the loop
    with torch.no_grad():
        for batch_idx, (val_img, target_label) in enumerate(val_loader):
            val_img = val_img.to(device)
            # Bug fix: labels must live on the same device as the model output,
            # otherwise cls_loss_fn crashes when validating on CUDA.
            target_label = target_label.to(device)

            enc_output, enc_prob_out = encoder(val_img)
            dec_output = decoder(enc_output)

            # NOTE(review): unlike train_step, enc_loss_fn here receives the
            # raw features and input images — confirm this asymmetry is
            # intended and that enc_loss_fn supports both call signatures.
            enc_loss = enc_loss_fn(enc_output, val_img)
            dec_loss = dec_loss_fn(dec_output, val_img)
            cls_loss = cls_loss_fn(enc_prob_out, target_label)

            loss = (config.ENC_LOSS_WEIGHT * enc_loss
                    + config.DEC_LOSS_WEIGHT * dec_loss
                    + config.CLS_LOSS_WEIGHT * cls_loss)

            # accuracy() works on CPU tensors; the original's redundant
            # .cpu().detach().cpu() chain is collapsed to one transfer.
            top1, top5 = accuracy(enc_prob_out.detach().cpu(), target_label.detach().cpu(), topk=(1, 5))
            top1_accuracy += top1[0]
            top5_accuracy += top5[0]

        if batch_idx < 0:
            raise ValueError("val_loader yielded no batches")

        top1_accuracy /= (batch_idx + 1)
        top5_accuracy /= (batch_idx + 1)

    test_acc_list.append(top1_accuracy)
    acc_vect = torch.tensor(test_acc_list)

    # NOTE(review): only the final batch's loss is returned (original
    # behavior); callers wanting an epoch average must accumulate themselves.
    return loss.item(), top1_accuracy, top5_accuracy, acc_vect


def create_embedding(encoder, full_loader, embedding_dim, device):
    """
    Creates an embedding matrix by running the encoder over a dataloader.

    Args:
    encoder: A convolutional Encoder; returns (features, class probabilities).
    full_loader: PyTorch dataloader yielding (images, labels) over the dataset.
    embedding_dim: Shape of the seed row prepended to the embeddings; it must
        match the encoder output's trailing dimensions so torch.cat succeeds.
    device: "cuda" or "cpu"

    Returns: Embedding of size (num_images_in_loader + 1, ...); row 0 is
    random noise, kept for backward compatibility with the original layout.
    """
    encoder.eval()
    # Collect per-batch outputs and concatenate once at the end — the
    # original re-concatenated inside the loop, which is O(n^2) in copies.
    # NOTE(review): the random seed row looks accidental — confirm no caller
    # relies on the extra leading row before removing it.
    chunks = [torch.randn(embedding_dim)]

    with torch.no_grad():
        for train_img, _target_label in full_loader:
            train_img = train_img.to(device)
            enc_output, _enc_prob_out = encoder(train_img)
            chunks.append(enc_output.cpu())

    return torch.cat(chunks, 0)


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy (in percent) over the k top predictions for
    the specified values of k."""
    with torch.no_grad():
        max_k = max(topk)
        n_samples = target.size(0)

        # (batch, max_k) class indices, best first, then transpose so that
        # row i holds everyone's i-th best guess.
        _, top_pred = output.topk(max_k, 1, True, True)
        top_pred = top_pred.t()
        hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))

        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n_samples)
            for k in topk
        ]