"""
I can write this if we need custom training loop etc.
I usually use this in PyTorch.
"""

# Public API of this module.
__all__ = [
    "train_step",
    "val_step",
    "create_embedding",
    "feature_extrat_step",
    "accuracy",
    "progress_bar",
    "topk_errors",
    "data_label_extrat_step",
]

import sys

import numpy as np
import torch

# from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, FileTransferSpeed
from tqdm import tqdm

def progress_bar(finish_tasks_number, tasks_number):
    # Adapted from: https://blog.csdn.net/TaoismHuang/article/details/120747536
    """Render a simple in-place console progress bar.

    :param finish_tasks_number: int, number of tasks completed so far
    :param tasks_number: int, total number of tasks
    :return: None
    """
    done_pct = round(finish_tasks_number / tasks_number * 100)
    # One bar segment per 2 percentage points (max 50 segments).
    bar = "▓" * (done_pct // 2)
    print("\r进度: {}%: ".format(done_pct), bar, end="")
    sys.stdout.flush()



# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def topk_errors(preds, labels, ks):
    """Compute the top-k error (as a percentage) for every k in ``ks``.

    :param preds: (batch, num_classes) prediction scores.
    :param labels: (batch,) ground-truth class indices.
    :param ks: iterable of ints, e.g. (1, 5); max(ks) <= num_classes.
    :return: list of scalar tensors, one error percentage per k.
    """
    assert preds.size(0) == labels.size(0), \
        "Batch dim of predictions and labels must match"
    batch_size = preds.size(0)

    # A single topk call for the largest k covers all smaller ks.
    _, top_inds = torch.topk(preds, max(ks), dim=1, largest=True, sorted=True)
    # (batch, max_k) -> (max_k, batch): row i holds each sample's i-th guess.
    top_inds = top_inds.t()
    # Broadcast labels to the same (max_k, batch) layout for comparison.
    tiled_labels = labels.view(1, -1).expand_as(top_inds)
    hits = top_inds.eq(tiled_labels)

    errors = []
    for k in ks:
        n_correct = hits[:k, :].reshape(-1).float().sum()
        errors.append((1.0 - n_correct / batch_size) * 100.0)
    return errors


def feature_extrat_step(encoder, train_loader, device):
    """Extract features for an entire dataloader with a frozen encoder.

    :param encoder: encoder whose ``__call__`` maps a batch of images to a
        numpy feature array (converted back to a tensor here).
    :param train_loader: iterable of (images, labels) batches.
    :param device: "cuda" or "cpu" — NOTE(review): currently unused, batches
        are never moved; confirm intent.
    :return: (features, labels) tensors concatenated over all batches.
    """
    # Inference mode: no dropout / batchnorm updates.
    encoder.eval()

    feature_batches = []
    label_batches = []

    for images, labels in tqdm(train_loader):
        # The encoder returns a numpy array; wrap it back into a tensor.
        batch_features = torch.from_numpy(encoder(images))
        feature_batches.append(batch_features)
        label_batches.append(labels)

    all_features = torch.cat(feature_batches, dim=0)
    all_labels = torch.cat(label_batches, dim=0)
    return all_features, all_labels


def data_label_extrat_step(train_loader):
    """Materialize an entire dataloader into two concatenated tensors.

    :param train_loader: iterable of (images, labels) batches.
    :return: (data, labels) tensors concatenated over all batches.
    """
    data_batches = []
    label_batches = []

    for images, labels in tqdm(train_loader):
        data_batches.append(images)
        label_batches.append(labels)

    all_data = torch.cat(data_batches, dim=0)
    all_labels = torch.cat(label_batches, dim=0)
    return all_data, all_labels


def train_step(knn_classifier, decoder, train_loader, cls_loss_fn, optimizer, device):
    """Run one training epoch of the classifier head.

    :param knn_classifier: online kNN tracker; its ``update`` is fed the raw
        batch features/targets every step.
    :param decoder: classifier module being trained.
    :param train_loader: iterable of (features, labels) batches.
    :param cls_loss_fn: classification loss (e.g. CrossEntropyLoss).
    :param optimizer: optimizer over ``decoder``'s parameters.
    :param device: "cuda" or "cpu"; only the inputs are moved here.
    :return: (last-batch loss as float, mean top-1 accuracy over batches)

    NOTE(review): labels are *not* moved to ``device`` — the loss will see a
    device mismatch when ``device`` is "cuda"; confirm intent.
    NOTE(review): raises NameError if ``train_loader`` is empty (``loss`` and
    the batch counter would be unbound).
    """
    decoder.train()

    top1_running = 0
    for step, (batch_x, batch_y) in enumerate(train_loader):
        batch_x = batch_x.to(device)

        optimizer.zero_grad()

        logits = decoder(batch_x)
        loss = cls_loss_fn(logits, batch_y)
        # Keep the kNN classifier's memory in sync with the raw inputs.
        knn_classifier.update(train_features=batch_x, train_targets=batch_y)

        # Accumulate top-1 accuracy on detached CPU copies (no graph kept).
        top1_running += accuracy(logits.detach().cpu(),
                                 batch_y.detach().cpu(), topk=(1,))[0]

        loss.backward()
        optimizer.step()

    top1_running /= (step + 1)

    return loss.item(), top1_running


def val_step(knn_classifier, decoder, val_loader, cls_loss_fn, device):
    """Run one validation epoch.

    :param knn_classifier: online kNN tracker; fed the raw validation
        features/targets each batch via ``update``.
    :param decoder: classifier module being evaluated.
    :param val_loader: iterable of (features, labels) batches.
    :param cls_loss_fn: classification loss (e.g. CrossEntropyLoss).
    :param device: "cuda" or "cpu"; only the inputs are moved here.
    :return: (last-batch loss as float,
              mean top-1 accuracy over batches,
              mean top-5 accuracy over batches (requires >= 5 classes),
              1-element tensor holding the top-1 accuracy,
              flat numpy array of argmax predictions for the whole epoch)

    NOTE(review): labels are *not* moved to ``device``; this mismatches when
    ``device`` is "cuda" — confirm intent.
    """
    decoder.eval()

    top1_accuracy = 0
    top5_accuracy = 0
    test_acc_list = []
    pred_list = []
    with torch.no_grad():
        for batch_idx, (val_img, target_label) in enumerate(val_loader):
            val_img = val_img.to(device)

            dec_output_out = decoder(val_img)
            knn_classifier.update(test_features=val_img, test_targets=target_label)

            loss = cls_loss_fn(dec_output_out, target_label)

            top1, top5 = accuracy(dec_output_out.detach().cpu(),
                                  target_label.detach().cpu(), topk=(1, 5))
            top1_accuracy += top1[0]
            top5_accuracy += top5[0]

            # Index of the max logit per sample = predicted class.
            pred = dec_output_out.data.max(1, keepdim=True)[1]
            pred_list.append(pred.detach().cpu().numpy().flatten())
        top1_accuracy /= (batch_idx + 1)
        top5_accuracy /= (batch_idx + 1)

    test_acc_list.append(top1_accuracy)
    acc_vect = torch.tensor(test_acc_list)
    # np.concatenate (not np.asarray) so a smaller final batch doesn't
    # produce a ragged object array / ValueError.
    pred_list_array = np.concatenate(pred_list)
    return loss.item(), top1_accuracy, top5_accuracy, acc_vect, pred_list_array


def create_embedding(encoder, full_loader, embedding_dim, device):
    """Build an embedding matrix for an entire dataloader.

    :param encoder: encoder whose ``__call__`` maps a batch of images to a
        numpy feature array.
    :param full_loader: iterable of (images, labels) batches.
    :param embedding_dim: shape (c, h, w) of one random seed entry, matching
        the encoder's output dimensions.
    :param device: "cuda" or "cpu" — NOTE(review): currently unused, batches
        are never moved; confirm intent.
    :return: tensor of shape (num_images_in_loader + 1, c, h, w); the first
        entry is a random placeholder seed.
    """
    encoder.eval()
    # Random seed entry that stays at the top of the matrix.
    embedding = torch.randn(embedding_dim)
    print(embedding.shape)

    with torch.no_grad():
        for images, _labels in full_loader:
            # The encoder returns numpy; wrap it back into a tensor.
            batch_embedding = torch.from_numpy(encoder(images))
            embedding = torch.cat((embedding, batch_embedding), 0)

    return embedding


def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy percentages for each k in ``topk``.

    :param output: (batch, num_classes) prediction scores.
    :param target: (batch,) ground-truth class indices.
    :param topk: ks to evaluate; max(topk) <= num_classes.
    :return: list of 1-element tensors, one accuracy percentage per k.
    """
    with torch.no_grad():
        largest_k = max(topk)
        n_samples = target.size(0)

        # (batch, largest_k) -> transpose so row i is each sample's
        # i-th best prediction, enabling the hits[:k] slice below.
        _, top_idx = output.topk(largest_k, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n_samples)
            for k in topk
        ]