# Set up data loaders
from datasets import SiameseMNIST
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
import torch
from torchvision.datasets import FashionMNIST
from torchvision import transforms

from trainer import fit
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Set up the network and training parameters
from networks import EmbeddingNet, SiameseNet
from losses import ContrastiveLoss

# Human-readable names for the 10 FashionMNIST classes, indexed 0-9.
fashion_mnist_classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                         'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# One plot colour per class (matplotlib "tab10"-style hex codes).
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
          '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
          '#bcbd22', '#17becf']
# Alias used by plot_embeddings() for its legend.
mnist_classes = fashion_mnist_classes

# Per-channel normalization statistics (presumably precomputed over the
# FashionMNIST training set — TODO confirm).
mean, std = 0.28604059698879553, 0.35302424451492237
batch_size = 4
cuda = torch.cuda.is_available()

# Train/test datasets share the same ToTensor + Normalize pipeline;
# download=True fetches the data into ../data/FashionMNIST if absent.
train_dataset = FashionMNIST('../data/FashionMNIST', train=True, download=True,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((mean,), (std,))
                             ]))
test_dataset = FashionMNIST('../data/FashionMNIST', train=False, download=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize((mean,), (std,))
                            ]))


def plot_embeddings(embeddings, targets, xlim=None, ylim=None):
    """Scatter-plot 2-D embeddings, one colour per class index 0-9.

    embeddings: (N, 2) array of points; targets: (N,) array of class ids.
    xlim/ylim: optional (lo, hi) pairs to clamp the axes.
    """
    plt.figure(figsize=(10, 10))
    for cls in range(10):
        members = np.where(targets == cls)[0]
        xs = embeddings[members, 0]
        ys = embeddings[members, 1]
        plt.scatter(xs, ys, alpha=0.5, color=colors[cls])
    if xlim:
        plt.xlim(*xlim)
    if ylim:
        plt.ylim(*ylim)
    # Legend entries line up with the per-class scatter calls above.
    plt.legend(mnist_classes)


def extract_embeddings(dataloader, model, n_dims=2):
    """Run *model* over every batch in *dataloader* and collect its embeddings.

    Args:
        dataloader: yields (images, target) batches; its ``.dataset`` must
            support ``len()``.
        model: exposes ``eval()`` and ``get_embedding(images)`` returning a
            ``(batch, n_dims)`` tensor.
        n_dims: embedding dimensionality. Defaults to 2 (the previously
            hard-coded value), so existing callers are unaffected.

    Returns:
        Tuple ``(embeddings, labels)`` of numpy arrays with shapes
        ``(N, n_dims)`` and ``(N,)``.
    """
    with torch.no_grad():
        model.eval()
        n = len(dataloader.dataset)
        embeddings = np.zeros((n, n_dims))
        labels = np.zeros(n)
        k = 0
        for images, target in dataloader:
            # Query CUDA availability directly instead of relying on the
            # module-level `cuda` flag, so the function is self-contained.
            if torch.cuda.is_available():
                images = images.cuda()
            # `.data` is a legacy accessor and redundant under no_grad();
            # .cpu().numpy() alone is the modern form.
            embeddings[k:k + len(images)] = model.get_embedding(images).cpu().numpy()
            labels[k:k + len(images)] = target.numpy()
            k += len(images)
    return embeddings, labels


if __name__ == '__main__':
    from datasets import BalancedBatchSampler

    # We'll create mini batches by sampling labels that will be present in the mini batch and number of examples from each class
    # train_labels holds the class label of every training example.
    # NOTE(review): .train_labels / .test_labels are deprecated in newer
    # torchvision releases (replaced by .targets) — confirm installed version.
    train_batch_sampler = BalancedBatchSampler(train_dataset.train_labels, n_classes=10, n_samples=25)
    test_batch_sampler = BalancedBatchSampler(test_dataset.test_labels, n_classes=10, n_samples=25)

    kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
    # batch_sampler supplies the index lists fed to train_dataset.__getitem__
    # for each mini-batch (each batch is class-balanced: 10 classes x 25 samples).
    online_train_loader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_batch_sampler, **kwargs)
    online_test_loader = torch.utils.data.DataLoader(test_dataset, batch_sampler=test_batch_sampler, **kwargs)

    # Plain sequential loaders, used only for embedding extraction/plotting below.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)

    # Set up the network and training parameters
    from networks import EmbeddingNet
    from losses import OnlineTripletLoss
    from utils import AllTripletSelector, HardestNegativeTripletSelector, RandomNegativeTripletSelector, \
        SemihardNegativeTripletSelector  # Strategies for selecting triplets within a minibatch
    from metrics import AverageNonzeroTripletsMetric

    # Triplet-loss margin; also passed to the selector to pick violating triplets.
    margin = 1.
    embedding_net = EmbeddingNet()
    model = embedding_net
    if cuda:
        model.cuda()
    # NOTE(review): the plot title / filename below say "AllTripletSelector",
    # but the loss actually uses HardestNegativeTripletSelector — confirm intent.
    loss_fn = OnlineTripletLoss(margin, HardestNegativeTripletSelector(margin))
    lr = 1e-3
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
    # Decay LR by 10x every 8 epochs.
    scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
    n_epochs = 20
    log_interval = 150
    fit(online_train_loader, online_test_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval,
        metrics=[AverageNonzeroTripletsMetric()])

    # Extract 2-D embeddings for the full train/test sets and overlay both
    # scatter plots on the current figure before saving.
    train_embeddings_otl, train_labels_otl = extract_embeddings(train_loader, model)
    plot_embeddings(train_embeddings_otl, train_labels_otl)
    val_embeddings_otl, val_labels_otl = extract_embeddings(test_loader, model)
    plot_embeddings(val_embeddings_otl, val_labels_otl)
    plt.title("trainFashion_triplt_online_AllTripletSelector_2_4")

    # NOTE(review): double slash in "space//" is harmless on most OSes but
    # looks unintentional — the "space" directory must already exist.
    plt.savefig("space//trainFashion_triplt_online_AllTripletSelector_2_4.jpg")

    # display_emb_online, display_emb, display_label_online, display_label = train_embeddings_ocl, train_embeddings_cl, train_labels_ocl, train_labels_cl
    # # display_emb_online, display_emb, display_label_online, display_label = val_embeddings_ocl, val_embeddings_cl, val_labels_ocl, val_labels_cl
    #
    # x_lim = (np.min(display_emb_online[:, 0]), np.max(display_emb_online[:, 0]))
    # y_lim = (np.min(display_emb_online[:, 1]), np.max(display_emb_online[:, 1]))
    # x_lim = (min(x_lim[0], np.min(display_emb[:, 0])), max(x_lim[1], np.max(display_emb[:, 0])))
    # y_lim = (min(y_lim[0], np.min(display_emb[:, 1])), max(y_lim[1], np.max(display_emb[:, 1])))
    # plot_embeddings(display_emb, display_label, x_lim, y_lim)
    # plot_embeddings(display_emb_online, display_label_online, x_lim, y_lim)

    # display_emb_online, display_emb, display_label_online, display_label = train_embeddings_otl, train_embeddings_tl, train_labels_otl, train_labels_tl
    # # display_emb_online, display_emb, display_label_online, display_label = val_embeddings_otl, val_embeddings_tl, val_labels_otl, val_labels_tl
    # x_lim = (np.min(display_emb_online[:, 0]), np.max(display_emb_online[:, 0]))
    # y_lim = (np.min(display_emb_online[:, 1]), np.max(display_emb_online[:, 1]))
    # x_lim = (min(x_lim[0], np.min(display_emb[:, 0])), max(x_lim[1], np.max(display_emb[:, 0])))
    # y_lim = (min(y_lim[0], np.min(display_emb[:, 1])), max(y_lim[1], np.max(display_emb[:, 1])))
    # plot_embeddings(display_emb, display_label, x_lim, y_lim)
    # plot_embeddings(display_emb_online, display_label_online, x_lim, y_lim)
