#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 10:00:24 2018
Paper: Siamese Neural Networks for One-shot Image Recognition
Reference (tutorial, in Chinese): https://www.cnblogs.com/denny402/p/7520063.html
"""
import torch
from torch.autograd import Variable
import numpy as np
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torch.optim as optim

from Siamese.Config import Config
from Siamese.ContrastiveLoss import ContrastiveLoss
from Siamese.SiameseNetwork import SiameseNetwork
from Siamese.getone_dataset import OneDataset
from Siamese.mydataset import MyDataset

model_path = './model.ckpt'


# Helper functions
def imshow(img, text=None, should_save=False):
    """Display a (C, H, W) image tensor with matplotlib, optionally overlaying text.

    Args:
        img: CPU tensor in channel-first layout (e.g. from make_grid).
        text: optional annotation drawn at a fixed position over the image.
        should_save: accepted for interface compatibility; not used here.
    """
    pixels = img.numpy()
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    # matplotlib wants channels last: (H, W, C)
    plt.imshow(pixels.transpose(1, 2, 0))
    plt.show()


def show_plot(iteration, loss):
    """Draw a simple line plot of `loss` values against `iteration` counts."""
    plt.plot(iteration, loss)
    plt.show()


# Visualising some of the data
# NOTE(review): the triple-quoted block below is disabled example code kept as a
# bare string literal (it is never executed). As written it would not run even
# if re-enabled: `transform=` is passed twice to MyDataset, and `it` is used
# before the line defining it (which is itself commented out). Fix before use.
"""
train_data=MyDataset(txt = Config.train_txt, transform=transforms.ToTensor(), 
                     transform=transforms.Compose([transforms.Scale((100,100)),
                               transforms.ToTensor()], should_invert=False))
train_loader = DataLoader(dataset=train_data, batch_size=8, shuffle=True)
#it = iter(train_loader)
p1, p2, label = it.next()
example_batch = it.next()
concatenated = torch.cat((example_batch[0],example_batch[1]),0)
imshow(torchvision.utils.make_grid(concatenated))
print(example_batch[2].numpy())
"""


def train():
    """Train the Siamese network with contrastive loss and distance-gap early stopping.

    Trains on pairs from Config.train_txt, evaluates each epoch on
    Config.val_txt, and checkpoints to `model_path` whenever the gap between
    the mean "different-pair" distance and the mean "same-pair" distance on
    the validation set improves. Stops after Config.early_stop consecutive
    epochs without improvement, then plots loss and distance histories.
    """
    transform = transforms.Compose([transforms.Resize((70, 70)), transforms.ToTensor()])

    train_data = MyDataset(txt=Config.train_txt, transform=transform, should_invert=False)
    train_dataloader = DataLoader(dataset=train_data, shuffle=True, batch_size=Config.train_batch_size)

    val_data = MyDataset(txt=Config.val_txt, transform=transform, should_invert=False)
    val_dataloader = DataLoader(dataset=val_data, shuffle=False, batch_size=Config.train_batch_size)

    net = SiameseNetwork().cuda()
    if Config.load_model:
        print('load model')
        net.load_state_dict(torch.load(model_path))
    criterion = ContrastiveLoss()
    # BUG FIX: the original constructed an Adam optimizer and immediately
    # overwrote it with SGD, so Adam was dead code; only SGD is kept.
    optimizer = optim.SGD(net.parameters(), lr=Config.lr,
                          momentum=Config.momentum)

    counter = []
    loss_history = []
    same_his = []
    notsame_his = []
    iteration_number = 0
    best_gap = 0        # best (not-same mean - same mean) validation gap so far
    no_improve = 0      # consecutive epochs without gap improvement
    # NOTE(review): range starts at 1, so this runs train_number_epochs - 1
    # epochs — kept as-is to preserve the original schedule.
    for epoch in range(1, Config.train_number_epochs):
        losslist = []
        net.train()  # training mode (enables dropout / batch-norm updates)

        train_samelist = []
        train_notsamelist = []
        for img0, img1, label in train_dataloader:
            # `Variable` has been deprecated since PyTorch 0.4; plain tensors
            # track gradients directly.
            img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
            optimizer.zero_grad()
            output1, output2 = net(img0, img1)
            loss_contrastive = criterion(output1, output2, label)
            loss_contrastive.backward()
            optimizer.step()
            losslist.append(loss_contrastive.item())

            # Track per-pair embedding distances; label 0 marks a "same" pair.
            euclidean_distance = F.pairwise_distance(output1, output2)
            for ii in range(len(label)):
                if label[ii] == 0:
                    train_samelist.append(euclidean_distance[ii].item())
                else:
                    train_notsamelist.append(euclidean_distance[ii].item())

        net.eval()  # evaluation mode
        samelist = []
        notsamelist = []
        with torch.no_grad():
            for img0, img1, label in val_dataloader:
                img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
                output1, output2 = net(img0, img1)
                euclidean_distance = F.pairwise_distance(output1, output2)
                for ii in range(len(label)):
                    if label[ii] == 0:
                        samelist.append(euclidean_distance[ii].item())
                    else:
                        notsamelist.append(euclidean_distance[ii].item())

        avgloss = sum(losslist) / len(losslist)
        print("Epoch:{}, loss {:.4f}, t same {:.4f}, t not {:.4f}, same {:.4f}, not {:.4f}\n"
              .format(epoch, avgloss, np.mean(train_samelist), np.mean(train_notsamelist),
                      np.mean(samelist), np.mean(notsamelist)))
        iteration_number += 1
        counter.append(iteration_number)
        loss_history.append(avgloss)
        same_his.append(np.mean(samelist))
        notsame_his.append(np.mean(notsamelist))

        # Checkpoint on improvement of the validation distance gap.
        # BUG FIX: the original also saved unconditionally every epoch, which
        # clobbered the "best" checkpoint with the latest weights and defeated
        # the improvement-gated save below.
        cur_gap = np.mean(notsamelist) - np.mean(samelist)
        if cur_gap > best_gap:
            best_gap = cur_gap
            no_improve = 0  # BUG FIX: reset patience so early stop counts
                            # consecutive (not cumulative) stale epochs
            torch.save(net.state_dict(), model_path)
        else:
            no_improve += 1
        if no_improve > Config.early_stop:
            break

    show_plot(counter, loss_history)
    show_plot(counter, same_his)
    show_plot(counter, notsame_his)


def get_test_img(p1, p2):
    """Load two image files as grayscale 70x70 tensors for pairwise testing.

    Args:
        p1, p2: filesystem paths to the two images.

    Returns:
        A pair of (1, 70, 70) float tensors, one per input path.
    """
    transform = transforms.Compose(
        [transforms.Resize((70, 70)), transforms.ToTensor()])

    tensors = []
    for path in (p1, p2):
        grayscale = Image.open(path).convert("L")  # "L" = single-channel
        tensors.append(transform(grayscale))
    return tensors[0], tensors[1]


def test(plist, show_pairs=False):
    """Embed the training set with the trained network and save a 2-D scatter plot.

    Loads weights from `model_path`, computes an embedding for every image
    listed in Config.train_txt, and writes a per-class scatter of the first
    two embedding dimensions to 'result.png'.

    Args:
        plist: kept for interface compatibility — the original implementation
            never read it (the pair-display code sat behind a bare `return`).
        show_pairs: when True, additionally display each validation pair with
            its embedding distance (this code was unreachable in the original;
            it is now opt-in, preserving the default behavior).
    """
    net = SiameseNetwork().cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()

    # Collect embeddings for every training image.
    embeddings = []        # renamed from `all`, which shadowed the builtin
    embedding_labels = []
    od = OneDataset(
        txt=Config.train_txt,
        transform=transforms.Compose([transforms.Resize((70, 70)), transforms.ToTensor()]),
        should_invert=False, onlysame=False)
    o_dataloader = DataLoader(dataset=od, shuffle=False, batch_size=100)
    with torch.no_grad():  # BUG FIX: pure inference — don't build autograd graphs
        for data in o_dataloader:
            x = data[0].cuda()
            labels = data[1].cuda()
            output = net.forward_once(x)
            embeddings.extend(output.data.cpu().numpy().tolist())
            embedding_labels.extend(labels.data.cpu().numpy().tolist())

    numpy_all = np.array(embeddings)
    numpy_labels = np.array(embedding_labels)

    def plot_mnist(numpy_all, numpy_labels):
        # One color per class; assumes the embedding has at least 2 dimensions
        # and Config.typelist has at most 10 entries — TODO confirm.
        c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
             '#ff00ff', '#990000', '#999900', '#009900', '#009999']
        numpy_labels = numpy_labels.flatten()
        labelsss = Config.typelist
        for i in range(0, len(labelsss)):
            f = numpy_all[np.where(numpy_labels == labelsss[i])]
            plt.plot(f[:, 0], f[:, 1], '.', c=c[i])
        plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
        plt.savefig('result.png')

    plot_mnist(numpy_all, numpy_labels)

    if not show_pairs:
        return

    # Display each validation "same" pair alongside its embedding distance.
    val_data = MyDataset(
        txt=Config.val_txt,
        transform=transforms.Compose([transforms.Resize((70, 70)), transforms.ToTensor()]),
        should_invert=False, onlysame=True)
    val_dataloader = DataLoader(dataset=val_data, shuffle=False, batch_size=1)
    with torch.no_grad():
        for img0, img1, label in val_dataloader:
            img0, img1 = img0.cuda(), img1.cuda()
            output1, output2 = net(img0, img1)
            euclidean_distance = F.pairwise_distance(output1, output2)
            concatenated = torch.cat((img0.cpu(), img1.cpu()), 0)
            imshow(torchvision.utils.make_grid(concatenated),
                   'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))


if __name__ == '__main__':
    # readimage()
    # convert()
    train()
    # make_test_image()
    # Hard-coded absolute Windows paths to sample face crops — these exist only
    # on the original author's machine; replace before running elsewhere.
    d = r'D:\PythonProject\face48\target\33_16187603587.jpg'
    d1 = r'D:\PythonProject\face48\target\33_161876036023.jpg'
    p3 = r'D:\PythonProject\face48\target\42_161876037713.jpg'
    nai = r'D:\PythonProject\face48\test\16187636830.jpg'
    # NOTE(review): `test` never reads this pair list (its pairwise-display
    # code is unreachable behind an early return) — it embeds Config.train_txt.
    pairlist = [(d, p3), (d, nai), (nai, p3), (d, d1),
                (d1, p3), (d1, nai)]
    test(pairlist)
