import torch
import pickle
import torchvision
from torchvision import transforms
import torchvision.datasets as dset
from torchvision import transforms
from mydataset import OmniglotTrain, OmniglotTest, MyTrain, MyTest
from torch.utils.data import DataLoader
from torch.autograd import Variable
import matplotlib.pyplot as plt
from model import Siamese, ContrastiveLoss, SiameseNetwork, CRNN
import time
import numpy as np
# import gflags
import sys
from collections import deque
import os
import argparse
import torch.nn.functional as F


def arg_parse(argv=None):
    """Parse command-line arguments for the training script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            in which case argparse reads ``sys.argv[1:]``, so existing
            callers (``arg_parse()``) are unaffected.

    Returns:
        argparse.Namespace with the parsed options.
    """

    def str2bool(value):
        # `type=bool` is an argparse trap: bool("False") is True because
        # any non-empty string is truthy.  Accept textual spellings instead.
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ("1", "true", "yes", "y", "t")

    parser = argparse.ArgumentParser(
        description='...')

    parser.add_argument(
        "--cuda",
        dest='cuda',
        help="use cuda",
        default=True,
        type=str2bool)
    parser.add_argument(
        "--train_path",
        dest="train_path",
        help="训练图片文件夹",
        # default="/home/scl/data/omniglot-master/omniglot-master/python/images_background",
        default="/home/scl/py/OCR/TestImage/train",
        type=str)
    parser.add_argument(
        "--test_path",
        dest="test_path",
        help="测试图片文件夹",
        # default="/home/scl/data/omniglot-master/omniglot-master/python/images_evaluation",
        default="/home/scl/py/OCR/TestImage/test",
        type=str)
    # Numeric options all declare type=int; without it a CLI-supplied value
    # arrives as a str and breaks DataLoader sizes and `%` arithmetic below.
    parser.add_argument(
        "--way",
        dest="way",
        help="how much way one-shot learning",
        default=20,
        type=int)
    parser.add_argument(
        "--times",
        dest='times',
        help="number of samples to test accuracy",
        default=100,
        type=int)
    parser.add_argument(
        "--workers",
        dest='workers',
        help="number of dataLoader workers",
        default=8,
        type=int)
    parser.add_argument(
        "--batch_size",
        dest='batch_size',
        help="number of batch size",
        default=128,
        type=int)  # was type=str, which broke DataLoader(batch_size=...) for CLI values
    parser.add_argument(
        "--lr",
        dest='lr',
        help="learning rate",
        default=0.00001,
        type=float)
    parser.add_argument(
        "--show_every",
        dest='show_every',
        help="show result after each show_every iter.",
        default=10,
        type=int)
    parser.add_argument(
        "--save_every",
        dest='save_every',
        help="save model after each save_every iter.",
        default=1000,
        type=int)
    parser.add_argument(
        "--test_every",
        dest='test_every',
        help="test model after each test_every iter.",
        default=100,
        type=int)
    parser.add_argument(
        "--max_iter",
        dest='max_iter',
        help="number of iterations before stopping",
        default=10000,
        type=int)
    parser.add_argument(
        "--model_path",
        dest='model_path',
        help="path to store model",
        default="train_model",
        type=str)
    parser.add_argument(
        "--gpu_ids",
        dest='gpu_ids',
        help="gpu ids used to train",
        default="0,1",
        type=str)
    return parser.parse_args(argv)

if __name__ == '__main__':

    Flags = arg_parse()

    # Training-time augmentation: random affine jitter (up to 15 degrees).
    data_transforms = transforms.Compose([
        transforms.RandomAffine(15),
        transforms.ToTensor()
    ])

    os.environ["CUDA_VISIBLE_DEVICES"] = Flags.gpu_ids
    print("use gpu:", Flags.gpu_ids, "to train.")

    # Custom pair datasets: MyTrain yields (img1, img2, label) training pairs;
    # MyTest yields one-shot evaluation pairs (`times` trials of `way` classes).
    trainSet = MyTrain(Flags.train_path, transform=data_transforms)
    testSet = MyTest(Flags.test_path, transform=transforms.ToTensor(),
                     times=Flags.times, way=Flags.way)

    # batch_size == way so that each test batch is one complete N-way trial.
    testLoader = DataLoader(testSet, batch_size=Flags.way, shuffle=False,
                            num_workers=Flags.workers)
    trainLoader = DataLoader(trainSet, batch_size=Flags.batch_size, shuffle=False,
                             num_workers=Flags.workers)

    # Active configuration: CRNN backbone + binary cross-entropy on the
    # similarity score.  `reduction='mean'` replaces the deprecated
    # `size_average=True`; both average the loss over the batch.
    loss_fn = torch.nn.BCELoss(reduction='mean')
    net = CRNN(32, 1, 512, 256)

    # Alternative configurations kept for reference:
    #   loss_fn = torch.nn.BCELoss(); net = Siamese()
    #   criterion = ContrastiveLoss(); net = SiameseNetwork()

    # multi gpu
    if len(Flags.gpu_ids.split(",")) > 1:
        net = torch.nn.DataParallel(net)

    if Flags.cuda:
        net.cuda()

    # Resume from a previously trained checkpoint when available; previously
    # a missing file made the script crash before training even started.
    model_path = "/home/scl/py/孪生/siamese-pytorch-master/train_model/model-inter-1001.pt"
    if os.path.isfile(model_path):
        net.load_state_dict(torch.load(model_path))
    else:
        print("checkpoint not found, training from scratch:", model_path)

    net.train()

    optimizer = torch.optim.Adam(net.parameters(), lr=Flags.lr)
    # optimizer = torch.optim.SGD(net.parameters(), lr=Flags.lr, weight_decay=0.01)

    train_loss = []           # per-batch loss values (floats), pickled at the end
    loss_val = 0.0            # running loss, reset every `show_every` batches
    time_start = time.time()
    queue = deque(maxlen=20)  # most recent test accuracies for the final report

    for batch_id, (img1, img2, label) in enumerate(trainLoader, 1):
        net.train()
        if batch_id > Flags.max_iter:
            break
        # `Variable` is a no-op on the torch versions this file requires
        # (it already uses torch.no_grad()), so plain .cuda() moves suffice.
        if Flags.cuda:
            img1, img2, label = img1.cuda(), img2.cuda(), label.cuda()

        optimizer.zero_grad()
        output = net.forward(img1, img2)
        loss = loss_fn(output, label)
        # .item() detaches the scalar: accumulating the tensor itself kept
        # every batch's autograd graph alive (GPU memory leak) and made the
        # pickle below dump CUDA tensors instead of plain floats.
        batch_loss = loss.item()
        loss_val += batch_loss
        train_loss.append(batch_loss)
        loss.backward()
        optimizer.step()

        if batch_id % Flags.show_every == 0:
            print('[%d]\tloss:\t%.5f\ttime lapsed:\t%.2f s'
                  % (batch_id, loss_val / Flags.show_every, time.time() - time_start))
            loss_val = 0.0
            time_start = time.time()

        if batch_id % Flags.save_every == 0:
            torch.save(net.state_dict(),
                       Flags.model_path + '/model-inter-' + str(batch_id + 1) + ".pt")

        if batch_id % Flags.test_every == 0:
            net.eval()
            accuracy = []
            # `test_label` (not `label`) so the training label is not clobbered.
            for _, (test1, test2, test_label) in enumerate(testLoader, 1):
                if Flags.cuda:
                    test1, test2 = test1.cuda(), test2.cuda()
                with torch.no_grad():
                    output = net.forward(test1, test2)
                # A pair is predicted "same" when the similarity score > 0.9.
                pred = torch.gt(output, 0.9).float().cpu()
                acc = (pred == test_label).sum().float() / float(test_label.shape[0])
                accuracy.append(acc.item())
            mean_acc = float(np.mean(accuracy))
            # Record the accuracy; the queue was never populated before, so the
            # final report below always printed 0.
            queue.append(mean_acc)

            print('*' * 70)
            print('Accu: {:.4f}'.format(mean_acc * 100.))
            print('*' * 70)

    with open('train_loss', 'wb') as f:
        pickle.dump(train_loss, f)

    # Average of the last (up to 20) test accuracies; dividing by the fixed
    # maxlen of 20 understated the result when fewer evaluations had run.
    final_acc = sum(queue) / len(queue) if queue else 0.0
    print("#" * 70)
    print("final accuracy: ", final_acc)