import torch
import torchvision.models as models
import torch.optim as optim
import torchvision.transforms as transforms
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as torchdata
import torch.utils.data as Data
from collections import namedtuple
import os
import copy
import numpy as np
from torch import autograd
import itertools
import time
from utils import *
# from tools import *
# Pin the process to one physical GPU (host ordinal 2) before torch sees CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import argparse
parser = argparse.ArgumentParser(description='BlockDrop Training')
parser.add_argument('--lr', type=float, default=1e-7, help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--margin', type=float, default=10, help='margin of triplet loss')
parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay')
# parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--batch_size', type=int, default=100, help='batch size')
parser.add_argument('--max_epochs', type=int, default=500, help='total epochs to run')
# NOTE: was `type=int` with a float default; argparse would have truncated any
# user-supplied value (e.g. --alpha 0.5 -> error / 0). The default 1.0 is kept.
parser.add_argument('--alpha', type=float, default=1.0, help='meta test weights')
parser.add_argument('--cv_dir', default='improve_v2_1e-7_seed_alexnet')
# -----------------------------------------------------------------------------------
args = parser.parse_args()
# Tensor aliases that transparently pick the CUDA variants when a GPU exists.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor

seed = 1
torch.manual_seed(seed)  # cpu
torch.cuda.manual_seed_all(seed)  # gpu -- fixes the RNG so every run draws the same random sequence


def get_meta_train(features, labels, cls_num, meta_test_idx):
    """Gather the meta-train split: all classes except the held-out one.

    For every class column ``i != meta_test_idx``, the rows whose multi-hot
    label has a 1 in that column are selected.  A sample carrying several
    labels is therefore included once per matching class, exactly like the
    per-class gather this replaces.

    Returns a (features, labels) pair concatenated along dim 0.
    """
    code_chunks = []
    label_chunks = []
    for cls_idx in range(cls_num):
        if cls_idx == meta_test_idx:
            continue  # this class is reserved for meta-test
        row_mask = labels[:, cls_idx] == 1
        code_chunks.append(features[row_mask])
        label_chunks.append(labels[row_mask])
    return torch.cat(code_chunks, dim=0), torch.cat(label_chunks, dim=0)

def get_meta_test(features, labels, cls_num, meta_test_idx):
    """Gather the meta-test split: only the held-out class.

    Mirrors get_meta_train but keeps just the rows whose multi-hot label has
    a 1 in column ``meta_test_idx``.  The loop over all classes is kept so
    that an out-of-range index behaves exactly as before (empty concat).
    """
    picked_codes = []
    picked_labels = []
    for cls_idx in range(cls_num):
        if cls_idx != meta_test_idx:
            continue  # only the held-out class belongs to meta-test
        row_mask = labels[:, cls_idx] == 1
        picked_codes.append(features[row_mask])
        picked_labels.append(labels[row_mask])
    return torch.cat(picked_codes, dim=0), torch.cat(picked_labels, dim=0)

def triplet_loss_choose(Ihash, labels, margin, meta_test_idx):
    """Triplet loss restricted to triplets touching the held-out class.

    Builds every (anchor, positive, negative) triplet — positives share at
    least one label with the anchor, negatives share none — and keeps only
    triplets in which at least one member carries class ``meta_test_idx``.

    :param Ihash: [B, bits] hash features (may require grad)
    :param labels: [B, C] multi-hot label matrix
    :param margin: triplet margin
    :param meta_test_idx: class column that must appear in kept triplets
    :return: mean hinge loss, or 0.0 if no triplet qualifies

    Fixes vs. original: the loss tensor follows Ihash's device instead of a
    hard-coded .cuda() (which crashed on CPU-only hosts), and the deprecated
    ``.data.numpy()`` access is replaced by ``.detach()``.
    """
    loss = torch.tensor(0.0, device=Ihash.device)
    labels_np = labels.detach().cpu().numpy()
    labels_cpu = labels.detach().cpu()  # hoisted: was re-copied every anchor
    triplets = []
    for label in labels_np:
        # Rows sharing at least one label with this anchor (multi-label dot > 0).
        positive_mask = np.matmul(labels_np, np.transpose(label)) > 0
        positive_indices = np.where(positive_mask)[0]
        if len(positive_indices) < 2:
            continue
        negative_indices = np.where(np.logical_not(positive_mask))[0]
        if len(negative_indices) < 1:
            continue
        anchor_positives = list(itertools.combinations(positive_indices, 2))
        candidates = [[ap[0], ap[1], neg_ind] for ap in anchor_positives
                      for neg_ind in negative_indices]

        cand = torch.LongTensor(candidates)
        # Keep a triplet iff anchor, positive, or negative carries the
        # meta-test class.
        involves_meta = (
            (labels_cpu[cand[:, 0], meta_test_idx] == 1)
            | (labels_cpu[cand[:, 1], meta_test_idx] == 1)
            | (labels_cpu[cand[:, 2], meta_test_idx] == 1)
        )
        kept = cand[involves_meta].numpy().tolist()
        if kept:
            triplets += kept

    if triplets:
        triplets = np.array(triplets)
        # Squared-Euclidean anchor-positive / anchor-negative distances.
        I_ap = (Ihash[triplets[:, 0]] - Ihash[triplets[:, 1]]).pow(2).sum(1)
        I_an = (Ihash[triplets[:, 0]] - Ihash[triplets[:, 2]]).pow(2).sum(1)
        loss = F.relu(margin + I_ap - I_an).mean()

    return loss

def triplet_loss(Ihash, labels, margin):
    """Standard multi-label triplet loss over all valid triplets in the batch.

    For each anchor row, positives are rows sharing at least one label and
    negatives are rows sharing none; every (anchor, positive, negative)
    combination contributes a hinge term relu(margin + d_ap - d_an).

    :param Ihash: [B, bits] hash features (may require grad)
    :param labels: [B, C] multi-hot label matrix
    :param margin: triplet margin
    :return: mean hinge loss, or 0.0 if no triplet exists

    Fixes vs. original: the loss tensor follows Ihash's device instead of a
    hard-coded .cuda() (which crashed on CPU-only hosts), and the deprecated
    ``.data.numpy()`` access is replaced by ``.detach()``.
    """
    loss = torch.tensor(0.0, device=Ihash.device)
    labels_np = labels.detach().cpu().numpy()
    triplets = []
    for label in labels_np:
        # Rows sharing at least one label with this anchor (multi-label dot > 0).
        positive_mask = np.matmul(labels_np, np.transpose(label)) > 0
        positive_indices = np.where(positive_mask)[0]
        if len(positive_indices) < 2:
            continue
        negative_indices = np.where(np.logical_not(positive_mask))[0]
        if len(negative_indices) < 1:
            continue
        anchor_positives = list(itertools.combinations(positive_indices, 2))
        triplets += [[ap[0], ap[1], neg_ind] for ap in anchor_positives
                     for neg_ind in negative_indices]

    if triplets:
        triplets = np.array(triplets)
        # Squared-Euclidean anchor-positive / anchor-negative distances.
        I_ap = (Ihash[triplets[:, 0]] - Ihash[triplets[:, 1]]).pow(2).sum(1)
        I_an = (Ihash[triplets[:, 0]] - Ihash[triplets[:, 2]]).pow(2).sum(1)
        loss = F.relu(margin + I_ap - I_an).mean()

    return loss

def train(epoch):
    """Run one meta-training epoch (first-order MAML-style inner/outer loop).

    Per batch: (1) triplet loss on the meta-train classes with the current
    weights, (2) one gradient step to obtain "fast weights", (3) triplet loss
    of the held-out class under the fast weights, (4) update the real
    parameters with the combined loss via optimizer_meta.

    Uses module-level globals: train_loader, net, optimizer_meta, args.
    """

    # --- Randomly pick one class in [0, 8] as the meta-test class for this epoch ---
    # NOTE(review): the upper bound 9 is hard-coded rather than derived from the
    # label width (cls_num below) — assumes the dataset has at least 9 classes; confirm.
    meta_test_idx = np.random.randint(0, 9)
    # -----------------------------------------
    accum_loss = 0
    
    for batch_idx, (images, labels) in enumerate(train_loader):

        # if batch_idx == 0:
        # print('batch_idx = ', batch_idx)

        images, labels = Variable(images).cuda(), Variable(labels).cuda()
        batch_num = labels.shape[0]
        cls_num = labels.shape[1]

        # meta-train: forward with the stored parameters, loss over all
        # classes except the held-out one.
        hashcodes = net.forward(x=images, vars=net.parameters(), dropout_training=True)
        hashcodes_meta_train, labels_meta_train = get_meta_train(hashcodes, labels, cls_num, meta_test_idx)
        hashcodes_meta_train = torch.tanh(hashcodes_meta_train)  # don't forget
        loss_meta_train = triplet_loss(hashcodes_meta_train, labels_meta_train, args.margin)
        # print('loss_meta_train = ', loss_meta_train)
        # grads_meta_train = autograd.grad(loss_meta_train, list(learner.net_1_pi.parameters())+list(learner.net_2_pi.parameters()), create_graph=True)
        # Inner step: one manual SGD step produces the fast weights; retain_graph
        # keeps the graph alive so loss_meta_train can still be backpropagated below.
        grad = torch.autograd.grad(loss_meta_train, net.parameters(), retain_graph=True)
        fast_weights = list(map(lambda p: p[1] - args.lr * p[0], zip(grad, net.parameters())))


        # meta-test: re-forward the same batch under the fast weights and score
        # only triplets that involve the held-out class.
        new_hashcodes = net.forward(x=images, vars=fast_weights, dropout_training=True)
        new_hashcodes = torch.tanh(new_hashcodes)  # don't forget
        loss_meta_test = triplet_loss_choose(new_hashcodes, labels, args.margin, meta_test_idx)
        # print('loss_meta_test = ', loss_meta_test)
        
        # Outer step: update net with the combined meta-train + alpha * meta-test loss.
        # print('loss_meta_train = ', loss_meta_train)
        # print('loss_meta_test = ', loss_meta_test)
        loss = loss_meta_train + args.alpha * loss_meta_test
        optimizer_meta.zero_grad()
        loss.backward()
        optimizer_meta.step()

        accum_loss += loss.data.item()

        # learner.update_net(args, images, labels, grads_meta_train)
        # accum_loss += loss_meta_train.data.item()

    # -------------
    print("epoch: %d, accum_loss: %.6f " % (epoch, accum_loss))

    # s =  'epoch = ' + str(epoch) + ',  accum_loss = ' + str(accum_loss)
    # torch.save(s, args.cv_dir+'/'+str(epoch)+'.txt')

def compute_result_image(dataloader, net):
    """Run the hashing network over a dataloader and collect binary codes.

    :param dataloader: yields (images, labels) batches
    :param net: model exposing forward(x, vars, dropout_training) and parameters()
    :return: (sign(codes), labels, total_time) — codes and labels concatenated
             on CPU; total_time is wall-clock seconds spent in the loop.
    """
    codes, clses = [], []
    # Follow the network's own device (CUDA when the model lives on GPU, CPU
    # otherwise) instead of the original hard-coded .cuda() call.
    device = next(iter(net.parameters())).device

    time_start = time.time()
    for images, labels in dataloader:
        clses.append(labels.detach().cpu())
        # Bug fix: the original no_grad() only wrapped the input transfer, so
        # the forward pass still recorded autograd history at eval time.
        with torch.no_grad():
            images = images.to(device)
            hash_features = net.forward(x=images, vars=net.parameters(), dropout_training=False)
        codes.append(hash_features.detach().cpu())
    total_time = time.time() - time_start

    # sign() binarizes the codes to {-1, 0, +1} (0 only for exact zeros).
    return torch.sign(torch.cat(codes)), torch.cat(clses), total_time

def compute_mAP_MultiLabels(trn_binary, tst_binary, trn_label, tst_label):
    """
    Compute mAP by searching the test set against the train/database set.
    Ref: https://github.com/flyingpot/pytorch_deephash

    A database item is relevant to a query iff the two share at least one label.

    :param trn_binary: [N, bits] database codes in {-1, +1}
    :param tst_binary: [Q, bits] query codes in {-1, +1}
    :param trn_label:  [N, C] database multi-hot labels
    :param tst_label:  [Q, C] query multi-hot labels
    :return: scalar tensor, mean average precision over queries with >=1 relevant item
    """
    # Bug fix: the original `for x in ...: x.long()` discarded the converted
    # tensors (Tensor.long() is not in-place); rebind the conversions for real.
    trn_binary = trn_binary.long()
    tst_binary = tst_binary.long()
    trn_label = trn_label.long()
    tst_label = tst_label.long()

    AP = []
    # Ns[k] = k+1, the ranked-list cutoff used for precision@k.
    Ns = torch.arange(1, trn_binary.size(0) + 1).type(torch.FloatTensor)
    for i in range(tst_binary.size(0)):
        query_label, query_binary = tst_label[i], tst_binary[i]
        # Rank database items by Hamming distance to the query (ascending);
        # query_result holds the sorted database indices.
        _, query_result = torch.sum((query_binary != trn_binary).long(), dim=1).sort()
        # 1.0 where the ranked item shares at least one label with the query.
        correct = ((trn_label[query_result] * query_label).sum(1) > 0).float()
        n_relevant = torch.sum(correct)
        if n_relevant == 0:
            # Bug fix: the original divided by zero here, producing a NaN that
            # poisoned the final mean; skip queries with no relevant item.
            continue
        # Precision at each rank, accumulated only at relevant positions.
        P = torch.cumsum(correct, dim=0) / Ns
        AP.append(torch.sum(P * correct) / n_relevant)
    mAP = torch.mean(torch.Tensor(AP))
    return mAP

def test(epoch):
    """Evaluate retrieval mAP (test images as queries against the database
    split), checkpoint the network, and append the score to a text log.

    Uses module-level globals: test_loader, db_loader, net, args.
    Side effects: writes args.cv_dir/ckpt_*.t7 and result/<cv_dir>_improve_v2_mAP.txt.
    """

    tst_binary, tst_label, tst_time = compute_result_image(test_loader, net)
    db_binary, db_label, db_time = compute_result_image(db_loader, net)
    # print('test_codes_time = %.6f, db_codes_time = %.6f'%(tst_time ,db_time))

    mAP = compute_mAP_MultiLabels(db_binary, tst_binary, db_label, tst_label)
    print("epoch: %d, retrieval mAP: %.6f" %(epoch, mAP))
    # logger.add_scalar('retrieval_mAP', mAP, epoch)

    # NOTE(review): assumes args.cv_dir and result/ already exist — confirm.
    torch.save(net.state_dict(), args.cv_dir+'/ckpt_E_%d_mAP_%.5f_net.t7'%(epoch, mAP))
    f = open('result/'+args.cv_dir+'_improve_v2_mAP.txt', 'a') 
    f.write('Epoch:'+str(epoch)+'  mAP = '+str(mAP)+'\n')
    f.close()

start_epoch = 0
total_tst_time = 0
test_cnt = 0
loss_print = 0
MODEL_UPDATE_ITER = 0

# VOC loaders come from the project-local utils module (imported with *).
train_loader, test_loader, db_loader = init_voc_dataloader(args)
# AlexNet-shaped backbone followed by a 32-bit hashing head, expressed as a
# flat (op_name, params) list consumed by Learner.forward.
config = [
        ('conv2d', [64, 3, 11, 11, 4, 2]),  # [0] [ch_out, ch_in, kernelsz, kernelsz, stride, padding]  # w.shape=[ch_out, ch_in, kernelsz, kernelsz], bias.shape=[ch_out]
        ('relu', [False]),  # [1]
        ('max_pool2d', [3, 2, 0]),  # [2] [kernelsz, stride, padding]  NOTE: signature is torch.nn.functional.max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
        ('conv2d', [192, 64, 5, 5, 1, 2]),  # [3] [ch_out, ch_in, kernelsz, kernelsz, stride, padding]  # w.shape=[ch_out, ch_in, kernelsz, kernelsz], bias.shape=[ch_out]
        ('relu', [False]),  # [4]
        ('max_pool2d', [3, 2, 0]),  # [5] [kernelsz, stride, padding]  NOTE: signature is torch.nn.functional.max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
        ('conv2d', [384, 192, 3, 3, 1, 1]),  # [6] [ch_out, ch_in, kernelsz, kernelsz, stride, padding]  # w.shape=[ch_out, ch_in, kernelsz, kernelsz], bias.shape=[ch_out]
        ('relu', [False]),  # [7]
        ('conv2d', [256, 384, 3, 3, 1, 1]),  # [8] [ch_out, ch_in, kernelsz, kernelsz, stride, padding]  # w.shape=[ch_out, ch_in, kernelsz, kernelsz], bias.shape=[ch_out]
        ('relu', [False]),  # [9]
        ('conv2d', [256, 256, 3, 3, 1, 1]),  # [10] [ch_out, ch_in, kernelsz, kernelsz, stride, padding]  # w.shape=[ch_out, ch_in, kernelsz, kernelsz], bias.shape=[ch_out]
        ('relu', [False]),  # [11]
        ('max_pool2d', [3, 2, 0]),  # [12] [kernelsz, stride, padding]  NOTE: signature is torch.nn.functional.max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
        ('flatten', []),
        ('dropout', [0.5, False]),  # [0-13] torch.nn.functional.dropout(input, p=0.5, training=False, inplace=False)
        ('linear', [4096, 9216]),  # [1-14]
        ('relu', [False]),  # [2-15]
        ('dropout', [0.5, False]),  # [3-16] torch.nn.functional.dropout(input, p=0.5, training=False, inplace=False)
        ('linear', [4096, 4096]),  # [4-17]
        ('relu', [False]),  # [5-18]
        ('linear', [32, 4096])  # [19] hashing head: 4096 -> 32-bit codes
]

class Learner(nn.Module):
    """AlexNet-style hashing network whose weights live in a flat ParameterList.

    Parameters are stored as one flat list so that MAML-style "fast weights"
    (externally computed updated tensors) can be passed into forward() in
    place of the stored parameters without touching module state.
    """

    def __init__(self, config, path='/home/disk1/zhaoyuying/models/alexnet-owt-4df8aa71.pth'):
        """
        :param config: network config, list of (op_name, param_list) tuples
        :param path: pretrained AlexNet checkpoint; initializes every layer
                     except the final hashing head
        """
        super(Learner, self).__init__()
        self.config = config
        # Flat list of every tensor to be optimized, in forward order.
        self.vars = nn.ParameterList()

        ini_param = torch.load(path)
        keys = list(ini_param.keys())
        # Copy all pretrained tensors except the final classifier (last w/b).
        # Bug fix: wrap in nn.Parameter — state-dict values are plain Tensors,
        # and ParameterList requires registered Parameters to make them trainable.
        for i in range(len(keys) - 2):
            self.vars.append(nn.Parameter(ini_param[keys[i]]))
        # Freshly initialized hashing head (the last 'linear' entry in config).
        net_tem = nn.Linear(self.config[-1][1][1], self.config[-1][1][0])
        net_tem.bias.data.fill_(0)
        nn.init.xavier_uniform_(net_tem.weight, gain=0.5)

        self.vars.append(net_tem.weight)
        self.vars.append(net_tem.bias)

    def forward(self, x, vars=None, dropout_training=True):
        """Run the config-driven network.

        :param x: input image batch
        :param vars: optional flat parameter list (fast weights); defaults to
                     this module's own parameters
        :param dropout_training: whether dropout layers are active
        :return: raw (un-squashed) hash features
        """
        if vars is None:
            vars = self.vars

        idx = 0
        # Bug fix: op names are compared with '==' — the original used 'is',
        # which tests object identity and only worked via CPython interning.
        for name, param in self.config:
            if name == 'conv2d':
                w, b = vars[idx], vars[idx + 1]
                x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
                idx += 2
            elif name == 'convt2d':
                w, b = vars[idx], vars[idx + 1]
                x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
                idx += 2
            elif name == 'linear':
                w, b = vars[idx], vars[idx + 1]
                x = F.linear(x, w, b)
                idx += 2
            elif name == 'dropout':
                x = F.dropout(input=x, p=param[0], training=dropout_training, inplace=param[1])
            elif name == 'flatten':
                x = x.view(x.size(0), -1)
            elif name == 'reshape':
                # e.g. [b, 8] => [b, 2, 2, 2]
                x = x.view(x.size(0), *param)
            elif name == 'relu':
                x = F.relu(x, inplace=param[0])
            elif name == 'leakyrelu':
                x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
            elif name == 'tanh':
                x = torch.tanh(x)  # F.tanh is deprecated
            elif name == 'sigmoid':
                x = torch.sigmoid(x)
            elif name == 'upsample':
                # F.upsample_nearest is deprecated; interpolate is equivalent.
                x = F.interpolate(x, scale_factor=param[0], mode='nearest')
            elif name == 'max_pool2d':
                x = F.max_pool2d(x, param[0], param[1], param[2])
            elif name == 'avg_pool2d':
                x = F.avg_pool2d(x, param[0], param[1], param[2])
            else:
                raise NotImplementedError

        # Every parameter must have been consumed exactly once.
        assert idx == len(vars)

        return x

    def zero_grad(self, vars=None):
        """Zero the gradients of `vars` (or of this module's own parameters)."""
        with torch.no_grad():
            if vars is None:
                for p in self.vars:
                    if p.grad is not None:
                        p.grad.zero_()
            else:
                for p in vars:
                    if p.grad is not None:
                        p.grad.zero_()

    def parameters(self):
        """Return the flat ParameterList (overrides the generator default so
        callers can index/zip parameters positionally)."""
        return self.vars

# Build the network on GPU and a plain-SGD outer (meta) optimizer over its
# flat parameter list.
net = Learner(config).cuda()
optimizer_meta = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

# epoch = 0
# train(epoch)
# net = train(epoch, net)
# test(epoch)

# Main loop: meta-train every epoch; evaluate retrieval mAP (and checkpoint)
# every 10 epochs, including epoch 0.
for epoch in range(start_epoch, start_epoch+args.max_epochs+1):

    # lr_scheduler_image.adjust_learning_rate(epoch)
    # net = train(epoch, net)
    train(epoch)
    if epoch % 10 == 0:
        test(epoch)

