import torchvision.models as models
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import time
from torch.autograd import Variable
from torch.autograd import Function
import torchvision.transforms as transforms
import random
import itertools
import torch.optim as optim
from torch import autograd

# @timing
# def compute_result(dataloader, net, opt):
#     all_codes, all_labels = [], []
#     net.eval()  # disable BatchNorm updates and dropout (inference mode)
#     sub = (torch.zeros(opt.binary_bits))
#     for imgs, labels in dataloader:
#         codes= net(Variable(imgs.cuda(opt.ngpu), volatile=True))
#         all_codes.append((codes.data.cpu())-sub)
#         all_labels.append(labels)

#     return torch.sign(torch.cat(all_codes)), torch.cat(all_labels)
def compute_result_image(dataloader, learner):
    """
    Run the learner over a dataloader and collect binary hash codes.

    :param dataloader: iterable yielding (images, labels) batches
    :param learner: model exposing ``forward(images) -> real-valued features``
    :return: (sign-quantized codes, concatenated labels, elapsed seconds)
    """
    codes, classes = [], []

    time_start = time.time()
    for images, labels in dataloader:
        # collect labels on CPU; they are never needed on the GPU here
        classes.append(labels.data.cpu())
        # BUG FIX: the forward pass itself must run under no_grad(); the
        # original wrapped only the device transfer, so the autograd graph
        # was built for every evaluation batch (wasted memory). The dead
        # `labels = Variable(labels).cuda()` transfer is dropped as well.
        with torch.no_grad():
            if torch.cuda.is_available():
                images = images.cuda()
            hash_features = learner.forward(images)
        codes.append(hash_features.data.cpu())
    total_time = time.time() - time_start

    return torch.sign(torch.cat(codes)), torch.cat(classes), total_time

def compute_mAP(trn_binary, tst_binary, trn_label, tst_label):
    """
    compute mAP by searching testset from trainset
    https://github.com/flyingpot/pytorch_deephash

    :param trn_binary: (N, bits) database codes, entries +1/-1
    :param tst_binary: (M, bits) query codes, entries +1/-1
    :param trn_label:  (N, C) multi-hot database labels
    :param tst_label:  (M, C) multi-hot query labels
    :return: scalar tensor, mean average precision over all queries
    """
    # BUG FIX: Tensor.long() is NOT in-place -- the original
    # `for x in ...: x.long()` computed and discarded the casts.
    trn_binary = trn_binary.long()
    tst_binary = tst_binary.long()
    trn_label = trn_label.long()
    tst_label = tst_label.long()

    AP = []
    # Ns[k] = k+1, the denominator for precision at each retrieval depth
    Ns = torch.arange(1, trn_binary.size(0) + 1)
    Ns = Ns.type(torch.FloatTensor)
    for i in range(tst_binary.size(0)):
        query_label, query_binary = tst_label[i], tst_binary[i]
        # Hamming distance to every database code, sorted ascending;
        # query_result holds the ranked database indices.
        _, query_result = torch.sum((query_binary != trn_binary).long(), dim=1).sort()
        # A retrieved item is relevant when it shares at least one label
        # with the query (multi-label similarity).
        correct = ((trn_label[query_result] * query_label).sum(1) > 0).float()
        num_relevant = torch.sum(correct)
        # BUG FIX: skip queries with no relevant database item; the
        # original divided 0/0, producing NaN that poisoned the mean.
        if num_relevant == 0:
            continue
        P = torch.cumsum(correct, dim=0) / Ns
        AP.append(torch.sum(P * correct) / num_relevant)
    mAP = torch.mean(torch.Tensor(AP))
    return mAP

def compute_mAP_MultiLabels(trn_binary, tst_binary, trn_label, tst_label):
    """
    compute mAP by searching testset from trainset
    https://github.com/flyingpot/pytorch_deephash

    Multi-label variant: a retrieved item counts as relevant when its label
    vector shares at least one active class with the query's.

    :param trn_binary: (N, bits) database codes, entries +1/-1
    :param tst_binary: (M, bits) query codes, entries +1/-1
    :param trn_label:  (N, C) multi-hot database labels
    :param tst_label:  (M, C) multi-hot query labels
    :return: scalar tensor, mean average precision over all queries
    """
    # BUG FIX: Tensor.long() is NOT in-place -- the original
    # `for x in ...: x.long()` computed and discarded the casts.
    trn_binary = trn_binary.long()
    tst_binary = tst_binary.long()
    trn_label = trn_label.long()
    tst_label = tst_label.long()

    AP = []
    # Ns[k] = k+1, the denominator for precision at each retrieval depth
    Ns = torch.arange(1, trn_binary.size(0) + 1)
    Ns = Ns.type(torch.FloatTensor)
    for i in range(tst_binary.size(0)):
        query_label, query_binary = tst_label[i], tst_binary[i]
        # Hamming distance to every database code, sorted ascending;
        # query_result holds the ranked database indices.
        _, query_result = torch.sum((query_binary != trn_binary).long(), dim=1).sort()
        # relevant iff the label vectors overlap in at least one class
        correct = ((trn_label[query_result] * query_label).sum(1) > 0).float()
        num_relevant = torch.sum(correct)
        # BUG FIX: skip queries with no relevant database item; the
        # original divided 0/0, producing NaN that poisoned the mean.
        if num_relevant == 0:
            continue
        P = torch.cumsum(correct, dim=0) / Ns
        AP.append(torch.sum(P * correct) / num_relevant)
    mAP = torch.mean(torch.Tensor(AP))
    return mAP


class Learner(nn.Module):
    """
    MAML-style meta-learner for deep hashing.

    Holds two parallel hashing networks, each an AlexNet feature extractor
    (classifier truncated to its first six layers, so it emits 4096-d
    features) followed by a linear head projecting to `code_length` bits:

      * net_1 / net_2       -- the "theta" network, updated with summed
                               meta-gradients via write_grads()
      * net_1_pi / net_2_pi -- the "pi" copy used for inner-loop adaptation,
                               re-initialised from theta by update_pi()

    NOTE(review): `semantic_vector_length` is accepted but never used in this
    class -- confirm whether any caller relies on it before removing.
    """

    def __init__(self, args, semantic_vector_length=300, code_length=32):
        """
        :param args: options object; must provide lr, momentum, weight_decay
        :param semantic_vector_length: unused here (see class NOTE)
        :param code_length: number of hash bits produced by the linear head
        """
        super(Learner, self).__init__()

        # theta network: ImageNet-pretrained AlexNet trunk + fresh hash head.
        self.net_1 = models.alexnet(pretrained=True).cuda()
        # keep classifier[0:6]: drop the final fc layer so the trunk
        # outputs 4096-d features instead of class logits
        self.net_1.classifier = self.net_1.classifier[0:6]
        self.net_2 = nn.Linear(4096, code_length).cuda()

        # pi network: same architecture, randomly initialised (its weights
        # are overwritten from theta by update_pi() before use).
        self.net_1_pi = models.alexnet().cuda()
        self.net_1_pi.classifier = self.net_1_pi.classifier[0:6]
        self.net_2_pi = nn.Linear(4096, code_length).cuda()

        # Separate optimizers: `optimizer` applies the summed meta-gradients
        # to theta (see write_grads); `optimizer_pi` runs inner-loop steps.
        self.optimizer = optim.SGD(list(self.net_1.parameters())+list(self.net_2.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        self.optimizer_pi = optim.SGD(list(self.net_1_pi.parameters())+list(self.net_2_pi.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    def update_pi(self):
        """
        Copy parameters from the theta network (net_1/net_2) into the pi
        network (net_1_pi/net_2_pi), so inner-loop adaptation starts from
        the current meta-parameters.
        :return: None
        """
        # modules() yields submodules in registration order; zipping two
        # identical architectures therefore pairs corresponding layers.
        for m_from, m_to in zip(self.net_1.modules(), self.net_1_pi.modules()):
            if isinstance(m_to, nn.Linear) or isinstance(m_to, nn.Conv2d) or isinstance(m_to, nn.BatchNorm2d):
                m_to.weight.data = m_from.weight.data.clone()
                if m_to.bias is not None:
                    m_to.bias.data = m_from.bias.data.clone()

        for m_from, m_to in zip(self.net_2.modules(), self.net_2_pi.modules()):
            if isinstance(m_to, nn.Linear) or isinstance(m_to, nn.Conv2d) or isinstance(m_to, nn.BatchNorm2d):
                m_to.weight.data = m_from.weight.data.clone()
                if m_to.bias is not None:
                    m_to.bias.data = m_from.bias.data.clone()


    def write_grads(self, dummy_loss, sum_grads_pi):
        """
        write loss into learner.net, gradients come from sum_grads_pi.
        Since the gradients info is not calculated by general backward, we need this function to write the right gradients
        into theta network and update theta parameters as wished.
        :param dummy_loss: dummy loss, nothing but to write our gradients by hook
        :param sum_grads_pi: the summed gradients
        :return:
        """

        # Register a hook on each parameter in the net that replaces the current dummy grad
        # with our grads accumulated across the meta-batch
        hooks = []

        for i, v in enumerate(list(self.net_1.parameters())+list(self.net_2.parameters())):
            def closure():
                # Bind the loop index by value: a plain
                # `lambda grad: sum_grads_pi[i]` captures `i` late, so every
                # hook would read the final index (Python closures bind names,
                # not values).
                ii = i
                return lambda grad: sum_grads_pi[ii]

            # if you write: hooks.append( v.register_hook(lambda grad : sum_grads_pi[i]) )
            # it fails due to the late-binding behavior described above
            hooks.append(v.register_hook(closure()))

        # use our summed gradients_pi to update the theta/net network,
        # since our optimizer receives the self.net.parameters() only.
        self.optimizer.zero_grad()
        dummy_loss.backward()
        self.optimizer.step()

        # if you do NOT remove the hooks, the GPU memory will explode!!!
        for h in hooks:
            h.remove()


    def forward(self, x):
        """Encode a batch of images with the theta network.

        :param x: image batch accepted by AlexNet (presumably (B, 3, 224, 224)
            -- TODO confirm against the dataloader transforms)
        :return: (B, code_length) real-valued hash features
        """
        features = self.net_1(x)
        hashcodes = self.net_2(features)

        return hashcodes

    def forward_pi(self, x):
        """Encode a batch of images with the pi (inner-loop) network.

        :param x: image batch accepted by AlexNet
        :return: (B, code_length) real-valued hash features
        """
        features = self.net_1_pi(x)
        hashcodes = self.net_2_pi(features)

        return hashcodes

    def update_net(self, args, images, labels, grads_sum):
        """
        Apply the summed inner-loop gradients to the theta network.

        Runs a forward pass to build a dummy loss (its value is irrelevant;
        it only provides an autograd graph), then write_grads() replaces the
        backward gradients with `grads_sum` and steps the theta optimizer.

        :param args: options object; must provide `margin` for triplet_loss
        :param images: image batch for the dummy forward pass
        :param labels: multi-hot labels matching `images`
        :param grads_sum: per-parameter summed gradients, ordered like
            list(self.net_1.parameters()) + list(self.net_2.parameters())
        :return: None
        """
        # torch.save(grads_pi, 'debug_file/grads_pi')
        dummy_hashcodes = self.forward(images)
        dummy_loss = triplet_loss(dummy_hashcodes, labels, args.margin)

        # torch.save(self.net_1.state_dict(), 'debug_file/net_1')
        # torch.save(self.net_2.state_dict(), 'debug_file/net_2')
        self.write_grads(dummy_loss, grads_sum)
        # torch.save(self.net_1.state_dict(), 'debug_file/after_net_1')
        # torch.save(self.net_2.state_dict(), 'debug_file/after_net_2')
        


    


def triplet_loss(Ihash, labels, margin):
    """
    Multi-label triplet loss over all (anchor, positive, negative) triplets
    in the batch.

    Two samples are "similar" when their multi-hot label vectors share at
    least one active class. For every sample, all similar pairs are combined
    with every dissimilar sample, and the squared-Euclidean triplet hinge
    ``relu(margin + d(a, p) - d(a, n))`` is averaged over all triplets.

    :param Ihash: (B, bits) real-valued hash features
    :param labels: (B, C) multi-hot label tensor
    :param margin: hinge margin (float)
    :return: scalar loss tensor on the same device as Ihash
    """
    # BUG FIX: the fallback zero was hard-coded to .cuda(), which crashes on
    # CPU-only hosts and returns a device-mismatched tensor for CPU inputs.
    # Allocating on Ihash.device is identical for CUDA inputs.
    loss = torch.tensor(0.0, device=Ihash.device)
    labels_np = labels.detach().cpu().numpy()
    triplets = []
    for label in labels_np:
        # similar iff the label vectors overlap in at least one class
        positive_mask = np.matmul(labels_np, np.transpose(label)) > 0  # multi-labels
        positive_indices = np.where(positive_mask)[0]
        if len(positive_indices) < 2:
            continue  # need at least an (anchor, positive) pair
        negative_indices = np.where(np.logical_not(positive_mask))[0]
        if len(negative_indices) < 1:
            continue  # no dissimilar sample available in this batch
        anchor_positives = list(itertools.combinations(positive_indices, 2))
        triplets += [[a, p, n] for a, p in anchor_positives
                     for n in negative_indices]

    if triplets:
        triplets = np.array(triplets)
        # squared Euclidean distances anchor-positive / anchor-negative
        d_ap = (Ihash[triplets[:, 0]] - Ihash[triplets[:, 1]]).pow(2).sum(1)
        d_an = (Ihash[triplets[:, 0]] - Ihash[triplets[:, 2]]).pow(2).sum(1)
        loss = F.relu(margin + d_ap - d_an).mean()

    return loss

# class TripletLoss(nn.Module):
#     # pass
#     # def __init__(self, margin, device):
#     def __init__(self, margin):
#         super(TripletLoss, self).__init__()
#         self.margin = margin
#         # self.device = device

#     def similarity(self, label1, label2):
#         return label1 == label2  # default with singe label

#     def forward(self, x, labels):
#         labels_ = labels.cpu().numpy()
#         # labels_ = np.array(labels.cpu())
#         # print('labels_ = ', labels_)
#         # labels_ = np.array(labels.cpu())
#         triplets = []
#         # triplet_loss = torch.tensor(0.0).to(self.device)
#         triplet_loss = torch.tensor(0.0).cuda()
#         # triplet_loss = Variable(torch.tensor(0.0).to(self.device), requires_grad=True)
#         # for label in set(labels_):

#         # for label in labels_:
#         # labels_num = 10
#         labels_set = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
#                                [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
#                                [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
#                                [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
#                                [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
#                                [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
#                                [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
#                                [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
#                                [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
#                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
#         for label in labels_set:
#             label_mask = (labels_ == label)
#             label_indices = np.where(label_mask)[0]
#             if len(label_indices) < 2:
#                 continue
#             negative_indices = np.where(np.logical_not(label_mask))[0]
#             if len(negative_indices) < 1:
#                 continue
            
#             anchor_positives = list(itertools.combinations(label_indices, 2))
#             temp = [[anchor_positive[0], anchor_positive[1], neg_ind] for anchor_positive in anchor_positives
#                     for neg_ind in negative_indices]
#             triplets += temp


#         if triplets:
#             # triplets = torch.LongTensor(np.array(triplets)).to(self.device)
#             triplets = torch.LongTensor(np.array(triplets)).cuda()
#             sq_ap = (x[triplets[:, 0]] - x[triplets[:, 1]]).pow(2).sum(1)
#             sq_an = (x[triplets[:, 0]] - x[triplets[:, 2]]).pow(2).sum(1)
#             losses = F.relu(self.margin + sq_ap - sq_an)
#             # triplet_count = torch.tensor(losses.size()[0]).float().to(self.device)
#             triplet_count = torch.tensor(losses.size()[0]).float().cuda()
#             if triplet_count>0:
#                 triplet_loss=losses.sum()/triplet_count

#         return triplet_loss

