import torchvision.models as models
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import time
from torch.autograd import Variable
from torch.autograd import Function
import torchvision.transforms as transforms
from GoogLeNet import *
import random

input_ImageSize = int(448)

global_GoogLeNet_out_channel = 1024
global_GoogLeNet_out_feature = 14

# the feature size is of 7x7xp, being p the number of channels
global_feature_size = 7
global_channel_num = 480
class LrScheduler:
    """Step learning-rate scheduler for an optimizer with two logical groups.

    Param group 0 (the GoogLeNet backbone) is driven by ``base_lr_1``; every
    remaining group (the rest of the model) is driven by ``base_lr_2``.  Both
    base rates are multiplied by ``lr_decay_ratio`` once per ``epoch_step``
    epochs.
    """

    def __init__(self, optimizer, base_lr_1, base_lr_2, lr_decay_ratio, epoch_step):
        self.base_lr_1 = base_lr_1
        self.base_lr_2 = base_lr_2
        self.lr_decay_ratio = lr_decay_ratio
        self.epoch_step = epoch_step
        self.optimizer = optimizer

    def adjust_learning_rate(self, epoch):
        """Set each param group's lr to its base lr decayed by
        ``lr_decay_ratio ** (epoch // epoch_step)``."""
        decay = self.lr_decay_ratio ** (epoch // self.epoch_step)
        lr_1 = self.base_lr_1 * decay
        lr_2 = self.base_lr_2 * decay
        for idx, param_group in enumerate(self.optimizer.param_groups):
            if idx == 0:
                # first group: GoogLeNet backbone
                param_group['lr'] = lr_1
                if epoch % self.epoch_step == 0:
                    print('# setting learning_rate_googlenet to %.2E' % lr_1)
            else:
                # all remaining groups: the rest of the model
                param_group['lr'] = lr_2
                if epoch % self.epoch_step == 0:
                    print('# setting learning_rate_rest to %.2E' % lr_2)

class MultiLabelTripletLoss(nn.Module):
    """Triplet loss driven by multi-label similarity levels.

    The similarity of two samples is the number of class labels they share
    (dot product of their label rows).  For an anchor i, a triplet
    (i, pos, neg) is valid when pos shares exactly ``shared`` labels with i
    and neg shares strictly fewer, for every similarity level ``shared``.

    Three variants are provided:
      * forward               -- all valid triplets, losses weighted by
                                 2**sim_ap - 2**sim_an
      * forward_not_weighted  -- all valid triplets, unweighted
      * forward_hard_mining   -- a random subsample of triplets (weighted)

    NOTE(review): all variants allocate tensors with ``.cuda()``, so a CUDA
    device is required.  ``labels`` is presumably a [batch, num_classes]
    multi-hot (0/1) matrix — inferred from the ``torch.mm`` similarity;
    confirm with the caller.
    """

    def __init__(self, margin):
        # margin added to d(a,p)^2 - d(a,n)^2 before the hinge
        super(MultiLabelTripletLoss, self).__init__()
        self.margin = margin

    def forward(self, x, labels):
        """Weighted triplet loss over ALL valid triplets.

        x: [batch, code_length] embeddings; labels: [batch, num_classes].
        Returns (triplet_loss, triplet_count); loss is 0 when no valid
        triplet exists.

        NOTE(review): unlike forward_hard_mining, the anchor is NOT excluded
        from its own positive set, so (i, i, neg) triplets with sq_ap == 0
        can occur — confirm this is intended.
        """
        self.batch_size = x.size()[0]  # batch_size
        # print('batch_size = ', self.batch_size)

        triplet_loss = torch.tensor(0.0).cuda()
        
        triplets = []
        # sim[i][j] = dot(labels[i], labels[j]) = number of labels shared
        # by image i and image j
        sim = torch.mm(labels.float(), labels.t().float()) 
        # print('labels.float() = ', labels.float())
        # print('labels.t().float() = ', labels.t().float())
        # print('sim = ', sim)

        for i in range(self.batch_size):
            # iterate over every similarity level 1..max shared count
            max_shared = torch.max(sim[i,:])+1
            for shared in range(1, max_shared.int()):
                # positives share exactly `shared` labels, negatives fewer
                p = (sim[i,:] == shared)
                n = (sim[i,:] < shared)
                pos_indexs = torch.nonzero(p)
                neg_indexs = torch.nonzero(n)
                pos_indexs = list(pos_indexs.view(pos_indexs.size(0)).to("cpu").numpy())
                neg_indexs = list(neg_indexs.view(neg_indexs.size(0)).to("cpu").numpy())
                temp=[[i, pos, neg] for pos in pos_indexs
                      for neg in neg_indexs]
                triplets += temp

        if triplets:
            triplets = np.array(triplets)
            # squared euclidean distances anchor-positive / anchor-negative
            sq_ap = (x[triplets[:, 0]] - x[triplets[:, 1]]).pow(2).sum(1)
            sq_an = (x[triplets[:, 0]] - x[triplets[:, 2]]).pow(2).sum(1)
            losses = F.relu(self.margin + sq_ap - sq_an)
            # print('triplets[:, 0] = ', triplets[:, 0])
            # print('triplets[:, 1] = ', triplets[:, 1])
            # print('triplets[:, 2] = ', triplets[:, 2])
            # print('losses = ', losses)
            # ----- weighted loss ----------------------
            # weight each triplet by 2**sim_ap - 2**sim_an: separating
            # highly-similar pairs from dissimilar ones counts more
            sim_ap = sim[triplets[:, 0], triplets[:, 1]]
            sim_an = sim[triplets[:, 0], triplets[:, 2]]
            weight_factors = torch.pow(2, sim_ap) - torch.pow(2, sim_an)
            # print('sim_ap = ', sim_ap)
            # print('sim_an = ', sim_an)
            # print('weight_factors = ', weight_factors)
            weighted_losses = losses.mul(weight_factors.cuda())
            # print('weighted_losses = ', weighted_losses)
            # ------------------------------------------
            triplet_count = torch.tensor(losses.size()[0]).float().cuda()
            # for i in range(losses.size()[0]):
            #     print('i=%d, sim_ap=%.1f, sim_an=%.1f, weight_factor=%.3f, loss=%.5f, weighted_loss=%.5f' 
            #               %(i, sim_ap[i], sim_an[i], weight_factors[i], losses[i], weighted_losses[i]))
            
            if triplet_count > 0:
                triplet_loss = weighted_losses.sum() / triplet_count
            # if triplet_count > 0:
            #     triplet_loss = losses.sum() / triplet_count
        else:
            triplet_count = torch.tensor(0).float().cuda() 

        return triplet_loss, triplet_count

    def forward_not_weighted(self, x, labels):
        """Unweighted variant of :meth:`forward`: plain hinge loss averaged
        over all valid triplets.  Same inputs and return values."""
        self.batch_size = x.size()[0]  # batch_size
        # print('batch_size = ', self.batch_size)

        triplet_loss = torch.tensor(0.0).cuda()
        
        triplets = []
        # sim[i][j] = dot(labels[i], labels[j]) = number of labels shared
        # by image i and image j
        sim = torch.mm(labels.float(), labels.t().float()) 
        # print('labels.float() = ', labels.float())
        # print('labels.t().float() = ', labels.t().float())
        # print('sim = ', sim)

        for i in range(self.batch_size):
            max_shared = torch.max(sim[i,:])+1
            for shared in range(1, max_shared.int()):
                p = (sim[i,:] == shared)
                n = (sim[i,:] < shared)
                pos_indexs = torch.nonzero(p)
                neg_indexs = torch.nonzero(n)
                pos_indexs = list(pos_indexs.view(pos_indexs.size(0)).to("cpu").numpy())
                neg_indexs = list(neg_indexs.view(neg_indexs.size(0)).to("cpu").numpy())
                temp=[[i, pos, neg] for pos in pos_indexs
                      for neg in neg_indexs]
                triplets += temp

        if triplets:
            triplets = np.array(triplets)
            sq_ap = (x[triplets[:, 0]] - x[triplets[:, 1]]).pow(2).sum(1)
            sq_an = (x[triplets[:, 0]] - x[triplets[:, 2]]).pow(2).sum(1)
            losses = F.relu(self.margin + sq_ap - sq_an)
            triplet_count = torch.tensor(losses.size()[0]).float().cuda()
            
            if triplet_count > 0:
                triplet_loss = losses.sum() / triplet_count
        else:
            triplet_count = torch.tensor(0).float().cuda() 

        return triplet_loss, triplet_count

    def forward_hard_mining(self, x, labels):
        """Weighted loss on a random subsample of at most ``batch_size * 3``
        triplets, excluding the anchor from its own positives.

        NOTE(review): despite the name, the subsample is chosen by
        ``random.shuffle`` — it is random sampling, not hardness-based
        mining.  Confirm whether true hard mining was intended.
        """
        self.batch_size = x.size()[0]  # batch_size
        # print('batch_size = ', self.batch_size)

        triplet_loss = torch.tensor(0.0).cuda()
        
        triplets = []
        # sim[i][j] = dot(labels[i], labels[j]) = number of labels shared
        # by image i and image j
        sim = torch.mm(labels.float(), labels.t().float()) 
        # print('labels.float() = ', labels.float())
        # print('labels.t().float() = ', labels.t().float())
        # print('sim = ', sim)

        for i in range(self.batch_size):
            max_shared = torch.max(sim[i,:])+1
            for shared in range(1, max_shared.int()):
                p = (sim[i,:] == shared)
                n = (sim[i,:] < shared)
                pos_indexs = torch.nonzero(p)
                neg_indexs = torch.nonzero(n)
                pos_indexs = list(pos_indexs.view(pos_indexs.size(0)).to("cpu").numpy())
                neg_indexs = list(neg_indexs.view(neg_indexs.size(0)).to("cpu").numpy())
                # here the anchor is excluded from the positive set
                temp=[[i, pos, neg] for pos in pos_indexs
                    for neg in neg_indexs if pos != i]
                triplets += temp
        # print('triplets = ', triplets)
        random.shuffle(triplets)
        # print('triplets = ', triplets)
        # cap the number of triplets at 3 per batch element
        min_num = min(len(triplets), self.batch_size*3)
        triplets = triplets[0:min_num]
        # print('triplets = ', triplets)
        # print('type(triplets) = ', type(triplets))
        if triplets:
            triplets = np.array(triplets)
            sq_ap = (x[triplets[:, 0]] - x[triplets[:, 1]]).pow(2).sum(1)
            sq_an = (x[triplets[:, 0]] - x[triplets[:, 2]]).pow(2).sum(1)
            losses = F.relu(self.margin + sq_ap - sq_an)
            # print('triplets[:, 0] = ', triplets[:, 0])
            # print('triplets[:, 1] = ', triplets[:, 1])
            # print('triplets[:, 2] = ', triplets[:, 2])
            # print('losses = ', losses)
            # ----- weighted loss ----------------------
            sim_ap = sim[triplets[:, 0], triplets[:, 1]]
            sim_an = sim[triplets[:, 0], triplets[:, 2]]
            weight_factors = torch.pow(2, sim_ap) - torch.pow(2, sim_an)
            # print('sim_ap = ', sim_ap)
            # print('sim_an = ', sim_an)
            # print('weight_factors = ', weight_factors)
            weighted_losses = losses.mul(weight_factors.cuda())
            # print('weighted_losses = ', weighted_losses)
            # ------------------------------------------
            triplet_count = torch.tensor(losses.size()[0]).float().cuda()
            # print('triplet_count = ', triplet_count)
            # for i in range(losses.size()[0]):
            #     print('i=%d, sim_ap=%.1f, sim_an=%.1f, weight_factor=%.3f, loss=%.5f, weighted_loss=%.5f' 
            #               %(i, sim_ap[i], sim_an[i], weight_factors[i], losses[i], weighted_losses[i]))
            
            if triplet_count > 0:
                triplet_loss = weighted_losses.sum() / triplet_count
            # if triplet_count > 0:
            #     triplet_loss = losses.sum() / triplet_count
        else:
            triplet_count = torch.tensor(0).float().cuda()

        return triplet_loss, triplet_count

def MultiLabel_CrossEntropyLoss(x, labels):
    """Multi-label cross-entropy: -sum(labels * log(x)) averaged over the batch.

    x: [batch, num_classes] predicted probabilities (e.g. sigmoid outputs);
    labels: [batch, num_classes] multi-hot ground truth.
    Returns a scalar tensor.
    """
    n = x.size(0)
    log_probs = torch.log(x)
    total = torch.sum(labels.float().mul(log_probs))
    return -total / float(n)


def compute_result_image(dataloader, featureExtractor_1, featureExtractor_2, hashNet):
    """Run the full pipeline over a dataloader and collect binary hash codes.

    Returns (sign(hash_features), labels, elapsed_seconds) with the first two
    concatenated over all batches on the CPU.

    Fix: the original wrapped only the ``Variable(...).cuda()`` moves in
    ``torch.no_grad()``, so the three forward passes still built autograd
    graphs; the whole inference loop now runs without gradient tracking
    (and the deprecated ``Variable`` wrapper is dropped).
    """
    bs, clses = [], []

    time_start = time.time()
    with torch.no_grad():
        for images, labels in dataloader:
            clses.append(labels.data.cpu())
            images = images.cuda()

            imageFeatures_1 = featureExtractor_1.forward(images)  # torch.Size([16, 1024, 14, 14])
            imageFeatures = featureExtractor_2.forward(imageFeatures_1)  # torch.Size([16, 480, 14, 14])
            hashFeatures = hashNet.forward_hashcode(imageFeatures)

            bs.append(hashFeatures.data.cpu())
    total_time = time.time() - time_start

    return torch.sign(torch.cat(bs)), torch.cat(clses), total_time


def compute_mAP_MultiLabels(trn_binary, tst_binary, trn_label, tst_label):
    """
    compute mAP by searching testset from trainset
    https://github.com/flyingpot/pytorch_deephash

    A training sample is "relevant" to a query when they share at least one
    label.  Retrieval ranks the training set by Hamming distance to the
    query's binary code.

    Fix: the original ``for x in ...: x.long()`` discarded the cast results
    (``Tensor.long`` is not in-place), so the intended integer conversion
    never happened; the tensors are now rebound to their cast copies.
    """
    trn_binary, tst_binary = trn_binary.long(), tst_binary.long()
    trn_label, tst_label = trn_label.long(), tst_label.long()

    AP = []
    # Ns[k] = k+1, the retrieved-list length at each rank (for precision)
    Ns = torch.arange(1, trn_binary.size(0) + 1)
    Ns = Ns.type(torch.FloatTensor)
    for i in range(tst_binary.size(0)):
        query_label, query_binary = tst_label[i], tst_binary[i]
        # Hamming distance to every training code, sorted ascending;
        # query_result holds the ranked training-set indices
        _, query_result = torch.sum((query_binary != trn_binary).long(), dim=1).sort()
        # relevant = shares at least one label with the query
        correct = ((trn_label[query_result] * query_label).sum(1) > 0).float()
        # precision at each rank
        P = torch.cumsum(correct, dim=0) / Ns
        # average precision for this query (NaN if no relevant sample exists,
        # matching the original behavior)
        AP.append(torch.sum(P * correct) / torch.sum(correct))
    mAP = torch.mean(torch.Tensor(AP))
    return mAP



# feature extractor
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo

def FeatureExtractor_1(weights="0"):  # [batch_size, 1024, 14, 14]
    """Build the Inception-v1 (GoogLeNet) backbone and load its weights.

    weights: path to a state-dict file, or the sentinel "0" to load the
    default pretrained weights shipped with the project.
    """
    model = Inception_v1()
    weight_path = '../models/inception_v1_weights.pth' if weights == "0" else weights
    model.load_state_dict(torch.load(weight_path))
    return model

class BasicConv2d(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    The convolution weights are initialized from N(0, 0.01) and its bias to
    zero; extra keyword arguments are forwarded to ``nn.Conv2d``.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, bias=True, **kwargs)
        nn.init.normal_(conv.weight, mean=0, std=0.01)
        nn.init.constant_(conv.bias, 0)
        self.conv = conv
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        out = self.bn(self.conv(x))
        return F.relu(out, inplace=True)

def FeatureExtractor_2(weights="0"):  # [batch_size, 480, 14, 14]
    """3x3 conv block mapping GoogLeNet features (1024 ch) to 480 channels.

    weights: path to a state-dict file, or the sentinel "0" to keep the
    fresh random initialization.

    Fix: both branches of the original ``if`` constructed an identical
    ``BasicConv2d``; the duplication is collapsed and only the optional
    weight loading remains conditional.
    """
    model = BasicConv2d(global_GoogLeNet_out_channel, global_channel_num,
                        kernel_size=3, stride=1, padding=1)
    if weights != "0":
        model.load_state_dict(torch.load(weights))
    return model


class My_Threshold(Function):
    """Binarize a softmax attention mask with a straight-through gradient.

    Forward keeps the cells whose softmax weight exceeds the uniform level
    1/(14*14) (mask entries > theta become 1, the rest 0).  Backward passes
    the incoming gradient through unchanged (straight-through estimator),
    since the hard threshold itself has zero gradient almost everywhere.
    """

    # Note that both forward and backward are @staticmethods
    @staticmethod
    def forward(ctx, mask):
        # theta = uniform attention weight over a 14x14 feature map
        theta = 1.0 / (14.0 * 14.0)
        # Fix: build the binary mask on the input's own device and dtype.
        # The original allocated a fresh .cuda() zeros tensor, which broke
        # CPU execution and redundantly re-assigned the zero entries.
        return (mask > theta).to(mask.dtype)

    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # straight-through: gradient of identity
        return grad_output


class HashNet(nn.Module):
    """Attention-masked pooling head producing hash codes and class scores.

    A 1x1 conv collapses the input feature map [b, global_channel_num, 14, 14]
    to a single spatial attention map, which is softmax-normalized over the
    14*14 cells and binarized by My_Threshold.  Features are then averaged
    over the kept cells and fed to two linear heads:
      * fc_1 + tanh    -> hash code in [-1, 1]   (triplet loss)
      * fc_2 + sigmoid -> class scores in [0, 1] (cross-entropy loss)

    Fixes over the original: the mask computation and weighted-average
    pooling, previously duplicated across forward / forward_hashcode /
    forward_mask, are factored into private helpers; the per-sample Python
    division loop over a hard-coded ``.cuda()`` zeros tensor (with literal
    480) is replaced by a vectorized, device-agnostic broadcast division.
    """

    def __init__(self, code_length=32, class_num=20):
        super(HashNet, self).__init__()

        # mask net: 1x1 conv -> one attention map per image
        self.conv1 = nn.Conv2d(global_channel_num, 1, kernel_size=1,
                               stride=1, padding=0, bias=True)
        torch.nn.init.normal_(self.conv1.weight, mean=0, std=0.01)
        torch.nn.init.constant_(self.conv1.bias, 0)
        self.softmax = nn.Softmax(dim=1)  # over the flattened spatial cells

        # triplet-loss branch: pooled features -> hash code
        self.fc_1 = nn.Linear(global_channel_num, code_length)
        torch.nn.init.normal_(self.fc_1.weight, mean=0, std=0.01)
        torch.nn.init.constant_(self.fc_1.bias, 0)

        # cross-entropy branch: pooled features -> per-class scores
        self.fc_2 = nn.Linear(global_channel_num, class_num)
        torch.nn.init.normal_(self.fc_2.weight, mean=0, std=0.01)
        torch.nn.init.constant_(self.fc_2.bias, 0)

    def _compute_mask(self, features):
        """Binary spatial attention mask [b, 1, h, w] for the given features."""
        b, _, h, w = features.shape  # h == w == 14 for the GoogLeNet features
        mask = self.conv1(features)               # [b, 1, h, w]
        mask = self.softmax(mask.view(b, h * w))  # softmax over all cells
        mask = mask.view(b, 1, h, w)
        return My_Threshold.apply(mask)           # 1 where weight > uniform

    def _weighted_average_pool(self, features, mask):
        """Average features over the mask-selected cells -> [b, channels]."""
        b = features.size(0)
        pooled = features.mul(mask).view(b, global_channel_num, -1).sum(dim=2)
        counts = mask.view(b, -1).sum(dim=1)  # kept cells per image
        # broadcast division replaces the original per-sample Python loop
        return pooled / counts.unsqueeze(1)

    def forward(self, features):
        """Return (hash_code, class_scores) for a feature batch."""
        mask = self._compute_mask(features)
        pooled = self._weighted_average_pool(features, mask)

        # triplet loss head: code in [-1, 1]
        hmf_1 = torch.tanh(self.fc_1(pooled))
        # cross entropy head: scores in [0, 1]
        hmf_2 = torch.sigmoid(self.fc_2(pooled))
        return hmf_1, hmf_2

    def forward_hashcode(self, features):
        """Return only the tanh hash code (inference/retrieval path)."""
        mask = self._compute_mask(features)
        pooled = self._weighted_average_pool(features, mask)
        return torch.tanh(self.fc_1(pooled))

    def forward_mask(self, features):
        """Return the binary spatial attention mask (for visualization)."""
        return self._compute_mask(features)













# class HashNet(nn.Module):     
#     def __init__(self, code_length = 32, class_num = 20):         
#         super(HashNet, self).__init__()      

#         # mask net         
#         self.conv1 = nn.Conv2d(global_channel_num, 1, kernel_size=1, 
#                                                         stride=1, padding=0, bias=True)         
#         # *****  新增  ************************************
#         # torch.nn.init.xavier_normal_(self.conv1.weight, gain=0.01)
#         torch.nn.init.normal_(self.conv1.weight, mean=0, std=0.01)
#         torch.nn.init.constant_(self.conv1.bias, 0)
#         # *****  end 新增  ***********************************
#         self.softmax = nn.Softmax(dim=1)  # dim=1:行
#         self.thre1 = nn.Threshold(1/(14*14), 0)         
#         self.thre2 = nn.Threshold(-1/(14*14), 1)      
#         # self.max_pool = nn.MaxPool2d(kernel_size=14, stride=14, padding=1) 
#         # self.avg_pool = nn.AvgPool2d(kernel_size=14, stride=14, padding=1) 

#         # triplet loss
#         self.fc_1 = nn.Linear(global_channel_num, code_length)
#         # torch.nn.init.xavier_uniform_(self.fc_1.weight)
#         # *****  新增  ************************************
#         # torch.nn.init.xavier_normal_(self.fc_1.weight, gain=0.01)
#         torch.nn.init.normal_(self.fc_1.weight, mean=0, std=0.01)
#         torch.nn.init.constant_(self.fc_1.bias, 0)
#         # *****  end 新增  ***********************************
#         # self.tanh = nn.Tanh()  # 将每个bit的值映射在 [-1,1]之间

#         # cross entropy loss
#         self.fc_2 = nn.Linear(global_channel_num, class_num)
#         # torch.nn.init.xavier_uniform_(self.fc_2.weight)
#         # *****  新增  ************************************
#         # torch.nn.init.xavier_normal_(self.fc_2.weight, gain=0.01)
#         torch.nn.init.normal_(self.fc_2.weight, mean=0, std=0.01)
#         torch.nn.init.constant_(self.fc_2.bias, 0)
#         # *****  end 新增  ***********************************
#         # self.sigmoid = nn.Sigmoid()  # 将每个bit的值映射在 [0,1]之间


#     def forward(self, features):                

#         # get mask         
#         mask = self.conv1(features)  # 得到 [batch size, 1, 14, 14]
#         mask = mask.view(mask.size(0), 14*14)  # 得到 [batch size, 14*14]
#         mask.register_hook(lambda mask_grad: print('mask00_grad = ', mask_grad))
#         mask = self.softmax(mask)  # 得到每个14×14的feature map的softmax值
#         mask.register_hook(lambda mask_grad: print('mask0_grad = ', mask_grad))
#         mask = mask.view(mask.size(0), 1, 14, 14)
#         # print('mask[0] = ', mask[0])
#         mask.register_hook(lambda mask_grad: print('mask1_grad = ', mask_grad))
#         mask = self.thre1(mask)         
#         mask.register_hook(lambda mask_grad: print('mask2_grad = ', mask_grad))
#         mask = -mask       
#         mask.register_hook(lambda mask_grad: print('mask3_grad = ', mask_grad))  
#         mask = self.thre2(mask)  # torch.Size([10, 1, 14, 14]
#         mask.register_hook(lambda mask_grad: print('mask4_grad = ', mask_grad))
#         # print('mask.shape = ', mask.shape)

#         # weighted average pooling  
#         # print('features.shape = ', features.shape)
#         # print('mask.shape = ', mask.shape)       
#         mask_features = features.mul(mask)   
#         # print('mask_features_1.shape = ', mask_features.shape)
#         # print('mask_features_1 = ', mask_features)
#         # ---------------------------------------------------------


#         # -----------------------------------------------------
#         mask_features = mask_features.view(mask_features.size(0), global_channel_num, 14*14)
#         mask_features = torch.sum(mask_features, dim=2)  # batch size × 480
#         fenmu_flatten = mask.view(mask.size(0), 14*14)
#         fenmu = torch.sum(fenmu_flatten, dim=1)  # batch size × 1
#         # print('fenmu = ', fenmu)
#         mask_features_new = torch.zeros([mask_features.shape[0], 480]).cuda()
#         for i in range(fenmu.shape[0]):
#             mask_features_new[i] = mask_features[i]/fenmu[i]
#         # ------------------------------------------------------
#         # print('mask_features[:, 0:10] = ', mask_features[:, 0:10])
#         # print('mask_features_new[:, 0:10] = ', mask_features_new[:, 0:10])

#         # mask_features = self.max_pool(mask_features)
#         # mask_features = mask_features.view(mask_features.size(0), global_channel_num)
#         # mask_features = self.avg_pool(mask_features)

#         # triplet loss
#         hmf_1 = torch.tanh(self.fc_1(mask_features_new))

#         # cross entropy loss
#         hmf_2 = torch.sigmoid((self.fc_2(mask_features_new)))

#         return hmf_1, hmf_2

#     def forward_hashcode(self, features):         
        
#         mask = self.conv1(features)         
#         mask = mask.view(mask.size(0), 14*14)         
#         mask = self.softmax(mask)     
#         mask = mask.view(mask.size(0), 1, 14, 14)
#         mask = self.thre1(mask)         
#         mask = -mask         
#         mask = self.thre2(mask)
        
#         # weighted average pooling  
#         # print('features.shape = ', features.shape)
#         # print('mask.shape = ', mask.shape)       
#         mask_features = features.mul(mask)   
#         # print('mask_features_1.shape = ', mask_features.shape)
#         # print('mask_features_1 = ', mask_features)
#         # ---------------------------------------------------------


#         # -----------------------------------------------------
#         mask_features = mask_features.view(mask_features.size(0), global_channel_num, 14*14)
#         mask_features = torch.sum(mask_features, dim=2)  # batch size × 480
#         fenmu_flatten = mask.view(mask.size(0), 14*14)
#         fenmu = torch.sum(fenmu_flatten, dim=1)  # batch size × 1
#         # print('heng')
#         mask_features_new = torch.zeros([mask_features.shape[0], 480]).cuda()
#         for i in range(fenmu.shape[0]):
#             mask_features_new[i] = mask_features[i]/fenmu[i]
#         # ------------------------------------------------------

#         # mask_features = self.max_pool(mask_features)
#         # mask_features = mask_features.view(mask_features.size(0), global_channel_num)
#         # mask_features = self.avg_pool(mask_features)

#         # triplet loss
#         hmf_1 = torch.tanh(self.fc_1(mask_features_new))

#         return hmf_1


# class HashNet(nn.Module):     
#     def __init__(self, code_length = 32, class_num = 20):         
#         super(HashNet, self).__init__()      

#         # # features         
#         # self.features = nn.Sequential(*list(model.features.children())[:-1])

#         # mask net         
#         self.conv1 = nn.Conv2d(global_channel_num, 1, kernel_size=1, 
#                                                         stride=1, padding=0, bias=True)         
#         # *****  新增  ************************************
#         # torch.nn.init.xavier_normal_(self.conv1.weight, gain=0.01)
#         torch.nn.init.normal_(self.conv1.weight, mean=0, std=0.01)
#         torch.nn.init.constant_(self.conv1.bias, 0)
#         # *****  end 新增  ***********************************
#         self.softmax = nn.Softmax(dim=1)  # dim=1:行
#         self.thre1 = nn.Threshold(1/(14*14), 0)         
#         self.thre2 = nn.Threshold(-1/(14*14), 1)       

#         # triplet loss
#         self.fc_1 = nn.Linear(global_channel_num, code_length)
#         # torch.nn.init.xavier_uniform_(self.fc_1.weight)
#         # *****  新增  ************************************
#         # torch.nn.init.xavier_normal_(self.fc_1.weight, gain=0.01)
#         torch.nn.init.normal_(self.fc_1.weight, mean=0, std=0.01)
#         torch.nn.init.constant_(self.fc_1.bias, 0)
#         # *****  end 新增  ***********************************
#         # self.tanh = nn.Tanh()  # 将每个bit的值映射在 [-1,1]之间

#         # cross entropy loss
#         self.fc_2 = nn.Linear(global_channel_num, class_num)
#         # torch.nn.init.xavier_uniform_(self.fc_2.weight)
#         # *****  新增  ************************************
#         # torch.nn.init.xavier_normal_(self.fc_2.weight, gain=0.01)
#         torch.nn.init.normal_(self.fc_2.weight, mean=0, std=0.01)
#         torch.nn.init.constant_(self.fc_2.bias, 0)
#         # *****  end 新增  ***********************************
#         # self.sigmoid = nn.Sigmoid()  # 将每个bit的值映射在 [0,1]之间


#     def forward(self, features):         
#         # features = self.features(x)          

#         # get mask         
#         mask = self.conv1(features)         
#         mask = mask.view(mask.size(0), 14*14)         
#         mask = self.softmax(mask)     
#         mask = mask.view(mask.size(0), 1, 14, 14)
#         mask = self.thre1(mask)         
#         mask = -mask         
#         mask = self.thre2(mask)
        
#         # weighted average pooling  
#         # print('features.shape = ', features.shape)
#         # print('mask.shape = ', mask.shape)       
#         mask_features = features.mul(mask)   
#         # ---------------------------------------------------------

#         mask_features = mask_features.view(mask_features.size(0), global_channel_num, 14*14)
#         # print('mask_features[0][0] = ', mask_features[0][0])
#         # print('mask_features[0][1] = ', mask_features[0][1])
#         # print('mask_features[0][10] = ', mask_features[0][10])
#         # print('mask_features[2][3] = ', mask_features[2][3])
#         # print('mask_features[6][54] = ', mask_features[6][54])
#         # print('mask_features[23][100] = ', mask_features[23][100])
#         mask_features = torch.sum(mask_features, dim=2)
#         # print('after computing')
#         # print('mask_features[0][0] = ', mask_features[0][0])
#         # print('mask_features[0][1] = ', mask_features[0][1])
#         # print('mask_features[0][10] = ', mask_features[0][10])
#         # print('mask_features[2][3] = ', mask_features[2][3])
#         # print('mask_features[6][54] = ', mask_features[6][54])
#         # print('mask_features[23][100] = ', mask_features[23][100])
#         # print('mask_features.shape = ', mask_features.shape)
#         # triplet loss
#         hmf_1 = torch.tanh(self.fc_1(mask_features))

#         # cross entropy loss
#         hmf_2 = torch.sigmoid((self.fc_2(mask_features)))

#         return hmf_1, hmf_2

#     def forward_hashcode(self, features):         
        
#         mask = self.conv1(features)         
#         mask = mask.view(mask.size(0), 14*14)         
#         mask = self.softmax(mask)     
#         mask = mask.view(mask.size(0), 1, 14, 14)
#         mask = self.thre1(mask)         
#         mask = -mask         
#         mask = self.thre2(mask)
        
#         # weighted average pooling  
#         # print('features.shape = ', features.shape)
#         # print('mask.shape = ', mask.shape)       
#         mask_features = features.mul(mask)   
#         # ---------------------------------------------------------

#         mask_features = mask_features.view(mask_features.size(0), global_channel_num, 14*14)
#         mask_features = torch.sum(mask_features, dim=2)
        
#         # triplet loss
#         hmf = torch.tanh(self.fc_1(mask_features))

#         return hmf

    

