import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
from torch.nn import functional as F
import time
######################################################################
def weights_init_kaiming(m):
    """Kaiming/He initialization, applied via ``module.apply(weights_init_kaiming)``.

    Conv layers get fan-in Kaiming weights; Linear layers get fan-out Kaiming
    weights with zero bias; BatchNorm1d affine params get scale ~ N(1, 0.02)
    and zero shift. Other module types are left untouched.
    """
    classname = m.__class__.__name__
    # Uses the in-place (underscored) init API; the non-underscore variants
    # (init.kaiming_normal, init.constant, init.normal) are deprecated and
    # removed in modern PyTorch. Matches the style used elsewhere in this file.
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)

def weights_init_classifier(m):
    """Initialize a classifier head: weight ~ N(0, 0.001), bias = 0.

    Applied via ``module.apply(weights_init_classifier)``; only affects
    Linear layers.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        # in-place (underscored) init API; the non-underscore variants are
        # deprecated and removed in modern PyTorch
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)

# Defines the new fc layer and classification layer
# |--Linear--|--bn--|--relu--|--Linear--|
class ClassBlock(nn.Module):
    """Embedding plus classification head.

    Structure: Linear -> BatchNorm1d -> (LeakyReLU) -> (Dropout) -> Linear.
    ``forward(x, output='middle')`` returns the bottleneck embedding instead
    of the class logits.
    """

    def __init__(self, input_dim, class_num, dropout=True, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        embed_layers = [
            nn.Linear(input_dim, num_bottleneck),
            nn.BatchNorm1d(num_bottleneck),
        ]
        if relu:
            embed_layers.append(nn.LeakyReLU(0.1))
        if dropout:
            embed_layers.append(nn.Dropout(p=0.5))
        self.add_block = nn.Sequential(*embed_layers)
        self.add_block.apply(weights_init_kaiming)

        self.classifier = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        self.classifier.apply(weights_init_classifier)

    def forward(self, x, output=None):
        x = self.add_block(x)
        if output == 'middle':
            # expose the bottleneck embedding (used to build attention maps)
            return x
        return self.classifier(x)

## define the triplet head
class tri_block(nn.Module):
    """Triplet embedding head: Linear -> BatchNorm1d -> ReLU -> Linear.

    Args:
        num_features: width of the intermediate embedding.
        triplet_features: dimension of the output triplet embedding.
        input_dim: dimension of the incoming feature vector. Defaults to
            2048 (a ResNet-50 pool5 feature), which was previously
            hard-coded; the default keeps existing callers working.
    """
    def __init__(self, num_features, triplet_features, input_dim=2048):
        super(tri_block, self).__init__()
        self.num_features = num_features
        self.triplet_features = triplet_features
        self.feat = nn.Linear(input_dim, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        init.kaiming_normal_(self.feat.weight, mode='fan_out')
        init.constant_(self.feat.bias, 0)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)
        self.triplet = nn.Linear(self.num_features, self.triplet_features)
        init.normal_(self.triplet.weight, std=0.001)
        init.constant_(self.triplet.bias, 0)

    def forward(self, x, output_feature=None):
        # output_feature is unused but kept for caller compatibility
        x = self.feat(x)
        x = self.feat_bn(x)
        x = F.relu(x)
        return self.triplet(x)

# Define the ResNet50-based Model
# Define the ResNet50-based Model
class ft_net(nn.Module):
    """ResNet-50 backbone with a ClassBlock classifier head.

    Args:
        class_num: number of identity classes for the classifier.
    """

    def __init__(self, class_num ):
        super(ft_net, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # replace the fixed 7x7 average pool with a global adaptive pool so
        # arbitrary input resolutions are supported
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.model = model_ft
        self.classifier = ClassBlock(2048, class_num)

    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.model.avgpool(x)
        # flatten to (batch, 2048); view keeps the batch dimension even when
        # batch == 1, unlike torch.squeeze which would collapse it and break
        # the downstream Linear/BatchNorm layers
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

class ft_net_att(nn.Module):
    """ResNet-50 with multi-level attention over layer3/layer4/pool5 features.

    Per-level attention maps are produced from the global pool5 feature
    (mode 'pc': parametrized-compatibility via 1x1 convs; mode 'dp': dot
    product), the attended features are concatenated (1024+2048+2048) and
    classified with a ClassBlock.
    """
    def __init__(self, class_num, mode='pc'):
        super(ft_net_att, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # avg pooling to global pooling
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.mode = mode
        self.model = model_ft
        # concatenation of the three attended features: 2048 + 2048 + 1024
        self.classifier = ClassBlock(2048+2048+1024, class_num)
        if mode == 'pc':
            # per-level 1x1 convs that score the compatibility maps
            self.u1 = nn.Conv2d(1024,1,1)
            self.u2 = nn.Conv2d(2048,1,1)
            self.u3 = nn.Conv2d(2048,1,1)

        # project the global feature into one query per level
        self.fc1 = nn.Linear(2048, 2048)
        self.fc1_l3 = nn.Linear(2048, 2048)
        self.fc1_l2 = nn.Linear(2048, 2048)
        self.fc1_l1 = nn.Linear(2048, 1024)
        #self.fc2 = nn.Linear(2048+512+1024,class_num)

    def forward(self, x):
        """Return class logits computed from attention-weighted features."""
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        
        x1 = self.model.layer3(x)

        x2 = self.model.layer4(x1)
        x3 = self.model.avgpool(x2)
        #x3 = torch.squeeze(x3)
        # global feature drives the per-level attention queries
        fc1 = self.fc1(x3.view(x3.size(0), -1))
        fc1_l1 = self.fc1_l1(fc1)
        fc1_l2 = self.fc1_l2(fc1)
        fc1_l3 = self.fc1_l3(fc1)

        att1 = self._compatibility_fn(x1, fc1_l1, level=1)
        att2 = self._compatibility_fn(x2, fc1_l2, level=2)
        # NOTE(review): x3 here is the pooled 1x1 map, so att3 attends over a
        # single spatial location — confirm this is intended
        att3 = self._compatibility_fn(x3, fc1_l3, level=3)
        g1 = self._weighted_combine(x1, att1)
        g2 = self._weighted_combine(x2, att2)
        g3 = self._weighted_combine(x3, att3)        
        g = torch.cat((g1, g2, g3), dim=1)
                
        #out = self.fc2(g)
        g = self.classifier(g)
        return g

    def _compatibility_fn(self, l, g, level):
        """Build a spatially-softmaxed attention map over local features `l`
        from global query `g` ('dp' dot product or 'pc' additive + 1x1 conv)."""
        if self.mode == 'dp':
            att = l * g.unsqueeze(2).unsqueeze(3)
            att = att.sum(1).unsqueeze(1)

            # softmax over the flattened spatial positions
            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)
        elif self.mode == 'pc':
            att = l + g.unsqueeze(2).unsqueeze(3)

            if level == 1:
                u = self.u1
            elif level == 2:
                u = self.u2
            elif level == 3:
                u = self.u3
            att = u(att)
            
            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)

        return att        

    def _weighted_combine(self, l, att_map):
        """Attention-weighted spatial sum: (n, c, h, w) -> (n, c)."""
        g = l * att_map
        return g.view(g.size(0), g.size(1), -1).sum(2)

class ft_net_tri(nn.Module):
    """ResNet-50 with an ID-classification head and a triplet-embedding head.

    forward returns ``(class_logits, triplet_embedding)``; with
    ``output_feature='pool5'`` it returns the L2-normalized pool5 feature
    instead.
    """
    def __init__(self, pretrained=True, num_features=1024, norm=False, dropout=0.5, num_classes=751, triplet_features=128):
        super(ft_net_tri, self).__init__()
        # honor the `pretrained` argument (it was previously accepted but
        # ignored — the backbone always loaded pretrained weights)
        model_ft = models.resnet50(pretrained=pretrained)
        self.model = model_ft
        self.num_features = num_features
        self.norm = norm
        self.dropout = dropout
        self.num_classes = num_classes
        out_planes = self.model.fc.in_features
        # embedding: Linear -> BN (Kaiming weights, zero bias, unit BN scale)
        self.feat = nn.Linear(out_planes, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        init.kaiming_normal_(self.feat.weight, mode='fan_out')
        init.constant_(self.feat.bias, 0)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)
        self.triplet_features = triplet_features
        self.drop = nn.Dropout(self.dropout)
        self.classifier = nn.Linear(self.num_features, self.num_classes)
        init.normal_(self.classifier.weight, std=0.001)
        init.constant_(self.classifier.bias, 0)
        self.triplet = nn.Linear(self.num_features, self.triplet_features)
        init.normal_(self.triplet.weight, std=0.001)
        init.constant_(self.triplet.bias, 0)

    def forward(self, x, output_feature=None):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        # global average pool over the remaining spatial extent
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(x.size(0), -1)
        if output_feature == 'pool5':
            x = F.normalize(x)
            return x
        x = self.feat(x)
        x = self.feat_bn(x)
        if self.norm:
            x = F.normalize(x)
        x = F.relu(x)
        # triplet branch is taken before dropout so its embedding is stable
        x_triplet = self.triplet(x)
        x = self.drop(x)
        x_class = self.classifier(x)

        return x_class, x_triplet




   
# Define the DenseNet121-based Model
class ft_net_dense(nn.Module):
    """DenseNet-121 backbone with a ClassBlock classifier head."""

    def __init__(self, class_num ):
        super(ft_net_dense, self).__init__()
        model_ft = models.densenet121(pretrained=True)
        # assigning a module attribute on the `features` Sequential registers
        # it as the last stage, so the adaptive pool runs inside features(x)
        model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1,1))
        model_ft.fc = nn.Sequential()
        self.model = model_ft
        # For DenseNet, the feature dim is 1024
        self.classifier = ClassBlock(1024, class_num)

    def forward(self, x):
        x = self.model.features(x)
        # flatten to (batch, 1024); view keeps the batch dimension even when
        # batch == 1, unlike torch.squeeze which would collapse it
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
    
# Define the ResNet50-based Model (Middle-Concat)
# In the spirit of "The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching." Yu, Qian, et al. arXiv:1711.08106 (2017).
class ft_net_middle(nn.Module):
    """ResNet-50 that concatenates layer3 (mid-level) and layer4 features.

    In the spirit of "The Devil is in the Middle: Exploiting Mid-level
    Representations for Cross-Domain Instance Matching", Yu et al.,
    arXiv:1711.08106 (2017).
    """

    def __init__(self, class_num ):
        super(ft_net_middle, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # avg pooling to global pooling
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.model = model_ft
        # 2048 (layer4) + 1024 (layer3) concatenated features
        self.classifier = ClassBlock(2048+1024, class_num)

    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        # x0: (n, 1024, 1, 1) mid-level pooled feature
        x0 = self.model.avgpool(x)
        x = self.model.layer4(x)
        # x1: (n, 2048, 1, 1) high-level pooled feature
        x1 = self.model.avgpool(x)
        x = torch.cat((x0,x1),1)
        # flatten to (batch, 3072); view keeps the batch dimension even when
        # batch == 1, unlike torch.squeeze which would collapse it
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

# Part Model proposed in Yifan Sun et al. (2018)
class PCB(nn.Module):
    """Part-based Convolutional Baseline (Sun et al., ECCV 2018).

    The final feature map is cut into ``part`` horizontal stripes, each
    classified by its own ClassBlock. forward returns a list of ``part``
    logit tensors.
    """
    def __init__(self, class_num ):
        super(PCB, self).__init__()

        self.part = 6 # We cut the pool5 to 6 parts
        model_ft = models.resnet50(pretrained=True)
        self.model = model_ft
        self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
        self.dropout = nn.Dropout(p=0.5)
        # remove the final downsample so the last feature map keeps a higher
        # spatial resolution for the stripe pooling
        self.model.layer4[0].downsample[0].stride = (1,1)
        self.model.layer4[0].conv2.stride = (1,1)
        # define one classifier per part
        for i in range(self.part):
            name = 'classifier'+str(i)
            setattr(self, name, ClassBlock(2048, class_num, True, False, 256))

    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)

        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.avgpool(x)  # (batch, 2048, part, 1)
        #x = self.dropout(x)

        y = []
        for i in range(self.part):
            # (batch, 2048, 1) -> (batch, 2048); view keeps the batch
            # dimension even when batch == 1, unlike torch.squeeze
            part_i = x[:, :, i].view(x.size(0), -1)
            c = getattr(self, 'classifier' + str(i))
            y.append(c(part_i))
        return y




class SinkhornDistance(nn.Module):
    r"""
    Given two empirical measures with n points each with locations x and y,
    outputs an approximation of the regularized OT cost for point clouds.

    Args:
        grad (bool): if False, detach the plan/potentials from the autograd
            graph (only the cost-matrix copy keeps requires_grad)
        eps (float): regularization coefficient
        max_iter (int): maximum number of Sinkhorn iterations
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the sum of the output will be divided by the number of
            elements in the output, 'sum': the output will be summed. Default: 'mean'

    Shape:
        - Input: :math:`(N, \text{in\_features})`, :math:`(N, \text{in\_features})`
        - Output: :math:`(N)` or :math:`()`, depending on `reduction`
    """
    def __init__(self, grad=False, eps=1e-2, max_iter=100, reduction=None):
        super(SinkhornDistance, self).__init__()
        self.eps = eps
        self.max_iter = max_iter
        self.reduction = reduction
        self.grad = grad

    def forward(self, x, y, mu, nu):
        """Run Sinkhorn iterations with marginals mu and nu.

        Returns (cost, transport plan pi, cost matrix C).
        """
        C = self._cost_matrix(x, y)  # Wasserstein cost function

        if x.dim() == 2:
            batch_size = 1
        else:
            batch_size = x.shape[0]

        # dual potentials, initialized at zero; marginals mu/nu are supplied
        # by the caller (need not be uniform)
        u = torch.zeros_like(mu)
        v = torch.zeros_like(nu)
        # To check if algorithm terminates because of threshold
        # or max iterations reached
        actual_nits = 0
        # Stopping criterion on the change of u between iterations
        thresh = 1e-1

        # Sinkhorn iterations in the log domain (numerically stable)
        for i in range(self.max_iter):
            u1 = u  # useful to check the update
            u = self.eps * (torch.log(mu + 1e-8) - self.lse(self.M(C, u, v))) + u
            v = self.eps * (torch.log(nu + 1e-8) - self.lse(self.M(C, u, v).transpose(-2, -1))) + v
            err = (u - u1).abs().sum(-1).mean()
            actual_nits += 1
            if err.item() < thresh:
                break

        U, V = u, v
        # Transport plan pi = diag(a)*K*diag(b)
        pi = torch.exp(self.M(C, U, V))
        # Differentiable copy of the cost matrix used in the final cost; shape
        # is taken from C itself instead of a hard-coded (6, 6) so point sets
        # of any size work
        C1 = Variable(C.view(batch_size, C.size(-2), C.size(-1)), requires_grad=True)
        if not self.grad:
            # Detach everything but C1 from the graph. (This block was
            # tab-indented in the original source, which raises TabError
            # under Python 3; re-indented with spaces.)
            C = Variable(C, requires_grad=self.grad)
            pi = Variable(pi, requires_grad=self.grad)
            u, v = Variable(u, requires_grad=self.grad), Variable(v, requires_grad=self.grad)
            U, V = Variable(U, requires_grad=self.grad), Variable(V, requires_grad=self.grad)

        # Sinkhorn distance: <pi, C>
        cost = torch.sum(pi * C1, dim=(-2, -1))

        if self.reduction == 'mean':
            cost = cost.mean()
        elif self.reduction == 'sum':
            cost = cost.sum()

        return cost, pi, C

    def M(self, C, u, v):
        "Modified cost for logarithmic updates"
        "$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
        return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps

    @staticmethod
    def _cost_matrix(x, y, p=2):
        "Returns the matrix of $|x_i-y_j|^p$."
        x_col = x.unsqueeze(-2)
        y_lin = y.unsqueeze(-3)
        C = torch.sum((torch.abs(x_col - y_lin)) ** p, -1)
        return C

    @staticmethod
    def lse(A):
        "log-sum-exp"
        # add 10^-6 to prevent NaN
        result = torch.log(torch.exp(A).sum(-1) + 1e-6)
        return result

    @staticmethod
    def ave(u, u1, tau):
        "Barycenter subroutine, used by kinetic acceleration through extrapolation."
        return tau * u + (1 - tau) * u1



class pcb_attemd(nn.Module):
    """PCB backbone with an attention head and Sinkhorn (EMD) triplet mining.

    forward(x, targets) returns (y, dist_ap, dist_an, global_result):
    y is a list of part logits (left empty — the part cross-entropy branch
    is commented out), dist_ap/dist_an are attention-weighted EMD distances
    to mined positive/negative samples, and global_result is the global
    classifier output.

    NOTE(review): the hard-example mining below assumes each consecutive
    group of 8 batch images shares one identity (the `//8` / `+8`
    arithmetic) — confirm against the batch sampler.
    """
    def __init__(self, class_num=751, mode='pc', num_features=1024, triplet_features=128):
        super(pcb_attemd, self).__init__()
        self.part = 6 
        model_ft = models.resnet50(pretrained=True)
        self.model = model_ft
        # part pooling (6 horizontal stripes) and global pooling
        self.avgpool1 = nn.AdaptiveAvgPool2d((self.part,1))
        self.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
        self.dropout = nn.Dropout(p=0.5)
        # remove the final downsample to keep a higher-resolution last map
        self.model.layer4[0].downsample[0].stride = (1,1)
        self.model.layer4[0].conv2.stride = (1,1)
        self.mode = mode

        # 1x1 conv that turns the attention map into 6 per-part soft masks
        # NOTE(review): init.kaiming_normal/init.constant are the deprecated
        # non-underscore APIs (removed in modern PyTorch)
        self.local_mask = nn.Conv2d(1, 6 , kernel_size=1,padding=0,bias=True)
        init.kaiming_normal(self.local_mask.weight, mode= 'fan_out')
        init.constant(self.local_mask.bias,0)


        ## prepare the triplet loss    
        # self.tri = tri_block(num_features, triplet_features)
        if mode == 'pc':
            # self.u1 = nn.Conv2d(1024,1,1)
            # self.u2 = nn.Conv2d(2048,1,1)
            self.u1 = nn.Conv2d(2048,1,1)

        # queries for the attention map, derived from the 512-d bottleneck
        self.fc1 = nn.Linear(512, 2048)
        self.fc1_l1 = nn.Linear(2048, 2048)
        # self.fc1_l2 = nn.Linear(2048, 2048)
        # self.fc1_l1 = nn.Linear(2048, 1024)

        self.max_iter=10
        self.S_distance = SinkhornDistance()
        self.global_classifier = ClassBlock(2048, class_num, num_bottleneck=512)
        # per-part ID classifiers (currently unused in forward) and triplet heads
        for i in range(self.part):
            name1 = 'classifier'+str(i)
            name2 = 'tri' +str(i)
            setattr(self, name1, ClassBlock(2048, class_num, True, False, 256))
            setattr(self, name2, tri_block(2048, triplet_features))

    def forward(self, x, targets, output = None):
        ## backbone of resnet 50

        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x1 = self.model.layer3(x)
        x2 = self.model.layer4(x1)
        x3 = self.avgpool1(x2)  ## average pooling for different parts 
        
        global_feat =self.avgpool2(x2) # global average pooling 
        # NOTE(review): squeeze collapses the batch dim when batch == 1
        global_feat = torch.squeeze(global_feat)

        if output == 'pool5':
            return global_feat

        global_result = self.global_classifier(global_feat) # global classifier  
        # 512-d bottleneck embedding (ClassBlock 'middle' output)
        global_feat = self.global_classifier(global_feat, output='middle')

        part = {}
        predict = {}
        y = []
        tri_feat = []
        # get six part feature whose dimension is  batchsize*2048*6
        for i in range(self.part):
            part[i] = torch.squeeze(x3[:,:,i])
            name1 = 'classifier'+str(i)
            name2 = 'tri'+str(i)
## note: we do not utilize the part cross-entropy loss
            # c = getattr(self,name1)
            # y.append(c(part[i]))

            t = getattr(self,name2)
             
            tri_feat.append(t(part[i]))

            
        # (batch, part, triplet_features)
        tri_feat = torch.stack(tri_feat,1)

        fc1 = self.fc1(global_feat.view(global_feat.size(0),-1))
        fc1_l1 = self.fc1_l1(fc1)
        # fc1_l2 = self.fc1_l2(fc1)
        # fc1_l3 = self.fc1_l3(fc1)

        ## generate the attention map via two kinds of assignment 
        att_final = self._compatibility_fn(x2, fc1_l1, level=1)

        ## global pooling for attention map:        
        # att_final1 = self.avgpool1(att_final)
        global_feat = self._weighted_combine(x2, att_final)
        ## soft pooling for attention map: subtract the spatial mean
        center = F.avg_pool2d(att_final,(att_final.size(2),att_final.size(3)))
        att_final = att_final-center.expand_as(att_final)

        local_mask = self.local_mask(att_final)
        local_mask = local_mask/local_mask.norm(2,1).unsqueeze(1).expand_as(local_mask).clamp(min=1e-12)
        # NOTE(review): F.softmax without dim= is deprecated; relies on the
        # legacy implicit-dim behavior
        local_mask = F.softmax(local_mask.squeeze())
        # # print(local_mask)

        # split the 6 per-part masks and pool each re-weighted attention map
        lw = local_mask.chunk(6,1)
        f0 = att_final*6*(lw[0].expand_as(att_final))
        f1 = att_final*6*(lw[1].expand_as(att_final))
        f2 = att_final*6*(lw[2].expand_as(att_final))
        f3 = att_final*6*(lw[3].expand_as(att_final))
        f4 = att_final*6*(lw[4].expand_as(att_final))
        f5 = att_final*6*(lw[5].expand_as(att_final))
        f0 = F.avg_pool2d(f0,kernel_size=(f0.size(2),f0.size(3)))  
        f1 = F.avg_pool2d(f1,kernel_size=(f1.size(2),f1.size(3)))  
        f2 = F.avg_pool2d(f2,kernel_size=(f2.size(2),f2.size(3)))  
        f3 = F.avg_pool2d(f3,kernel_size=(f3.size(2),f3.size(3)))  
        f4 = F.avg_pool2d(f4,kernel_size=(f4.size(2),f4.size(3)))  
        f5 = F.avg_pool2d(f5,kernel_size=(f5.size(2),f5.size(3)))         
        att_final1 = torch.cat((f0,f1,f2,f3,f4,f5),2)


        # att2 = self._compatibility_fn(x2, fc1_l2, level=2)
        # att3 = self._compatibility_fn(x3, fc1_l3, level=3)

        ## the shape of att_final is (n*6*1*1)
        # att_final = self.avgpool1(att1 + att2 + att3)

        # g1 = self._weighted_combine(x1, att1)
        # g2 = self._weighted_combine(x2, att2)
        # g3 = self._weighted_combine(x3, att3)

        ## hard sample mining via the global feature; pairwise Euclidean
        ## distance matrix of the attended global features
        n = x.size(0)
        dist = torch.pow(global_feat, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # NOTE(review): this positional addmm_(beta, alpha, ...) signature is
        # deprecated in modern PyTorch (use beta=/alpha= keywords)
        dist.addmm_(1, -2, global_feat, global_feat.t())
        dist = dist.clamp(min=1e-12).sqrt()
        # mask[i][j] is True when i and j share the same identity
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        Pos_index = [] 
        Neg_index = []
        att_final1 = att_final1.squeeze()

        # L2-normalize the part-attention scores, then soften into a
        # probability over the 6 parts (used as Sinkhorn marginals)
        att_final1 = att_final1/att_final1.norm(2,1).unsqueeze(1).expand_as(att_final1).clamp(min=1e-12)
        att_prob = Variable(F.softmax(att_final1, dim=1), requires_grad = False)

        for i in range(n):
            # hardest positive: farthest same-ID sample; hardest negative:
            # closest different-ID sample (indices are within the masked
            # subsets, hence the offset arithmetic below)
            _, pos_index=torch.max(dist[i][mask[i]==1],0)
            _, neg_index=torch.min(dist[i][mask[i]==0],0)
            # print(i)
            # print (neg_index.item())
            # time.sleep(2)
            # re-map the masked negative index back to a batch index by
            # skipping this sample's own 8-image identity group
            if neg_index>=(((i)//8)*8):
                neg_index=neg_index+8  
            # print (neg_index.item()) 
            # time.sleep(5) 
            # print (pos_index.item())   
            # time.sleep(5)       
            # print (pos_index.item()+(i//8)*8)                      
            
            # time.sleep(2)
            # if i%8==0:
            #     print('next')

            # # print(neg_index.item())
            # time.sleep(2)
            Pos_index.append(pos_index.data+(i//8)*8)
            Neg_index.append(neg_index.data)
            # att_x1[i,:,:] = att_x[i,:,:]/att_x[i,:,:].norm(2,1).expand_as(att_x[i,:,:]).clamp(min=1e-12)
            # att_y1[i,:,:] = att_y[i,:,:]/att_y[i,:,:].norm(2,1).expand_as(att_y[i,:,:]).clamp(min=1e-12)
            # att_x1[i,:,:] =  Variable(F.softmax(att_x1[i,:,:].squeeze(0),dim=0), requires_grad = False)
            # att_y1[i,:,:] = Variable(F.softmax(att_y1[i,:,:].squeeze(0), dim=0), requires_grad =False)
      

            # dist_ap.append(self.emd_distance_S(tri_feat[pos_index.item()+(i//8)*8,:,:],tri_feat[i,:,:],att_final1[pos_index.item()+(i//8)*8,:],att_final1[i,:]).view(1))
            # dist_an.append(self.emd_distance_S(tri_feat[neg_index.item(),:,:],tri_feat[i,:,:],att_final1[neg_index.item(),:],att_final1[i,:]).view(1))

        # gather mined samples and compute batched attention-weighted EMD
        x_pos = tri_feat[Pos_index,:,:]
        x_neg = tri_feat[Neg_index,:,:]
        att_pos = att_prob[Pos_index,:]
        att_neg = att_prob[Neg_index,:]
        dist_ap = self.emd_distance_S(x_pos,tri_feat,att_pos, att_prob)
        dist_an = self.emd_distance_S(x_neg,tri_feat,att_neg, att_prob)

        # for i in range(n):
        #     print(dist_ap[i])
        #     print(dist_an[i])
        #     time.sleep(2)
        #     print(dist[i][mask[i]].max())
        #     print(dist[i][mask[i] == 0].min())
        #     time.sleep(2)

            # dist_ap.append(dist[i][mask[i]].max().view(1))
            # dist_an.append(dist[i][mask[i] == 0].min().view(1))

        # dist_ap = torch.cat(dist_ap)
        # dist_an = torch.cat(dist_an)


        # y = dist_an.data.new()
        # y.resize_as_(dist_an.data)
        # y.fill_(1)
        # y = Variable(y)
        return y, dist_ap, dist_an, global_result

    def emd_distance_S(self, x, y, att_x, att_y):
        """Sinkhorn-based EMD between part features x and y with the
        attention probabilities att_x/att_y as marginals."""
        # att_x1 = att_x/att_x.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        # att_y1 = att_y/att_y.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        # att_x1 =  Variable(F.softmax(att_x1.squeeze(0),dim=0), requires_grad = False)
        # att_y1 = Variable(F.softmax(att_y1.squeeze(0), dim=0), requires_grad =False)
        dist_final, _, _ = self.S_distance(x, y, att_x, att_y)

        return dist_final

    def emd_distance_flow(self, x, y, att_x, att_y):
        """EMD via an explicitly computed (non-differentiable) optimal flow;
        only the final weighted sum keeps gradients."""
        ## we first get the best flow 
        # dist_calculate = torch.nn.PairwiseDistance(p=2)

        dist1=Variable(self.EuclideanDistances(x,y), requires_grad = False)
        dist2 = Variable(dist1.view(self.part,self.part), requires_grad =True)

        K= Variable(torch.exp(-0.01* dist1), requires_grad = False)
        u, v = self.scaling_para(K, att_x, att_y)
        U=Variable(torch.diag(u.squeeze()), requires_grad = False)
        V=Variable(torch.diag(v.squeeze()), requires_grad = False)
        T_best = Variable(U.mm(K).mm(V) , requires_grad = False)

        dist_final = T_best.mul(dist2).sum()/dist2.sum()

        return dist_final

    def scaling_para(self, K, att_x, att_y):
        """Sinkhorn scaling vectors (u, v) for kernel K with the normalized
        attention vectors as target marginals. Requires CUDA."""
        u = Variable(torch.rand(self.part,1).cuda(), requires_grad = False)
        v = Variable(torch.rand( self.part,1).cuda(), requires_grad = False)
        att_x1 = att_x/att_x.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        att_y1 = att_y/att_y.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        att_x1 =  Variable(F.softmax(att_x1.squeeze(0),dim=0), requires_grad = False)
        att_y1 = Variable(F.softmax(att_y1.squeeze(0), dim=0), requires_grad =False)
      
        # att_x1 =  Variable(F.softmax(att_x1,dim=1), requires_grad = False)
        # att_y1 = Variable(F.softmax(att_y1, dim=1), requires_grad =False)
        # att_x1 = (att_x1-att_x1.min())/(att_x1.max()-att_x1.min())
        # att_y1 = (att_y1-att_y1.min())/(att_y1.max()-att_y1.min())
        # att_x1 = att_x1/att_x1.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        # att_y1 = att_y1/att_y1.norm(2,1).expand_as(att_y).clamp(min=1e-12)

        for i in range(self.max_iter):
            u = att_x1.div(K.mm(v))
            v = att_y1.div(K.t().mm(u))
        return u, v

    def emd_distance(self, x, y, att_x, att_y):
        """Attention-difference-weighted pairwise-distance variant of EMD."""
        ## compute the pair-wise distance, for each x and each y 

        dist_calculate = torch.nn.PairwiseDistance(p=2)
        dist=dist_calculate(x,y)       
        # att_diff = pairwise_distance(att_x, att_y)        
        att_diff = torch.abs(att_x - att_y)
        dist_final = att_diff.mul(dist).sum()/dist.sum()
        print(dist_final)
        return dist_final

    def EuclideanDistances(self, x, y ):
        """6x6 Euclidean distance matrix between the part features of x and y."""
        vec = x.mm(y.t())
        # NOTE(review): SqB is transposed the same way as SqA here — verify
        # the intended ||a||^2 + ||b||^2 - 2ab expansion
        SqA = torch.pow(x,2).sum(1).repeat(6,1).t()
        SqB = torch.pow(y,2).sum(1).repeat(6,1).t()
        # print(vec.shape)
        # print(SqA.shape)
        # time.sleep(100)
        SqED = SqA + SqB - 2*vec
        SqED = SqED.clamp(min=1e-12).sqrt()
        return SqED

    def pairwise_distance(self, input):
        """Pairwise Euclidean distance matrix among the rows of input."""
        n = input.size(0)
        dist = torch.pow(input, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        dist.addmm_(1, -2, input, input.t())        
        dist = dist.clamp(min=1e-12).sqrt()
        return dist 

    def pairwise_distance1(self, x, y):
        """Debug helper: elementwise 6x6 L1 distances with a zeroed diagonal.
        Requires CUDA; prints as it goes."""
        dist_calculate = torch.nn.PairwiseDistance(p=1)
        dist=Variable(torch.zeros(6,6).cuda(), requires_grad=False)
        print(x[0,:],y[0,:])
        for i in range(6):
            for j in range(6):
                print(i)
                dist[i,j]=dist_calculate(x[i,:],y[j,:])
            dist[i,i]=0
        return dist

    def _compatibility_fn(self, l, g, level):
        """Build a spatially-softmaxed attention map over local features `l`
        from global query `g` ('dp' dot product or 'pc' additive + 1x1 conv)."""
        if self.mode == 'dp':
            att = l * g.unsqueeze(2).unsqueeze(3)
            att = att.sum(1).unsqueeze(1)

            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)
        elif self.mode == 'pc':
            att = l + g.unsqueeze(2).unsqueeze(3)

            # only u1 exists in this class; levels 2/3 would raise here
            if level == 1:
                u = self.u1
            elif level == 2:
                u = self.u2
            elif level == 3:
                u = self.u3
            att = u(att)
            
            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)

        return att        

    def _weighted_combine(self, l, att_map):
        """Attention-weighted spatial sum: (n, c, h, w) -> (n, c)."""
        g = l * att_map
        return g.view(g.size(0), g.size(1), -1).sum(2)


class PCB_test(nn.Module):
    """Inference wrapper around a trained PCB model.

    Reuses the trained backbone and returns the six stripe features
    (batch, 2048, part) with no classifier applied.
    """
    def __init__(self, model):
        super(PCB_test, self).__init__()
        self.part = 6
        self.model = model.model
        self.avgpool = nn.AdaptiveAvgPool2d((self.part, 1))
        # undo the final downsample, mirroring the training-time PCB setup
        self.model.layer4[0].downsample[0].stride = (1, 1)
        self.model.layer4[0].conv2.stride = (1, 1)

    def forward(self, x):
        backbone = self.model
        for stage in (backbone.conv1, backbone.bn1, backbone.relu,
                      backbone.maxpool, backbone.layer1, backbone.layer2,
                      backbone.layer3, backbone.layer4):
            x = stage(x)
        x = self.avgpool(x)
        # drop the trailing width-1 axis: (n, 2048, part, 1) -> (n, 2048, part)
        return x.view(x.size(0), x.size(1), x.size(2))


# debug model structure


# import torch
# import torch.nn as nn
# from torch.nn import init
# from torchvision import models
# from torch.autograd import Variable
# from torch.nn import functional as F
# import time
######################################################################
## This code is the structure of the proposed EMD triplet model,
## which does not use the part cross-entropy loss but is easy to implement
#######################################################################

## define the initialization methods
def weights_init_kaiming(m):
    """Kaiming/He initialization, applied via ``module.apply(weights_init_kaiming)``.

    Conv layers get fan-in Kaiming weights; Linear layers get fan-out Kaiming
    weights with zero bias; BatchNorm1d affine params get scale ~ N(1, 0.02)
    and zero shift. Other module types are left untouched.
    """
    classname = m.__class__.__name__
    # in-place (underscored) init API; the non-underscore variants are
    # deprecated and removed in modern PyTorch
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)

def weights_init_classifier(m):
    """Initialize a classifier head: weight ~ N(0, 0.001), bias = 0.

    Applied via ``module.apply(weights_init_classifier)``; only affects
    Linear layers.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        # in-place (underscored) init API; the non-underscore variants are
        # deprecated and removed in modern PyTorch
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)

# Defines the embeding and classifier layer
# |--Linear--|--bn--|--relu--|--Linear--|
class ClassBlock(nn.Module):
    """Embedding and classifier head.

    Pipeline: Linear -> BatchNorm1d -> (LeakyReLU) -> (Dropout) -> Linear.
    Calling ``forward(x, output='middle')`` short-circuits after the
    embedding, which is used to produce attention maps.
    """

    def __init__(self, input_dim, class_num, dropout=True, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        blocks = [nn.Linear(input_dim, num_bottleneck)]
        blocks.append(nn.BatchNorm1d(num_bottleneck))
        if relu:
            blocks.append(nn.LeakyReLU(0.1))
        if dropout:
            blocks.append(nn.Dropout(p=0.5))
        self.add_block = nn.Sequential(*blocks)
        self.add_block.apply(weights_init_kaiming)

        head = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        head.apply(weights_init_classifier)
        self.classifier = head

    def forward(self, x, output=None):
        embedded = self.add_block(x)
        if output == 'middle':
            # return the bottleneck embedding for attention-map generation
            return embedded
        return self.classifier(embedded)

## define the triplet convolution
class tri_block(nn.Module):
    """Triplet embedding head: Linear -> BatchNorm -> ReLU -> Linear.

    Maps a part feature to a low-dimensional embedding used by the
    triplet/EMD loss.

    Args:
        num_features: width of the intermediate bottleneck.
        triplet_features: dimension of the output triplet embedding.
        input_dim: dimension of the incoming feature. Defaults to 2048
            (the ResNet-50 channel count), which was previously hard-coded.
    """
    def __init__(self, num_features, triplet_features, input_dim=2048):
        super(tri_block, self).__init__()
        self.num_features = num_features
        self.feat = nn.Linear(input_dim, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        init.kaiming_normal_(self.feat.weight, mode='fan_out')
        init.constant_(self.feat.bias, 0)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)
        self.triplet_features = triplet_features
        self.triplet = nn.Linear(self.num_features, self.triplet_features)
        init.normal_(self.triplet.weight, std=0.001)
        init.constant_(self.triplet.bias, 0)

    def forward(self, x, output_feature=None):
        """Return the triplet embedding for x (shape: batch x triplet_features).

        ``output_feature`` is unused but kept for caller compatibility.
        """
        x = self.feat(x)
        x = self.feat_bn(x)
        x = F.relu(x)
        return self.triplet(x)


## main structure         
class pcb_attemd1(nn.Module):
    """PCB-style re-ID model with attention and an EMD triplet head.

    ResNet-50 backbone (last stride set to 1), six-part average pooling,
    a global classifier, per-part triplet embedding heads, and an attention
    module ('pc' = parametrised compatibility, 'dp' = dot product).
    """
    def __init__(self, class_num=751, mode='pc', num_features=1024, triplet_features=128):
        super(pcb_attemd1, self).__init__()
        self.part = 6  # number of horizontal body parts
        model_ft = models.resnet50(pretrained=True)
        self.model = model_ft
        self.avgpool1 = nn.AdaptiveAvgPool2d((self.part, 1))  # part pooling
        self.avgpool2 = nn.AdaptiveAvgPool2d((1, 1))          # global pooling
        self.dropout = nn.Dropout(p=0.5)
        # keep layer4 spatial resolution (last-stride-1, the standard PCB trick)
        self.model.layer4[0].downsample[0].stride = (1, 1)
        self.model.layer4[0].conv2.stride = (1, 1)
        self.mode = mode

        # 1x1 conv that turns the 1-channel attention map into 6 part masks;
        # use the non-deprecated in-place initializers (consistent with tri_block)
        self.local_mask = nn.Conv2d(1, 6, kernel_size=1, padding=0, bias=True)
        init.kaiming_normal_(self.local_mask.weight, mode='fan_out')
        init.constant_(self.local_mask.bias, 0)

        ## attention scoring conv for the 'pc' (parametrised compatibility) mode
        if mode == 'pc':
            self.u1 = nn.Conv2d(2048, 1, 1)

        self.fc1 = nn.Linear(2048, 2048)
        self.fc1_l1 = nn.Linear(2048, 2048)

        self.max_iter = 30  # Sinkhorn iterations for the EMD scaling vectors

        self.global_classifier = ClassBlock(2048, class_num, num_bottleneck=2048)
        for i in range(self.part):
            name1 = 'classifier' + str(i)
            name2 = 'tri' + str(i)
            setattr(self, name1, ClassBlock(2048, class_num, True, False, 256))
            setattr(self, name2, tri_block(2048, triplet_features))

    def forward(self, x, targets, output = None):
        """Forward pass with in-batch hard example mining and EMD distances.

        Args:
            x: image batch (N, 3, H, W).
            targets: identity labels (N,). NOTE(review): the mining below
                assumes the batch is laid out as contiguous groups of 8
                images per identity — confirm against the data sampler.
            output: if 'pool5', return only the globally pooled feature.

        Returns:
            y (empty list — part logits branch is unused), dist_ap, dist_an
            (EMD anchor-positive / anchor-negative distances), global logits.
        """
        ## backbone of resnet 50
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)

        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x1 = self.model.layer3(x)
        x2 = self.model.layer4(x1)
        x3 = self.avgpool1(x2)  ## average pooling for the six parts

        global_feat = self.avgpool2(x2)  # global average pooling
        global_feat = torch.squeeze(global_feat)

        if output == 'pool5':
            return global_feat

        global_result = self.global_classifier(global_feat)  # global classifier
        global_feat = self.global_classifier(global_feat, output='middle')  # global high-level feature

        ## per-part triplet embeddings (the part cross-entropy branch is unused)
        y = []
        tri_feat = []
        for i in range(self.part):
            part_i = torch.squeeze(x3[:, :, i])
            tri_head = getattr(self, 'tri' + str(i))
            tri_feat.append(tri_head(part_i))

        # six part features, shape batchsize x 6 x triplet_features
        tri_feat = torch.stack(tri_feat, 1)

        fc1 = self.fc1(global_feat.view(global_feat.size(0), -1))
        fc1_l1 = self.fc1_l1(fc1)

        ## generate the attention map
        att_final = self._compatibility_fn(x2, fc1_l1, level=1)

        ## refined part pooling: subtract the spatial mean from the map
        center = F.avg_pool2d(att_final, (att_final.size(2), att_final.size(3)))
        att_final = att_final - center.expand_as(att_final)

        local_mask = self.local_mask(att_final)
        # NOTE(review): softmax with an implicit dim relies on legacy dim
        # inference (dim=1 for 4-d input) — confirm the intended axis
        local_mask = F.softmax(local_mask.squeeze())

        # weight the attention map by each of the six part masks and pool
        pooled = []
        for w in local_mask.chunk(6, 1):
            fi = att_final * 6 * (w.expand_as(att_final))
            pooled.append(F.avg_pool2d(fi, kernel_size=(fi.size(2), fi.size(3))))
        att_final1 = torch.cat(pooled, 2)

        ## hard sample mining on the global feature, then EMD distances
        n = x.size(0)
        dist = torch.pow(global_feat, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # keyword form of addmm_ (the positional beta/alpha overload is deprecated)
        dist.addmm_(global_feat, global_feat.t(), beta=1, alpha=-2)
        dist = dist.clamp(min=1e-12).sqrt()
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            ## hardest positive = farthest same-id sample;
            ## hardest negative = closest different-id sample
            ## (the previous torch.max on the negatives picked the easiest one)
            _, pos_index = torch.max(dist[i][mask[i]], 0)
            _, neg_index = torch.min(dist[i][mask[i] == 0], 0)

            ## map the masked negative index back to a full-batch index by
            ## skipping the 8-image block holding anchor i's own identity
            if neg_index >= (((i) // 8) * 8):
                neg_index = neg_index + 8

            pos = pos_index.item() + (i // 8) * 8
            neg = neg_index.item()
            ## EMD distance (CVPR 2019 formulation) with attention marginals
            dist_ap.append(self.emd_distance_flow(
                tri_feat[pos, :, :], tri_feat[i, :, :],
                att_final1[pos, :], att_final1[i, :]).view(1))
            dist_an.append(self.emd_distance_flow(
                tri_feat[neg, :, :], tri_feat[i, :, :],
                att_final1[neg, :], att_final1[i, :]).view(1))

        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        return y, dist_ap, dist_an, global_result


    def emd_distance_flow(self, x, y, att_x, att_y):
        """EMD between two part-feature sets via a Sinkhorn transport plan.

        x, y: part embeddings (self.part rows each); att_x, att_y: the
        corresponding part attention weights, used as the EMD marginals.
        Returns the transport-plan-weighted ground distance, normalized by
        the total ground distance.
        """
        ## we first get the best flow 
        # dist_calculate = torch.nn.PairwiseDistance(p=2)
        # ground-distance matrix between the parts, treated as a constant
        dist1=Variable(self.EuclideanDistances(x,y), requires_grad = False)

        # NOTE(review): dist2 is a fresh leaf built from the detached dist1,
        # so gradients reach dist2 but never flow back into x or y — confirm
        # this stop-gradient behavior is intended.
        dist2 = Variable(dist1.view(self.part,self.part), requires_grad =True)
        # Sinkhorn kernel K = exp(-lambda * D), regularization lambda = 0.01
        K= Variable(torch.exp(-0.01* dist1), requires_grad = False)
        u, v = self.scaling_para(K, att_x, att_y)
        # transport plan T = diag(u) K diag(v), kept out of the autograd graph
        U=Variable(torch.diag(u.squeeze()), requires_grad = False)
        V=Variable(torch.diag(v.squeeze()), requires_grad = False)
        T_best = Variable(U.mm(K).mm(V) , requires_grad = False)
        dist_final = T_best.mul(dist2).sum()/dist2.sum()
        return dist_final

    def scaling_para(self, K, att_x, att_y):
        """Run Sinkhorn iterations to obtain the scaling vectors (u, v) for K.

        The attention vectors are L2-normalized and softmaxed into the two
        marginal distributions; after self.max_iter updates, diag(u) K diag(v)
        approximately matches those marginals.
        """
        # NOTE(review): .cuda() hard-codes GPU execution — this fails on CPU
        u = Variable(torch.rand(self.part,1).cuda(), requires_grad = False)
        v = Variable(torch.rand( self.part,1).cuda(), requires_grad = False)
        # L2-normalize the attention weights (clamped to avoid divide-by-zero)
        att_x1 = att_x/att_x.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        att_y1 = att_y/att_y.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        # softmax turns the normalized attentions into the EMD marginals
        att_x1 =  Variable(F.softmax(att_x1.squeeze(0),dim=0), requires_grad = False)
        att_y1 = Variable(F.softmax(att_y1.squeeze(0), dim=0), requires_grad =False)
        # att_x1 =  Variable(F.softmax(att_x1,dim=1), requires_grad = False)
        # att_y1 = Variable(F.softmax(att_y1, dim=1), requires_grad =False)
        # att_x1 = (att_x1-att_x1.min())/(att_x1.max()-att_x1.min())
        # att_y1 = (att_y1-att_y1.min())/(att_y1.max()-att_y1.min())
        # att_x1 = att_x1/att_x1.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        # att_y1 = att_y1/att_y1.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        # standard Sinkhorn updates: u = a / (K v), v = b / (K^T u)
        for i in range(self.max_iter):
            u = att_x1.div(K.mm(v))
            v = att_y1.div(K.t().mm(u))
        return u, v

    def emd_distance(self, x, y, att_x, att_y):
        ## compute the pair-wise distance, for each x and each y 

        dist_calculate = torch.nn.PairwiseDistance(p=2)
        dist=dist_calculate(x,y)       
        # att_diff = pairwise_distance(att_x, att_y)        
        att_diff = torch.abs(att_x - att_y)
        dist_final = att_diff.mul(dist).sum()/dist.sum()
        return dist_final

    ## fix the bugs and this distance is verified
    def EuclideanDistances(self, x, y ):
        vec = x.mm(y.t())
        SqA = torch.pow(x,2).sum(1).repeat(6,1).t()
        SqB = torch.pow(y,2).sum(1).repeat(6,1).t()
        SqED = SqA + SqB - 2*vec
        SqED = SqED.clamp(min=1e-12).sqrt()
        return SqED


    def _compatibility_fn(self, l, g, level):
        if self.mode == 'dp':
            att = l * g.unsqueeze(2).unsqueeze(3)
            att = att.sum(1).unsqueeze(1)

            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)
        elif self.mode == 'pc':
            att = l + g.unsqueeze(2).unsqueeze(3)

            if level == 1:
                u = self.u1
            elif level == 2:
                u = self.u2
            elif level == 3:
                u = self.u3
            att = u(att)
            
            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)

        return att        

    def _weighted_combine(self, l, att_map):
        g = l * att_map
        return g.view(g.size(0), g.size(1), -1).sum(2)