import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
from torch.nn import functional as F
import time
######################################################################
## This code is the structure of the proposed EMD triplet model,
## which does not use the part cross-entropy loss and is easy to implement.
#######################################################################

## define the initialization methods
def weights_init_kaiming(m):
    """Kaiming-initialize Conv/Linear layers and normal-init BatchNorm1d.

    Intended to be used with ``nn.Module.apply``.  Conv weights use fan-in,
    Linear weights use fan-out with a zeroed bias; BatchNorm1d affine weights
    are drawn from N(1.0, 0.02) with a zeroed bias.  Modules whose class name
    matches none of the patterns are left untouched.
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        # Only the weight is initialized for conv layers (bias, if any, is kept).
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)

def weights_init_classifier(m):
    """Initialize classifier Linear layers with N(0, 0.001) weights, zero bias.

    Intended to be used with ``nn.Module.apply``; non-Linear modules are
    left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)

# Defines the embedding and classifier layers
# |--Linear--|--bn--|--relu--|--Linear--|
class ClassBlock(nn.Module):
    """Bottleneck embedding followed by a linear classifier.

    Structure: Linear -> BatchNorm1d -> (LeakyReLU) -> (Dropout) -> Linear.
    ``forward(x)`` returns class logits; ``forward(x, output='middle')``
    returns the bottleneck embedding instead (used to build attention maps).
    """
    def __init__(self, input_dim, class_num, dropout=True, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        layers = [
            nn.Linear(input_dim, num_bottleneck),
            nn.BatchNorm1d(num_bottleneck),
        ]
        if relu:
            layers.append(nn.LeakyReLU(0.1))
        if dropout:
            layers.append(nn.Dropout(p=0.5))
        embed = nn.Sequential(*layers)
        embed.apply(weights_init_kaiming)

        head = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        head.apply(weights_init_classifier)

        self.add_block = embed
        self.classifier = head

    def forward(self, x, output=None):
        feat = self.add_block(x)
        # 'middle' short-circuits before the classifier: embedding only,
        # used by the caller to produce attention maps.
        if output == 'middle':
            return feat
        return self.classifier(feat)

## define the triplet embedding block
class tri_block(nn.Module):
    """Per-part triplet embedding head.

    Maps a part feature vector to a low-dimensional embedding used by the
    triplet/EMD loss:
    Linear(in_dim -> num_features) -> BatchNorm1d -> ReLU
    -> Linear(num_features -> triplet_features).

    Args:
        num_features: bottleneck width.
        triplet_features: output embedding dimension.
        in_dim: input feature dimension (default 2048, matching ResNet-50
            part features; previously hard-coded).
    """
    def __init__(self, num_features, triplet_features, in_dim=2048):
        super(tri_block, self).__init__()
        self.num_features = num_features
        self.triplet_features = triplet_features
        self.feat = nn.Linear(in_dim, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        # Kaiming init for the bottleneck; small-normal for the embedding layer.
        init.kaiming_normal_(self.feat.weight, mode='fan_out')
        init.constant_(self.feat.bias, 0)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)
        self.triplet = nn.Linear(self.num_features, self.triplet_features)
        init.normal_(self.triplet.weight, std=0.001)
        init.constant_(self.triplet.bias, 0)

    def forward(self, x, output_feature=None):
        """Return the triplet embedding for ``x`` (shape: batch x in_dim).

        ``output_feature`` is accepted for interface compatibility but unused.
        """
        x = self.feat(x)
        x = self.feat_bn(x)
        x = F.relu(x)
        return self.triplet(x)


## main structure         
class pcb_attemd(nn.Module):
    """PCB-style person re-ID network with an attention-guided EMD triplet loss.

    A ResNet-50 backbone (last stage kept at stride 1, the PCB trick) produces
    a feature map that is pooled two ways: into ``self.part`` horizontal
    stripes (each embedded by a ``tri_block``) and globally (fed to a global
    ``ClassBlock`` classifier).  An attention map derived from the global
    feature is split into per-part weights, which serve as the ground weights
    of an earth-mover (Sinkhorn) distance between the part embeddings of
    mined sample pairs.

    ``forward`` returns ``(y, dist_ap, dist_an, global_result)``: ``y`` stays
    an empty list (the part cross-entropy classifiers are disabled below),
    ``dist_ap``/``dist_an`` are EMD distances to the mined positive/negative
    samples, and ``global_result`` are the identity logits.
    """
    def __init__(self, class_num=751, mode='pc', num_features=1024, triplet_features=128):
        # class_num: number of identities (751 = Market-1501 train split).
        # mode: attention compatibility function, 'pc' (additive + 1x1 conv)
        #       or 'dp' (dot product); see _compatibility_fn.
        # num_features: NOTE(review) — accepted but unused; the tri_blocks
        #       below are built with a hard-coded 2048. Confirm intent.
        # triplet_features: per-part triplet embedding dimension.
        super(pcb_attemd, self).__init__()
        self.part = 6 
        model_ft = models.resnet50(pretrained=True)
        self.model = model_ft
        # avgpool1: pool into `part` horizontal stripes; avgpool2: global pool.
        self.avgpool1 = nn.AdaptiveAvgPool2d((self.part,1))
        self.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
        self.dropout = nn.Dropout(p=0.5)
        # Keep layer4 at stride 1 so the final feature map stays spatially large.
        self.model.layer4[0].downsample[0].stride = (1,1)
        self.model.layer4[0].conv2.stride = (1,1)
        self.mode = mode

        # 1x1 conv turning the 1-channel attention map into `part` soft masks.
        # NOTE(review): init.kaiming_normal/init.constant are the deprecated
        # forms; the rest of this file uses the trailing-underscore variants.
        self.local_mask = nn.Conv2d(1, 6 , kernel_size=1,padding=0,bias=True)
        init.kaiming_normal(self.local_mask.weight, mode= 'fan_out')
        init.constant(self.local_mask.bias,0)


        ## prepare the triplet loss    
        # self.tri = tri_block(num_features, triplet_features)
        if mode == 'pc':
            # self.u1 = nn.Conv2d(1024,1,1)
            # self.u2 = nn.Conv2d(2048,1,1)
            self.u1 = nn.Conv2d(2048,1,1)

        # Two linear projections of the global feature used to build the
        # attention query in _compatibility_fn (fc1 -> fc1_l1).
        self.fc1 = nn.Linear(2048, 2048)
        self.fc1_l1 = nn.Linear(2048, 2048)
        # self.fc1_l2 = nn.Linear(2048, 2048)
        # self.fc1_l1 = nn.Linear(2048, 1024)

        # Number of Sinkhorn scaling iterations (see scaling_para).
        self.max_iter=30

        self.global_classifier = ClassBlock(2048, class_num, num_bottleneck=2048)
        # One (unused) part classifier and one triplet embedding head per stripe.
        for i in range(self.part):
            name1 = 'classifier'+str(i)
            name2 = 'tri' +str(i)
            setattr(self, name1, ClassBlock(2048, class_num, True, False, 256))
            setattr(self, name2, tri_block(2048, triplet_features))

    def forward(self, x, targets, output = None):
        """Run the network.

        Args:
            x: input image batch; targets: identity labels, shape (n,).
            output: 'pool5' returns the raw global feature early.
        Returns:
            (y, dist_ap, dist_an, global_result) — see class docstring.
        """
        ## backbone of resnet 50
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x1 = self.model.layer3(x)
        x2 = self.model.layer4(x1)
        x3 = self.avgpool1(x2)  ## average pooling for different parts 
        
        global_feat =self.avgpool2(x2) # global average pooling 
        global_feat = torch.squeeze(global_feat)

        if output == 'pool5':
            return global_feat

        global_result = self.global_classifier(global_feat) # global classifier  
        global_feat = self.global_classifier(global_feat, output='middle')# global high level feature

        part = {}
        predict = {}
        y = []
        tri_feat = []
        
        # Embed each horizontal stripe with its own tri_block.
        for i in range(self.part):
            part[i] = torch.squeeze(x3[:,:,i])
            name1 = 'classifier'+str(i)
            name2 = 'tri'+str(i)
## notice that we don't utilize the part cross-entropy loss 
            # c = getattr(self,name1)
            # y.append(c(part[i]))
            t = getattr(self,name2)
            tri_feat.append(t(part[i]))

        # Stack the six part features: batchsize x part x triplet_features.
        tri_feat = torch.stack(tri_feat,1)

        fc1 = self.fc1(global_feat.view(global_feat.size(0),-1))
        fc1_l1 = self.fc1_l1(fc1)
        # fc1_l2 = self.fc1_l2(fc1)
        # fc1_l3 = self.fc1_l3(fc1)

        ## generate the attention map via two kinds of assignment 
        att_final = self._compatibility_fn(x2, fc1_l1, level=1)


        ## global pooling for attention map:        
        # att_final = self.avgpool1(att_final)

        ## refined part pooling for attention map:
        # subtract the spatial mean so the map highlights deviations from it
        center = F.avg_pool2d(att_final,(att_final.size(2),att_final.size(3)))
        att_final = att_final-center.expand_as(att_final)

        # NOTE(review): F.softmax without an explicit dim is deprecated; for a
        # 4D input the legacy behavior is dim=1 (the 6 mask channels) — confirm.
        local_mask = self.local_mask(att_final)
        local_mask = F.softmax(local_mask.squeeze())
  

        # Split the 6 soft masks and pool a mask-weighted attention score per part
        # (x6 rescales because softmax makes the 6 weights sum to 1).
        lw = local_mask.chunk(6,1)
        f0 = att_final*6*(lw[0].expand_as(att_final))
        f1 = att_final*6*(lw[1].expand_as(att_final))
        f2 = att_final*6*(lw[2].expand_as(att_final))
        f3 = att_final*6*(lw[3].expand_as(att_final))
        f4 = att_final*6*(lw[4].expand_as(att_final))
        f5 = att_final*6*(lw[5].expand_as(att_final))
        f0 = F.avg_pool2d(f0,kernel_size=(f0.size(2),f0.size(3)))  
        f1 = F.avg_pool2d(f1,kernel_size=(f1.size(2),f1.size(3)))  
        f2 = F.avg_pool2d(f2,kernel_size=(f2.size(2),f2.size(3)))  
        f3 = F.avg_pool2d(f3,kernel_size=(f3.size(2),f3.size(3)))  
        f4 = F.avg_pool2d(f4,kernel_size=(f4.size(2),f4.size(3)))  
        f5 = F.avg_pool2d(f5,kernel_size=(f5.size(2),f5.size(3)))         
        att_final1 = torch.cat((f0,f1,f2,f3,f4,f5),2)


        ## hard sample mining via the global feature, then EMD between parts.
        # Pairwise Euclidean distance matrix of the global features.
        n = x.size(0)
        dist = torch.pow(global_feat, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # NOTE(review): positional (beta, alpha) addmm_ signature is deprecated
        # in modern torch; keyword form would be addmm_(..., beta=1, alpha=-2).
        dist.addmm_(1, -2, global_feat, global_feat.t())
        dist = dist.clamp(min=1e-12).sqrt()
        # mask[i][j] is True when samples i and j share an identity.
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            ## get the hardest positive sample index and hardest negative sample index
            # NOTE(review): max over the negatives picks the FARTHEST negative;
            # hardest-negative mining would use min — confirm this is intended.
            _, pos_index=torch.max(dist[i][mask[i]],0)
            _, neg_index=torch.max(dist[i][mask[i]==0],0)
            
            ## pos_index and neg_index are aligned in the batch format            
            # The index arithmetic below assumes PK sampling with K=8 images per
            # identity, laid out contiguously in the batch — TODO confirm with
            # the sampler.  Negative indices past the own-ID block shift by 8
            # because that block was removed by the boolean mask.
            if neg_index>=(((i)//8)*8):
                neg_index=neg_index+8  

            ## calculate the EMD distance according to the CVPR 2019 paper
            dist_ap.append(self.emd_distance_flow(tri_feat[pos_index.item()+(i//8)*8,:,:],tri_feat[i,:,:],att_final1[pos_index.item()+(i//8)*8,:],att_final1[i,:]).view(1))
            dist_an.append(self.emd_distance_flow(tri_feat[neg_index.item(),:,:],tri_feat[i,:,:],att_final1[neg_index.item(),:],att_final1[i,:]).view(1))
            # dist_ap.append(dist[i][mask[i]].max().view(1))
            # dist_an.append(dist[i][mask[i] == 0].min().view(1))

        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        # y = dist_an.data.new()
        # y.resize_as_(dist_an.data)
        # y.fill_(1)
        # y = Variable(y)
        return y, dist_ap, dist_an, global_result


    def emd_distance_flow(self, x, y, att_x, att_y):
        """Earth-mover distance between two sets of part embeddings.

        x, y: part embeddings (part x triplet_features); att_x, att_y: the
        corresponding per-part attention scores used as marginal weights.
        The optimal transport plan T_best = diag(u) K diag(v) is computed via
        Sinkhorn scaling (no gradient through the plan itself); the returned
        distance is the plan-weighted mean of the ground distances.
        """
        ## we first get the best flow 
        # dist_calculate = torch.nn.PairwiseDistance(p=2)
        dist1=Variable(self.EuclideanDistances(x,y), requires_grad = False)

        # dist2 carries the gradient; the transport plan below does not.
        dist2 = Variable(dist1.view(self.part,self.part), requires_grad =True)
        # Gibbs kernel of the cost matrix (regularization strength 0.01).
        K= Variable(torch.exp(-0.01* dist1), requires_grad = False)
        u, v = self.scaling_para(K, att_x, att_y)
        U=Variable(torch.diag(u.squeeze()), requires_grad = False)
        V=Variable(torch.diag(v.squeeze()), requires_grad = False)
        T_best = Variable(U.mm(K).mm(V) , requires_grad = False)
        dist_final = T_best.mul(dist2).sum()/dist2.sum()
        return dist_final

    def scaling_para(self, K, att_x, att_y):
        """Sinkhorn iterations: return scaling vectors (u, v) for kernel K.

        The attention scores are L2-normalized then softmaxed to form the two
        marginal distributions; u and v are alternately updated for
        ``self.max_iter`` rounds.  NOTE(review): requires CUDA (torch.rand(...).cuda()).
        """
        u = Variable(torch.rand(self.part,1).cuda(), requires_grad = False)
        v = Variable(torch.rand( self.part,1).cuda(), requires_grad = False)
        att_x1 = att_x/att_x.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        att_y1 = att_y/att_y.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        att_x1 =  Variable(F.softmax(att_x1.squeeze(0),dim=0), requires_grad = False)
        att_y1 = Variable(F.softmax(att_y1.squeeze(0), dim=0), requires_grad =False)
        # att_x1 =  Variable(F.softmax(att_x1,dim=1), requires_grad = False)
        # att_y1 = Variable(F.softmax(att_y1, dim=1), requires_grad =False)
        # att_x1 = (att_x1-att_x1.min())/(att_x1.max()-att_x1.min())
        # att_y1 = (att_y1-att_y1.min())/(att_y1.max()-att_y1.min())
        # att_x1 = att_x1/att_x1.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        # att_y1 = att_y1/att_y1.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        for i in range(self.max_iter):
            u = att_x1.div(K.mm(v))
            v = att_y1.div(K.t().mm(u))
        return u, v

    def emd_distance(self, x, y, att_x, att_y):
        """Simpler attention-weighted distance (no transport plan).

        Weights the element-wise pairwise distances by |att_x - att_y| and
        normalizes by the total distance.  Kept as an alternative to
        emd_distance_flow; not called in forward.
        """
        ## compute the pair-wise distance, for each x and each y 

        dist_calculate = torch.nn.PairwiseDistance(p=2)
        dist=dist_calculate(x,y)       
        # att_diff = pairwise_distance(att_x, att_y)        
        att_diff = torch.abs(att_x - att_y)
        dist_final = att_diff.mul(dist).sum()/dist.sum()
        return dist_final

    ## fix the bugs and this distance is verified
    def EuclideanDistances(self, x, y ):
        """Pairwise Euclidean distances between the rows of x and y (6 x 6).

        NOTE(review): the part count 6 is hard-coded (should match self.part).
        NOTE(review): SqB is transposed the same way as SqA, so entry [i][j]
        uses |y_i|^2 rather than |y_j|^2 — this is not the standard pairwise
        distance unless the row norms of y are equal; verify against usage.
        """
        vec = x.mm(y.t())
        SqA = torch.pow(x,2).sum(1).repeat(6,1).t()
        SqB = torch.pow(y,2).sum(1).repeat(6,1).t()
        SqED = SqA + SqB - 2*vec
        SqED = SqED.clamp(min=1e-12).sqrt()
        return SqED


    def _compatibility_fn(self, l, g, level):
        """Spatial attention map from local features l and global query g.

        'dp' mode: channel-wise dot product of l with g; 'pc' mode: additive
        combination followed by a learned 1x1 conv (u1).  Either way the map
        is softmax-normalized over the spatial locations.
        NOTE(review): levels 2/3 reference self.u2/self.u3, which are never
        created in __init__ — only level=1 is usable as written.
        """
        if self.mode == 'dp':
            att = l * g.unsqueeze(2).unsqueeze(3)
            att = att.sum(1).unsqueeze(1)

            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)
        elif self.mode == 'pc':
            att = l + g.unsqueeze(2).unsqueeze(3)

            if level == 1:
                u = self.u1
            elif level == 2:
                u = self.u2
            elif level == 3:
                u = self.u3
            att = u(att)
            
            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)

        return att        

    def _weighted_combine(self, l, att_map):
        """Attention-weighted global pooling: sum of l * att_map over space."""
        g = l * att_map
        return g.view(g.size(0), g.size(1), -1).sum(2)
