from __future__ import absolute_import

from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
from torch.autograd import Variable
__all__ = ['resnet50_rpp_emd']

## define the tri_block module
class tri_block(nn.Module):
    """Per-part triplet embedding head.

    Projects a 2048-d part feature to a compact embedding used by the
    triplet/EMD losses: Linear(2048 -> num_features) -> BatchNorm1d ->
    ReLU -> Linear(num_features -> triplet_features).
    """

    def __init__(self, num_features, triplet_features):
        super(tri_block, self).__init__()
        self.num_features = num_features
        self.triplet_features = triplet_features

        # Bottleneck projection from the backbone's 2048-d part vector.
        self.feat = nn.Linear(2048, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        init.kaiming_normal_(self.feat.weight, mode='fan_out')
        init.constant_(self.feat.bias, 0)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)

        # Final projection producing the triplet embedding.
        self.triplet = nn.Linear(self.num_features, self.triplet_features)
        init.normal_(self.triplet.weight, std=0.001)
        init.constant_(self.triplet.bias, 0)

    def forward(self, x, output_feature=None):
        # `output_feature` is kept for interface compatibility but is unused.
        out = self.feat(x)
        out = self.feat_bn(out)
        out = F.relu(out)
        return self.triplet(out)


class SinkhornDistance(nn.Module):
    r"""
    Given two empirical measures with n points each with locations x and y,
    outputs an approximation of the regularized OT cost for point clouds.

    Args:
        eps (float): regularization coefficient
        max_iter (int): maximum number of Sinkhorn iterations
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the sum of the output will be divided by the number of
            elements in the output, 'sum': the output will be summed. Default: 'mean'

    Shape:
        - Input: :math:`(N, \text{in\_features})`, :math:`(N, \text{in\_features})`
        - Output: :math:`(N)` or :math:`()`, depending on `reduction`
    """
    def __init__(self, grad=False, eps=1e-2, max_iter=100, reduction=None):
        super(SinkhornDistance, self).__init__()
        self.eps = eps
        self.max_iter = max_iter
        self.reduction = reduction
        self.grad = grad

    def forward(self, x, y, mu, nu):
        # The Sinkhorn algorithm takes as input three variables :
        C = self._cost_matrix(x, y)  # Wasserstein cost function

        n_points = x.shape[-2]
        if x.dim() == 2:
            batch_size = 1
        else:
            batch_size = x.shape[0]

        # both marginals are fixed with equal weights
        ## we set different marginal distributions
        # mu = torch.empty(batch_size, n_points, dtype=torch.float,
        #                  requires_grad=False).fill_(1.0 / n_points).squeeze()
        # nu = torch.empty(batch_size, n_points, dtype=torch.float,
        #                  requires_grad=False).fill_(1.0 / n_points).squeeze()

        u = torch.zeros_like(mu)
        v = torch.zeros_like(nu)
        # To check if algorithm terminates because of threshold
        # or max iterations reached
        actual_nits = 0
        # Stopping criterion
        thresh = 1e-1

        # Sinkhorn iterations
        for i in range(self.max_iter):
            u1 = u  # useful to check the update
            u = self.eps * (torch.log(mu+1e-8) - self.lse(self.M(C, u, v))) + u
            v = self.eps * (torch.log(nu+1e-8) - self.lse(self.M(C, u, v).transpose(-2, -1))) + v
            err = (u - u1).abs().sum(-1).mean()
            actual_nits += 1
            if err.item() < thresh:
                break

        U, V = u, v
        # Transport plan pi = diag(a)*K*diag(b)

        pi = torch.exp(self.M(C, U, V))
        # Sinkhorn distance
        C1 = Variable(C.view(batch_size, 6,6), requires_grad=True)
        if not self.grad:
            C = Variable(C, requires_grad= self.grad)			
            pi = Variable(pi, requires_grad= self.grad)
            u, v = Variable(u, requires_grad= self.grad), Variable(v, requires_grad= self.grad)

            U, V = Variable(U, requires_grad= self.grad), Variable(V, requires_grad= self.grad)
        	
        cost = torch.sum(pi * C1, dim=(-2, -1))




        if self.reduction == 'mean':
            cost = cost.mean()
        elif self.reduction == 'sum':
            cost = cost.sum()

        return cost, pi, C

    def M(self, C, u, v):
        "Modified cost for logarithmic updates"
        "$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
        return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps

    @staticmethod
    def _cost_matrix(x, y, p=2):
        "Returns the matrix of $|x_i-y_j|^p$."
        x_col = x.unsqueeze(-2)
        y_lin = y.unsqueeze(-3)
        C = torch.sum((torch.abs(x_col - y_lin)) ** p, -1)
        return C

    @staticmethod
    def lse(A):
        "log-sum-exp"
        # add 10^-6 to prevent NaN
        result = torch.log(torch.exp(A).sum(-1) + 1e-6)
        return result

    @staticmethod
    def ave(u, u1, tau):
        "Barycenter subroutine, used by kinetic acceleration through extrapolation."
        return tau * u + (1 - tau) * u1




class ResNet(nn.Module):
    """PCB/RPP-style part-based re-ID network with an EMD (Sinkhorn) branch.

    Wraps a torchvision ResNet backbone.  In FCN mode the final feature map
    is softly partitioned into 6 parts via a learned 1x1-conv mask; each part
    gets its own linear classifier and a `tri_block` triplet embedding, and
    per-part embeddings of mined positive/negative pairs are compared with a
    Sinkhorn (EMD) distance whose marginals come from an attention module.
    """
    # Map depth -> torchvision constructor.
    __factory = {
        18: torchvision.models.resnet18,
        34: torchvision.models.resnet34,
        50: torchvision.models.resnet50,
        101: torchvision.models.resnet101,
        152: torchvision.models.resnet152,
    }

    def __init__(self, depth, pretrained=True, cut_at_pooling=False,
                 num_features=0, norm=False, dropout=0, num_classes=0, FCN=False, T=1, dim = 256):
        """
        Args:
            depth: backbone depth, one of 18/34/50/101/152.
            pretrained: load ImageNet weights for the backbone.
            cut_at_pooling: if True, forward returns the raw feature map.
            num_features: embedding width (per-part width in FCN mode).
            norm: L2-normalize the embedding (non-FCN path only).
            dropout: dropout probability for the head.
            num_classes: number of identities for the classifiers.
            FCN: enable the part-based (RPP) head.
            T: softmax temperature applied to the part masks.
            dim: reduced channel dim fed to `local_mask` (see forward).
        """
        super(ResNet, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        self.FCN=FCN
        self.T = T
        self.reduce_dim = dim
        self.part = 6
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)
        # 'pc' (parametrized-compatibility) attention mode; see _compatibility_fn.
        self.mode = 'pc'
#==========================add dilation=============================#
        if self.FCN:
            # Remove the stride-2 downsampling in layer4 so the final
            # feature map keeps a larger spatial resolution for part pooling.
            self.base.layer4[0].conv2.stride=(1,1)
            self.base.layer4[0].downsample[0].stride=(1,1)
#================append conv for FCN==============================#
            self.num_features = num_features
            self.num_classes = num_classes
            self.dropout = dropout
            # 1x1 conv reducing 2048 backbone channels to num_features.
            # NOTE(review): init.kaiming_normal / init.constant / init.normal
            # below are the deprecated (non-underscore) torch.nn.init names.
            self.local_conv = nn.Conv2d(2048, self.num_features, kernel_size=1,padding=0,bias=False)
            init.kaiming_normal(self.local_conv.weight, mode= 'fan_out')
#            init.constant(self.local_conv.bias,0)
            self.feat_bn2d = nn.BatchNorm2d(self.num_features) #may not be used, not working on caffe
            init.constant(self.feat_bn2d.weight,1) #initialize BN, may not be used
            init.constant(self.feat_bn2d.bias,0) # iniitialize BN, may not be used

##---------------------------stripe1----------------------------------------------#
            # One identity classifier per part stripe (6 in total).
            self.instance0 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance0.weight, std=0.001)
            init.constant(self.instance0.bias, 0)
##---------------------------stripe1----------------------------------------------#
##---------------------------stripe1----------------------------------------------#
            self.instance1 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance1.weight, std=0.001)
            init.constant(self.instance1.bias, 0)
##---------------------------stripe1----------------------------------------------#
##---------------------------stripe1----------------------------------------------#
            self.instance2 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance2.weight, std=0.001)
            init.constant(self.instance2.bias, 0)
##---------------------------stripe1----------------------------------------------#
##---------------------------stripe1----------------------------------------------#
            self.instance3 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance3.weight, std=0.001)
            init.constant(self.instance3.bias, 0)
##---------------------------stripe1----------------------------------------------#
##---------------------------stripe1----------------------------------------------#
            self.instance4 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance4.weight, std=0.001)
            init.constant(self.instance4.bias, 0)
##---------------------------stripe1----------------------------------------------#
##---------------------------stripe1----------------------------------------------#
            self.instance5 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance5.weight, std=0.001)
            init.constant(self.instance5.bias, 0)
##---------------------------stripe1----------------------------------------------#
            self.drop = nn.Dropout(self.dropout)

            ## for ce localfeature
            # 1x1 conv producing 6 soft part-mask channels from the
            # channel-reduced (reduce_dim) feature map.
            self.local_mask = nn.Conv2d(self.reduce_dim, 6 , kernel_size=1,padding=0,bias=True)
            init.kaiming_normal(self.local_mask.weight, mode= 'fan_out')
            init.constant(self.local_mask.bias,0)

#===================================================================#

        elif not self.cut_at_pooling:
            # Standard global-feature head (non-FCN path).
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            out_planes = self.base.fc.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features, bias=False)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
#                init.constant(self.feat.bias, 0)
                init.constant(self.feat_bn.weight, 1)
                init.constant(self.feat_bn.bias, 0)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
#                self.classifier = nn.Linear(self.num_features, self.num_classes)
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)

        ## for att mask
        # NOTE(review): local_mask_att is defined but never used in forward.
        self.local_mask_att = nn.Conv2d(1, 6 , kernel_size=1,padding=0,bias=True)
        init.kaiming_normal(self.local_mask_att.weight, mode= 'fan_out')
        init.constant(self.local_mask_att.bias,0)
        if self.mode == 'pc':
            # 1x1 conv collapsing 2048 channels to a single attention map.
            self.u1 = nn.Conv2d(2048,1,1)

        # fc1 maps the concatenated 6-part feature (6 * num_features = 1536,
        # i.e. assumes num_features == 256 — TODO confirm) to 2048.
        self.fc1 = nn.Linear(1536, 2048)
        self.fc1_l1 = nn.Linear(2048,2048)
        self.S_distance = SinkhornDistance()
        # One tri_block embedding head per part: self.tri0 .. self.tri5.
        for i in range(self.part):
            name = 'tri' + str(i)
            setattr(self, name, tri_block(256,128))

        # self.triplet = nn.Linear(2048, self.config.dim_triplet_features)
        # init.normal_(self.triplet.weight, std=0.001)
        # init.constant_(self.triplet.bias, 0)

        if not self.pretrained:
            self.reset_params()

    def forward(self, x, targets= None, output= None):
        """Run the backbone and the selected head.

        Args:
            x: input image batch.
            targets: identity labels, required on the FCN training path
                (used for hard positive/negative mining).
            output: 'pcb' returns early with (out0, class scores, mask).

        Returns (FCN path): (out0, (c0..c5), dist_ap, dist_an); otherwise
        (out2, class scores).
        """
        # Run the backbone up to (excluding) its avgpool/fc layers.
        for name, module in self.base._modules.items():
            if name == 'avgpool':
                break
            x = module(x)

        if self.cut_at_pooling:
            return x
#=======================FCN===============================#
        if self.FCN:
            T = self.T
            ## refined_pooling          
            # Channel reduction: 3-D average pooling over groups of
            # `stride` channels maps 2048 -> reduce_dim channels.
            y = self.drop(x).unsqueeze(1)
            stride = int(2048/self.reduce_dim)
            y = F.avg_pool3d(y,kernel_size=(stride,1,1),stride=(stride,1,1)).squeeze(1)
            # Center the reduced map by subtracting its spatial mean.
            center = F.avg_pool2d(y,(y.size(2),y.size(3)))
            y = y-center.expand_as(y)
            # 6-channel soft part mask, softmax-normalized with temperature T.
            # NOTE(review): F.softmax is called without dim= — relies on the
            # legacy implicit-dim behavior; confirm intended axis (parts, dim 1).
            local_mask = self.local_mask(y)
            local_mask = F.softmax(T*local_mask)   #using softmax mode

            # Weight the full-resolution feature map by each part mask, then
            # average-pool each weighted map to one 2048-d vector per part.
            # The *6 compensates for the softmax splitting mass across parts.
            lw = local_mask.chunk(6,1)
            x = x*6
            f0 = x*(lw[0].expand_as(x))
            f1 = x*(lw[1].expand_as(x))
            f2 = x*(lw[2].expand_as(x))
            f3 = x*(lw[3].expand_as(x))
            f4 = x*(lw[4].expand_as(x))
            f5 = x*(lw[5].expand_as(x))
            f0 = F.avg_pool2d(f0,kernel_size=(f0.size(2),f0.size(3)))  
            f1 = F.avg_pool2d(f1,kernel_size=(f1.size(2),f1.size(3)))  
            f2 = F.avg_pool2d(f2,kernel_size=(f2.size(2),f2.size(3)))  
            f3 = F.avg_pool2d(f3,kernel_size=(f3.size(2),f3.size(3)))  
            f4 = F.avg_pool2d(f4,kernel_size=(f4.size(2),f4.size(3)))  
            f5 = F.avg_pool2d(f5,kernel_size=(f5.size(2),f5.size(3))) 
            # (N, 2048, 6, 1): one column per part.
            x = torch.cat((f0,f1,f2,f3,f4,f5),2)
            feat = torch.cat((f0,f1,f2,f3,f4,f5),2)
            
            attention_before = feat.view(feat.size(0),2048,6,1)
            tri_feat = []

            # Per-part triplet embeddings via the tri0..tri5 heads.
            for i in range(6):
                name = 'tri'+str(i)
                t = getattr(self,name)
                tri_feat.append(t(x[:,:,i,:].squeeze().contiguous().view(x.size(0),-1)))


            # L2-normalized concatenated part feature (test-time descriptor).
            out0 = feat/feat.norm(2,1).unsqueeze(1).expand_as(feat)
            
            x = self.drop(x)
            x = self.local_conv(x)

            # NOTE(review): out1 is assigned three times and never returned —
            # only the last assignment (post-BN) would matter, and it is dead.
            out1 = x.view(x.size(0),-1)
            out1 = x/x.norm(2,1).unsqueeze(1).expand_as(x)
            
            x = self.feat_bn2d(x)
            out1 = x/x.norm(2,1).unsqueeze(1).expand_as(x)
            x = F.relu(x) # relu for local_conv feature
            # Split into the 6 parts and classify each with its own head.
            x = x.chunk(6,2)
            x0 = x[0].contiguous().view(x[0].size(0),-1)
            x1 = x[1].contiguous().view(x[1].size(0),-1)
            x2 = x[2].contiguous().view(x[2].size(0),-1)
            x3 = x[3].contiguous().view(x[3].size(0),-1)
            x4 = x[4].contiguous().view(x[4].size(0),-1)
            x5 = x[5].contiguous().view(x[5].size(0),-1)
            c0 = self.instance0(x0)
            c1 = self.instance1(x1)
            c2 = self.instance2(x2)
            c3 = self.instance3(x3)
            c4 = self.instance4(x4)
            c5 = self.instance5(x5)
            if output == 'pcb':
                return out0, (c0, c1, c2, c3, c4, c5), local_mask
            # Concatenated part features (6 * num_features dims) and stacked
            # per-part triplet embeddings (N, 6, 128).
            global_feat = torch.cat((x0, x1, x2, x3, x4, x5),1)
            tri_feat = torch.stack(tri_feat, 1)
            
            fc1 = self.fc1(global_feat)
            fc1_l1 = self.fc1_l1(fc1)

            # Attention over the 6 parts, used as Sinkhorn marginals below.
            att_final = self._compatibility_fn(attention_before, fc1_l1, level=1)
            # NOTE(review): debug print left in the training path.
            print(att_final.shape)
            ## we follow the same divition with part_features
            #att_final = att_final*6
            #att_final0 = x*(lw[0].expand_as(att_final))
            #att_final1 = x*(lw[1].expand_as(att_final))
            #att_final2 = x*(lw[2].expand_as(att_final))
            #att_final3 = x*(lw[3].expand_as(att_final))
            #att_final4 = x*(lw[4].expand_as(att_final))
            #att_final5 = x*(lw[5].expand_as(att_final))

            #att_final0 = F.avg_pool2d(att_final0, kernel_size=(att_final0.size(2),att_final0.size(3)))
            #att_final1 = F.avg_pool2d(att_final1, kernel_size=(att_final1.size(2),att_final1.size(3)))
            #att_final2 = F.avg_pool2d(att_final2, kernel_size=(att_final2.size(2),att_final2.size(3)))
            #att_final3 = F.avg_pool2d(att_final3, kernel_size=(att_final3.size(2),att_final3.size(3)))
            #att_final4 = F.avg_pool2d(att_final4, kernel_size=(att_final4.size(2),att_final4.size(3)))
            #att_final5 = F.avg_pool2d(att_final5, kernel_size=(att_final5.size(2),att_final5.size(3)))            
            #att_prob = torch.cat((att_final0,att_final1,att_final2,att_final3,att_final4,att_final5),2)
            
            att_prob = F.normalize(att_final.squeeze())
            att_prob = Variable(F.softmax(att_prob, dim=1), requires_grad = True)

            # Pairwise Euclidean distance matrix over global_feat.
            # NOTE(review): batch size is hard-coded to 64 and identities are
            # assumed to appear in consecutive groups of 4 samples — confirm
            # against the data sampler.  dist.addmm_(1, -2, ...) uses the
            # deprecated positional beta/alpha signature.
            n = 64
            dist = torch.pow(global_feat, 2).sum(dim=1, keepdim=True).expand(n, n)
            dist = dist + dist.t()
            dist.addmm_(1, -2, global_feat, global_feat.t())
            dist = dist.clamp(min=1e-12).sqrt()
            # mask[i][j] == 1 iff samples i and j share an identity.
            mask = targets.expand(n, n).eq(targets.expand(n, n).t())
            dist_ap, dist_an = [], []
            Pos_index = [] 
            Neg_index = []
            for i in range(n):
                # Indices returned by max are relative to the masked subsets;
                # (i//4)*4 is the start of sample i's identity group of 4.
                _, pos_index = torch.max(dist[i][mask[i]==1],0)
                _, neg_index = torch.max(dist[i][mask[i]==0],0)

                # Shift negative index past the skipped positive group to
                # recover a full-batch index.
                if neg_index >= (i//4)*4:
                    neg_index = neg_index+4
                Pos_index.append(pos_index.data+(i//4)*4)
                Neg_index.append(neg_index)
            
            # EMD distances between each anchor and its mined pos/neg, using
            # the attention rows as Sinkhorn marginals.
            x_pos = tri_feat[Pos_index,:,:]
            x_neg = tri_feat[Neg_index,:,:]
            att_pos = att_prob[Pos_index,:]
            att_neg = att_prob[Neg_index,:]
            dist_ap = self.emd_distance_S(x_pos, tri_feat,att_pos,att_prob)
            dist_an = self.emd_distance_S(x_neg, tri_feat,att_neg,att_prob)

            return out0, (c0,c1,c2,c3,c4,c5), dist_ap, dist_an

#==========================================================#


        # Non-FCN path: global average pooling + optional embedding head.
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(x.size(0), -1)
        out1 = x
        out1 = x / x.norm(2,1).unsqueeze(1).expand_as(x)
        if self.has_embedding:
            x = self.feat(x)
            x = self.feat_bn(x)
            out2 = x/ x.norm(2,1).unsqueeze(1).expand_as(x)
        if self.norm:
            x = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
        if self.dropout > 0:
            x = self.drop(x)
        if self.num_classes > 0:
            x = self.classifier(x)

        # NOTE(review): out2 is only bound when has_embedding is True;
        # with num_features == 0 this raises NameError — confirm callers.
        return out2, x


    def reset_params(self):
        """Re-initialize conv/BN/linear weights (used when not pretrained).

        NOTE(review): uses the deprecated non-underscore torch.nn.init names.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
    
    def emd_distance_S(self, x, y, att_x, att_y):
        """Sinkhorn/EMD distance between part-embedding sets x and y with
        marginals att_x / att_y; returns only the cost term."""
        # att_x1 = att_x/att_x.norm(2,1).expand_as(att_x).clamp(min=1e-12)
        # att_y1 = att_y/att_y.norm(2,1).expand_as(att_y).clamp(min=1e-12)
        # att_x1 =  Variable(F.softmax(att_x1.squeeze(0),dim=0), requires_grad = False)
        # att_y1 = Variable(F.softmax(att_y1.squeeze(0), dim=0), requires_grad =False)
        dist_final, _, _ = self.S_distance(x, y, att_x, att_y)
        return dist_final
    
    def _compatibility_fn(self, l, g, level):
        """Compatibility attention between local map `l` and global vector `g`.

        'dp' mode: dot-product attention; 'pc' mode: additive attention
        scored by a learned 1x1 conv.  Returns a spatially softmax-normalized
        attention map shaped like the conv output.
        """
        if self.mode == 'dp':
            att = l * g.unsqueeze(2).unsqueeze(3)
            att = att.sum(1).unsqueeze(1)

            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)
        elif self.mode == 'pc':
            att = l + g.unsqueeze(2).unsqueeze(3)

            # NOTE(review): only self.u1 is defined in __init__; levels 2/3
            # reference self.u2 / self.u3 which do not exist — calling with
            # level in (2, 3) would raise AttributeError.
            if level == 1:
                u = self.u1
            elif level == 2:
                u = self.u2
            elif level == 3:
                u = self.u3
            att = u(att)
            
            size = att.size()
            att = att.view(att.size(0), att.size(1), -1)
            att = F.softmax(att, dim=2)
            att = att.view(size)

        return att    


def resnet50_rpp_emd(**kwargs):
    """Factory for the 50-layer RPP+EMD model; kwargs are forwarded to ResNet."""
    model = ResNet(50, **kwargs)
    return model
