import torch
from torch.autograd import Variable
def global_loss(tri_loss, global_feat, labels, normalize_feature=True):
    '''Triplet loss over globally pooled features with batch-hard mining.
    Args:
    :param tri_loss: a 'TripletLoss' object, callable on (dist_ap, dist_an)
    :param global_feat: pytorch Variable, shape [N, C]
    :param labels: pytorch LongTensor, shape [N]
    :param normalize_feature: whether to normalize feature to unit length along the channel dimension
    :return:
    loss: pytorch Variable, shape [1]
    p_inds: pytorch LongTensor, shape [N], indices of selected hard positive samples; 0<=p_inds[i]<=N-1
    n_inds: pytorch LongTensor, shape [N], indices of selected hard negative samples; 0<=n_inds[i]<=N-1
    dist_ap: pytorch Variable, distance(anchor, positive); shape [N] (for debugging)
    dist_an: pytorch Variable, distance(anchor, negative); shape [N] (for debugging)
    dist_mat: pytorch Variable, pairwise euclidean distance; shape [N, N] (for mutual learning)
    '''
    feat = normalize_global(global_feat, axis=-1) if normalize_feature else global_feat
    # pairwise distances, shape [N, N]
    dist_mat = euclidean_dist(feat, feat)
    dist_ap, dist_an, p_inds, n_inds = hard_example_mining(
        dist_mat, labels, return_inds=True)
    loss = tri_loss(dist_ap, dist_an)
    return loss, p_inds, n_inds, dist_ap, dist_an, dist_mat
def spatial_loss(tri_loss, spatial_feat, p_inds=None, n_inds=None, labels=None, normalize_feature=True):
    '''Triplet loss over spatial features.

    If hard samples are specified by p_inds and n_inds, then labels is not
    used and the pairs chosen by the global distance are reused (DCR_L).
    Otherwise, the spatial distance finds its own hard samples independent
    of the global distance.
    '''
    if normalize_feature:
        spatial_feat = normalize_spatital(spatial_feat, axis=-1)
    # pairwise spatial distance is returned in both branches
    dist_mat = spatial_dist(spatial_feat, spatial_feat)
    if p_inds is None or n_inds is None:
        # mine hard positives/negatives from the spatial distance itself
        dist_ap, dist_an = hard_example_mining(dist_mat, labels, return_inds=False)
    else:
        # reuse the hard samples selected by the global distance
        dist_ap, dist_an = DCR_L(spatial_feat, spatial_feat, p_inds, n_inds)
    loss = tri_loss(dist_ap, dist_an)
    return loss, dist_ap, dist_an, dist_mat

def normalize_global(x, axis=-1):
    '''
    Normalizing to unit length along the specified dimension.
    :param x: pytorch Variable
    :param axis: dimension along which the L2 norm is taken
    :return: x: pytorch Variable, same shape as input.
    '''
    # epsilon guards against division by zero for all-zero rows
    denom = torch.norm(x, 2, axis, keepdim=True) + 1e-12
    return 1. * x / denom
def normalize_spatital(x, axis=-1):
  """Normalize each 2-D slice x[i] to unit length.

  Equivalent to the original per-sample loop
  ``y[i] = normalize(x[i].t(), axis).t()`` but fully vectorized and
  device-agnostic: the result lives on x's device instead of being
  forced onto CUDA (the old loop crashed for CPU input and required a GPU).

  Args:
    x: pytorch Variable, shape [N, A, B] (a batch of 2-D feature maps)
    axis: axis of the *transposed* 2-D slice along which to normalize
      (default -1, i.e. normalize x[i] along its first dimension)
  Returns:
    pytorch Variable, same shape as input
  """
  xt = x.transpose(1, 2)
  # map the 2-D per-slice axis (-1, 0 or 1) onto the batched 3-D tensor:
  # slice axis -1/1 -> dim 2, slice axis 0 -> dim 1
  d = axis % 2 + 1
  # epsilon guards against division by zero for all-zero vectors
  xt = 1. * xt / (torch.norm(xt, 2, d, keepdim=True).expand_as(xt) + 1e-12)
  return xt.transpose(1, 2)

def euclidean_dist(x, y):
    '''
    Pairwise euclidean distance via ||x-y||^2 = ||x||^2 + ||y||^2 - 2*x.y.
    Args:
    :param x:  pytorch Variable, with shape [m,d]
    :param y:  pytorch Variable, with shape [n,d]
    :return:
    dist: pytorch Variable, with shape[m,n]
    '''
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # keyword form: the positional addmm_(beta, alpha, m1, m2) overload is
    # deprecated/removed in modern torch
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    # clamp before sqrt for numerical stability (avoids sqrt of tiny negatives)
    dist = dist.clamp(min=1e-12).sqrt()
    return dist
def spatial_dist(x,y):
    '''
    Pairwise "spatial" distance: dist[i, j] is the mean (over channels) of the
    column-wise L2 residual after least-squares projecting y[j] onto the
    column space of x[i] (ridge-regularized by kappa).
    Args:
    :param x: pytorch Variable, with shape [m, A, B] (each x[i] is a 2-D map)
    :param y: pytorch Variable, with shape [n, A, B]
    :return:
    dist: pytorch Variable, with shape [m, n]
    NOTE(review): hard-codes .cuda(), so this only runs on GPU inputs.
    '''
    m,n = x.size(0),y.size(0)
    # ridge regularizer keeps (x^T x + kappa*I) invertible
    kappa = 1e-3
    dist = Variable(torch.zeros(m,n))
    dist = dist.cuda()
    T = kappa*Variable(torch.eye(x.size(2)))
    T = T.cuda()
    # NOTE(review): .detach() without assignment is a no-op (it returns a new
    # tensor); if stopping gradients was intended here, this does not do it.
    T.detach()
    for i in range(m):
        # Proj_M = (x_i^T x_i + kappa*I)^{-1} x_i^T  — regularized pseudo-inverse
        Proj_M = torch.matmul(torch.inverse(torch.matmul(x[i,::].t(),x[i,::])+T),x[i,::].t())
        Proj_M.detach()  # NOTE(review): no-op, see above
        for j in range(0, n):
            # least-squares coefficients of y_j in the basis x_i
            w = torch.matmul(Proj_M, y[j,::])
            w.detach()  # NOTE(review): no-op, see above
            # reconstruction residual of y_j from x_i's columns
            a = torch.matmul(x[i,::],w)-y[j,::]
            # column-wise L2 norm of the residual, averaged over columns
            dist[i,j] = torch.pow(a,2).sum(0).sqrt().mean()
    return dist

def DCR_L(x, y, p_inds, n_inds):
  """
  Spatial (projection-residual) distances from each anchor y[i] to its
  pre-selected hard positive x[p_inds[i]] and hard negative x[n_inds[i]],
  using the same ridge-regularized least-squares residual as spatial_dist.
  Args:
    x: pytorch Variable, shape [N, A, B] (batch of 2-D feature maps)
    y: pytorch Variable, shape [N, A, B]
    p_inds, n_inds: LongTensor, shape [N], hard positive/negative indices
  Returns:
    (dist_n, dist_p): two pytorch Variables of shape [N]
    NOTE(review): the return order is (negative, positive), and the caller in
    spatial_loss unpacks it as (dist_ap, dist_an) — i.e. positive/negative
    appear swapped relative to the computation. Confirm this is intentional
    before relying on it.
  NOTE(review): hard-codes .cuda(), so this only runs on GPU inputs.
  """
  #start = time.time()
  m = y.size(0)
  # ridge regularizer keeps (x^T x + kappa*I) invertible
  kappa = 0.001
  dist_p = Variable(torch.zeros(m, 1))
  dist_n = Variable(torch.zeros(m, 1))
  dist_p = dist_p.cuda()
  dist_n = dist_n.cuda()
  T = kappa * Variable(torch.eye(x.size(2)))
  T = T.cuda()
  # NOTE(review): .detach() without assignment is a no-op (it returns a new
  # tensor); if stopping gradients was intended here, this does not do it.
  T.detach()
  for i in range(0, m):
    # regularized pseudo-inverse of the hard positive's feature map
    Proj_M1 = torch.matmul(torch.inverse(torch.matmul(x[p_inds[i],:,:].t(), x[p_inds[i],:,:])+T), x[p_inds[i],:,:].t())
    Proj_M1.detach()  # NOTE(review): no-op, see above
    # regularized pseudo-inverse of the hard negative's feature map
    Proj_M2 = torch.matmul(torch.inverse(torch.matmul(x[n_inds[i],:,:].t(), x[n_inds[i],:,:])+T), x[n_inds[i],:,:].t())
    Proj_M2.detach()  # NOTE(review): no-op, see above
    # least-squares coefficients of the anchor in each basis
    w1 = torch.matmul(Proj_M1, y[i,::])
    w1.detach()  # NOTE(review): no-op, see above
    w2 = torch.matmul(Proj_M2, y[i,::])
    w2.detach()  # NOTE(review): no-op, see above
    # reconstruction residuals: anchor vs positive / negative subspace
    a1 = torch.matmul(x[p_inds[i],:,:], w1) - y[i,::]
    a2 = torch.matmul(x[n_inds[i], :, :], w2) - y[i, ::]
    # column-wise L2 norm of the residual, averaged over columns
    dist_p[i, 0] = torch.pow(a1,2).sum(0).sqrt().mean()
    dist_n[i, 0] = torch.pow(a2, 2).sum(0).sqrt().mean()
  dist_n = dist_n.squeeze(1)
  dist_p = dist_p.squeeze(1)
  return dist_n, dist_p

def hard_example_mining(dist_mat, labels, return_inds=False):
    '''For each anchor, find the hardest positive and negative sample.
    :param dist_mat: pytorch Variable, pairwise distance between samples, shape [N,N]
    :param labels: pytorch LongTensor, shape [N]
    :param return_inds: whether to return the indices of the selected samples.
    :return:
    dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
    dist_an: pytorch Variable, distance(anchor, negative); shape [N]
    p_inds: pytorch LongTensor, shape [N], indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
    n_inds: pytorch LongTensor, shape [N], indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
    Note:
    Only handles the case where every label has the same number of samples,
    so the masked-select + view(N, -1) reshapes below are rectangular and all
    anchors can be processed in parallel.
    '''
    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)
    # boolean masks, shape [N, N]: same-label / different-label pairs
    lbl = labels.expand(N, N)
    is_pos = lbl.eq(lbl.t())
    is_neg = lbl.ne(lbl.t())
    # hardest positive: max distance among same-label columns, shape [N, 1]
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # hardest negative: min distance among different-label columns, shape [N, 1]
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    if not return_inds:
        return dist_ap, dist_an
    # ind[i, j] == j: column indices, same dtype/device as labels, shape [N, N]
    ind = (labels.new().resize_as_(labels)
           .copy_(torch.arange(0, N).long())
           .unsqueeze(0).expand(N, N))
    # translate the relative (within-mask) argmax/argmin back to absolute indices
    p_inds = torch.gather(
        ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data).squeeze(1)
    n_inds = torch.gather(
        ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data).squeeze(1)
    return dist_ap, dist_an, p_inds, n_inds
