import torch
import torch.nn as nn
import torch.distributions as dist
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict


def classification_loss_func(prediction, true_labels, ce_temperature=1.0):
    """Temperature-scaled cross-entropy classification loss.

    :param prediction: raw logits, shape (N, C)
    :param true_labels: integer class targets, shape (N,)
    :param ce_temperature: divisor applied to the logits before the CE
    :return: scalar cross-entropy loss tensor
    """
    scaled_logits = prediction / ce_temperature
    return nn.CrossEntropyLoss()(scaled_logits, true_labels)

def simpleshot(feature, sub):
    """SimpleShot-style transform: subtract `sub` (e.g. a mean feature) from
    every row, then L2-normalize each row to unit length."""
    centered = feature - sub
    row_norm = centered.norm(dim=1, p=2, keepdim=True)
    return centered / row_norm

def l2_feature(sub_feature):
    """Row-wise L2 normalization: scale each row to unit Euclidean norm."""
    row_norm = sub_feature.norm(dim=1, p=2, keepdim=True)
    return sub_feature / row_norm

def label_smooth_ce_loss(preds, labels, smooth=0.2):
    """Cross-entropy with label smoothing.

    The true class receives probability ``1 - smooth``; the remaining mass is
    spread uniformly over the other ``class_num - 1`` classes.

    :param preds: raw logits, shape (N, C)
    :param labels: integer class targets, shape (N,)
    :param smooth: total probability mass moved off the true class
    :return: scalar loss tensor
    """
    n_samples, class_num = preds.size(0), preds.size(1)
    # Build the smoothed target on the logits' own device (the original
    # hard-coded .cuda(), breaking CPU runs) and avoid shadowing builtin `len`.
    uniform = torch.full((n_samples, class_num), smooth / (class_num - 1),
                         device=preds.device)
    target = uniform.scatter_(dim=1, index=labels.view(-1, 1), value=(1 - smooth))
    logp = F.log_softmax(preds, dim=1)
    return torch.sum(-logp * target, dim=1).mean()

def compute_cluster_center(features, labels, class_num):
    """Compute the per-class mean (cluster center) of a batch of features.

    :param features: feature matrix, shape (n, dim)
    :param labels: per-sample integer labels, shape (n,)
    :param class_num: number of classes
    :return: class centers, shape (class_num, dim)

    NOTE(review): a class with no samples in `features` yields a NaN row
    (mean of an empty selection) — presumably callers guarantee every class
    occurs in the batch; confirm.
    """
    feature_dim = features.size(1)
    # Allocate on the features' own device instead of hard-coding .cuda(),
    # so the function also works with CPU tensors.
    feature_centers = torch.zeros((class_num, feature_dim), device=features.device)
    for i in range(class_num):
        feature_centers[i] = features[labels == i, :].mean(0)
    return feature_centers

def get_na_label(u_learned_feature, u_selected_feature, u_selected_pred, kk=5):
    """Assign a label to each unlabeled sample by neighbourhood aggregation:
    sum the prediction vectors of its top-kk most similar (dot-product)
    selected samples and take the argmax class.

    :param u_learned_feature: unlabeled features, shape (n, dim)
    :param u_selected_feature: confident/selected features, shape (m, dim)
    :param u_selected_pred: predictions for the selected samples, shape (m, C)
    :param kk: number of neighbours to aggregate (was hard-coded to 5)
    :return: per-sample labels, shape (n,) float tensor
    """
    dis = u_learned_feature.mm(u_selected_feature.t())
    u_len, select_len = dis.size(0), dis.size(1)
    # Clamp k like knn_get_label does; the original topk(dis, 5) crashed
    # whenever fewer than 5 selected samples were available.
    _, index = torch.topk(dis, min(kk, select_len))
    # Allocate on the input's device instead of hard-coding .cuda();
    # the stray debug print of (u_len, select_len) was removed.
    na_label = torch.ones(u_len, device=u_learned_feature.device)
    for ss in range(u_len):
        row_pred = u_selected_pred.index_select(0, index[ss])
        sum_pred = torch.sum(row_pred, dim=0)
        _, ll = torch.max(sum_pred.reshape(1, -1), dim=1)
        na_label[ss] = ll
    return na_label

def cal_one_loss(source_cluster_center, l_target_learned_feature, l_target_label):
    """Per-sample prototype loss: (1 - cosine similarity to the own-class
    center) divided by the summed cosine dissimilarity to all other class
    centers, averaged over the batch."""
    num_classes, feat_dim = source_cluster_center.shape
    total = 0.0
    for feat, lab in zip(l_target_learned_feature, l_target_label):
        sims = torch.cosine_similarity(feat.expand(num_classes, feat_dim), source_cluster_center)
        own = sims[lab]
        other_dissim = (num_classes - 1) - (torch.sum(sims) - own)
        total += (1 - own) / other_dissim
    return total / l_target_learned_feature.shape[0]


def cal_st_center_loss(source_center, target_center, len_class):
    """Mean cosine distance (1 - cosine similarity) between corresponding
    source and target class centers.

    :param source_center: source class centers, shape (len_class, dim)
    :param target_center: target class centers, shape (len_class, dim)
    :param len_class: number of classes
    :return: scalar mean distance tensor
    """
    # Allocate on the centers' device instead of hard-coding .cuda(),
    # so the loss also works with CPU tensors.
    class_center_distance = torch.zeros(len_class, device=source_center.device)
    for i in range(len_class):
        class_center_distance[i] = 1 - torch.cosine_similarity(source_center[i], target_center[i], dim=0)
    return class_center_distance.mean()

def three_alignment_loss(s_cluster_center, t_cluster_center, all_selected_center):
    """Three-way prototype alignment: per class, sum of the cosine distances
    between every pair of (source, target, pooled-selected) centers, averaged
    over classes.

    :param s_cluster_center: source class centers, shape (class_num, dim)
    :param t_cluster_center: target class centers, shape (class_num, dim)
    :param all_selected_center: pooled/selected class centers, shape (class_num, dim)
    :return: scalar loss tensor
    """
    class_num, feature_dim = s_cluster_center.size(0), s_cluster_center.size(1)

    # Allocate on the centers' device instead of hard-coding .cuda().
    three_all_distance = torch.zeros(class_num, device=s_cluster_center.device)
    for i in range(class_num):
        three_all_distance[i] += (1 - torch.cosine_similarity(s_cluster_center[i], all_selected_center[i], dim=0))
        three_all_distance[i] += (1 - torch.cosine_similarity(t_cluster_center[i], all_selected_center[i], dim=0))
        three_all_distance[i] += (1 - torch.cosine_similarity(t_cluster_center[i], s_cluster_center[i], dim=0))
    return three_all_distance.mean()

def three_alignment_loss_mse(s_cluster_center, t_cluster_center, all_selected_center):
    """Like three_alignment_loss, but each class additionally accumulates the
    (mean) MSE between every pair of centers on top of the cosine distances.

    :param s_cluster_center: source class centers, shape (class_num, dim)
    :param t_cluster_center: target class centers, shape (class_num, dim)
    :param all_selected_center: pooled/selected class centers, shape (class_num, dim)
    :return: scalar loss tensor
    """
    class_num, feature_dim = s_cluster_center.size(0), s_cluster_center.size(1)

    mseloss_critein = nn.MSELoss()
    # Allocate on the centers' device instead of hard-coding .cuda().
    three_all_distance = torch.zeros(class_num, device=s_cluster_center.device)
    for i in range(class_num):
        three_all_distance[i] += (1 - torch.cosine_similarity(s_cluster_center[i], all_selected_center[i], dim=0))
        three_all_distance[i] += (1 - torch.cosine_similarity(t_cluster_center[i], all_selected_center[i], dim=0))
        three_all_distance[i] += (1 - torch.cosine_similarity(t_cluster_center[i], s_cluster_center[i], dim=0))
        three_all_distance[i] += mseloss_critein(s_cluster_center[i], all_selected_center[i])
        three_all_distance[i] += mseloss_critein(t_cluster_center[i], all_selected_center[i])
        three_all_distance[i] += mseloss_critein(t_cluster_center[i], s_cluster_center[i])
    return three_all_distance.mean()



def orth_loss(features, labels, nega_loss):
    """Orthogonality loss on a batch of features: push same-class dot products
    toward 1 and penalize the magnitude of cross-class dot products.

    :param features: feature matrix, shape (n, dim)
    :param labels: per-sample labels, shape (n,)
    :param nega_loss: weight of the negative (cross-class) term
    :return: scalar loss tensor
    """
    labels = labels.reshape(-1, 1)
    # torch.eq already returns a mask on labels' device; the original
    # unconditional .cuda() broke CPU execution.
    mask_pos = torch.eq(labels, labels.t())
    mask_neg = ~mask_pos
    # s & d calculation
    dot_prod = torch.matmul(features, features.t())
    pos_total = (mask_pos.float() * dot_prod).sum()
    neg_total = torch.abs(mask_neg.float() * dot_prod).sum()
    # The 1e-6 guards against empty positive/negative pair sets.
    pos_mean = pos_total / (mask_pos.sum() + 1e-6)
    neg_mean = neg_total / (mask_neg.sum() + 1e-6)
    loss = (1.0 - pos_mean) + nega_loss * neg_mean
    return loss

def cal_git_loss(source_learned_feature, source_label, l_target_learned_feature,
                 l_target_label, source_cluster_center, target_cluster_center):
    """Git loss: for each sample, the distance to its own class center divided
    by the summed distance to the other class centers, averaged over source
    and labeled-target samples together.

    :param source_learned_feature: source features
    :param source_label: source labels
    :param l_target_learned_feature: labeled target features
    :param l_target_label: labeled target labels
    :param source_cluster_center: source class centers
    :param target_cluster_center: target class centers
    :return: scalar loss tensor
    """
    class_num = source_cluster_center.size(0)

    def _domain_term(similarities, labels):
        # similarities: (n, class_num) dot products against the class centers.
        row_totals = torch.sum(similarities, dim=1)
        term = 0.0
        for row, lab in enumerate(labels):
            own = similarities[row, int(lab)]
            numer = 1 - own
            denom = (class_num - 1) - (row_totals[row] - own)
            term += numer / denom
        return term

    s_sim = source_learned_feature.mm(source_cluster_center.t())
    l_sim = l_target_learned_feature.mm(target_cluster_center.t())
    git_loss = _domain_term(s_sim, source_label) + _domain_term(l_sim, l_target_label)
    total_samples = source_learned_feature.size(0) + l_target_learned_feature.size(0)
    return git_loss / total_samples

def pred_u_label(u_features, labeled_centers, combine_pred='Cosine'):
    """Score every unlabeled feature against every class prototype.

    Used when assigning pseudo labels to the full unlabeled target set (e.g.
    for the final training pass and accuracy checks).

    :param u_features: unlabeled features, shape (n, dim)
    :param labeled_centers: class prototypes, shape (C, dim)
    :param combine_pred: 'Euclidean' → negated squared Euclidean distances,
        'Cosine' → cosine similarities
    :return: score matrix, shape (n, C)
    """
    C, dim = labeled_centers.shape
    rows = []
    for feat in u_features:
        expanded = feat.expand(C, dim)
        if 'Euclidean' in combine_pred:
            score = -torch.sum(torch.pow(expanded - labeled_centers, 2), dim=1)
        elif 'Cosine' in combine_pred:
            score = torch.cosine_similarity(expanded, labeled_centers)
        rows.append(score.reshape(1, -1))  # 1xC
    if not rows:
        return torch.FloatTensor()
    return torch.cat(rows, dim=0)


def pred_u_label_02(u_features, labeled_centers, combine_pred='Cosine'):
    """Cosine scores for all unlabeled features at once: both operands are
    row-normalized, so a single matrix product yields the (n, C) similarity
    matrix. `combine_pred` is unused; kept for interface parity with
    pred_u_label."""
    normalized_features = l2_feature(u_features)
    normalized_centers = l2_feature(labeled_centers)
    return normalized_features.mm(normalized_centers.t())

def get_decrease_label(u_features, labeled_centers, epoch, combine_pred='Cosine'):
    """Assign pseudo labels, rejecting (-1) any sample whose top-1/top-2
    score gap falls below the batch-average gap."""
    scores = pred_u_label(u_features, labeled_centers, combine_pred)

    best_two, best_idx = torch.topk(scores, 2)
    label = best_idx[:, 0]
    gap = best_two[:, 0] - best_two[:, 1]
    mean_gap = gap.mean()
    rejected = torch.lt(gap, mean_gap)
    label[rejected] = -1
    if epoch % 50 == 0:
        print('select aver, num:', mean_gap.item(), rejected.sum().item())
    return label

def get_decrease_label_pred(u_features, labeled_centers, epoch, combine_pred='Cosine'):
    """Same as get_decrease_label, but also returns the raw score matrix."""
    scores = pred_u_label(u_features, labeled_centers, combine_pred)

    best_two, best_idx = torch.topk(scores, 2)
    label = best_idx[:, 0]
    gap = best_two[:, 0] - best_two[:, 1]
    mean_gap = gap.mean()
    rejected = torch.lt(gap, mean_gap)
    label[rejected] = -1
    if epoch % 50 == 0:
        print('select aver, num:', mean_gap.item(), rejected.sum().item())
    return label, scores

def knn_get_label(u_target_learned_feature, u_target_output, u_target_pseudo_label, pre_label, kk=5):
    """Fill in rejected pseudo labels (-1) by k-NN voting over confident samples.

    Samples where `pre_label` agrees with `u_target_pseudo_label` are treated
    as confident; each rejected sample takes the argmax of the summed softmax
    predictions of its kk most similar (dot-product) confident samples.

    :param u_target_learned_feature: features, shape (n, dim)
    :param u_target_output: raw logits, shape (n, C)
    :param u_target_pseudo_label: prototype-predicted labels, shape (n,)
    :param pre_label: labels with -1 marking rejected samples, shape (n,)
    :param kk: neighbour count (clamped to the confident-set size)
    :return: per-sample labels, shape (n,) float tensor
    """
    u_target_output = F.softmax(u_target_output, dim=1)

    select_index = torch.eq(pre_label, u_target_pseudo_label)

    confident_sample = u_target_learned_feature[select_index, :]
    confident_pred = u_target_output[select_index, :]

    dis = u_target_learned_feature.mm(confident_sample.t())
    u_len, select_len = dis.size(0), dis.size(1)
    # Never request more neighbours than there are confident samples.
    maxxx = min(kk, select_len)
    _, index = torch.topk(dis, maxxx)

    # Allocate on the features' device instead of hard-coding .cuda(),
    # so the function also works with CPU tensors.
    na_label = torch.ones(u_len, device=u_target_learned_feature.device)
    for ss in range(u_len):
        if pre_label[ss] != -1:
            # Confidently labeled sample: keep its label.
            na_label[ss] = pre_label[ss]
        else:
            # Rejected sample: vote among its confident neighbours.
            row_pred = confident_pred.index_select(0, index[ss])
            sum_pred = torch.sum(row_pred, dim=0)
            _, ll = torch.max(sum_pred.reshape(1, -1), dim=1)
            na_label[ss] = ll
    return na_label


def explicit_semantic_alignment_loss_func(source_learned_features, l_target_learned_features,
                                          u_target_learned_features, source_labels, l_target_labels,
                                          u_target_pseudo_labels, configuration, prototype='three'):
    """
    Class-level feature alignment: build the k-th class prototype for source,
    target and (optionally) the pooled source+target set, then sum the pairwise
    (summed) MSE between the prototypes of every class.

    :param prototype: 'two' aligns source vs. target prototypes; 'three' also
        aligns both against the pooled source+target prototype. Any other value
        returns None.
    :param source_learned_features: source features (N_s, dim)
    :param l_target_learned_features: labeled target features (N_lt, dim)
    :param u_target_learned_features: unlabeled target features (N_ut, dim)
    :param source_labels: source groundtruth
    :param l_target_labels: labeled target groundtruth
    :param u_target_pseudo_labels: unlabeled target pseudo labels
    :param configuration: dict providing 'class_number'
    :return: summed alignment loss tensor

    NOTE(review): a class with no samples in some group makes torch.stack([])
    raise — presumably every class occurs in each batch; confirm.
    """
    class_number = configuration['class_number']
    mu_s = OrderedDict()
    mu_t = OrderedDict()

    if prototype == 'two':
        for i in range(class_number):
            mu_s[i] = []
            mu_t[i] = []

        # Bucket features per class; target buckets pool labeled + pseudo-labeled.
        assert source_learned_features.shape[0] == len(source_labels)
        for i in range(source_learned_features.shape[0]):
            mu_s[int(source_labels[i])].append(source_learned_features[i])

        assert l_target_learned_features.shape[0] == len(l_target_labels)
        for i in range(l_target_learned_features.shape[0]):
            mu_t[int(l_target_labels[i])].append(l_target_learned_features[i])

        assert u_target_learned_features.shape[0] == len(u_target_pseudo_labels)
        for i in range(u_target_learned_features.shape[0]):
            mu_t[int(u_target_pseudo_labels[i])].append(u_target_learned_features[i])

        error_general = 0
        # reduction='sum' replaces the removed/deprecated size_average=False
        # flag (identical behavior: summed, not averaged, squared error).
        mseloss_critein = nn.MSELoss(reduction='sum')

        for i in range(class_number):
            mu_s[i] = torch.mean(torch.stack(mu_s[i], 0).float(), 0).float()

            mu_t[i] = torch.mean(torch.stack(mu_t[i], 0).float(), 0).float()

            error_general += mseloss_critein(mu_s[i], mu_t[i])

        return error_general

    elif prototype == 'three':
        mu_st = OrderedDict()

        for i in range(class_number):
            mu_s[i] = []
            mu_t[i] = []
            mu_st[i] = [[], []]

        assert source_learned_features.shape[0] == len(source_labels)
        for i in range(source_learned_features.shape[0]):
            mu_s[int(source_labels[i])].append(source_learned_features[i])
            mu_st[int(source_labels[i])][0].append(source_learned_features[i])

        assert l_target_learned_features.shape[0] == len(l_target_labels)
        for i in range(l_target_learned_features.shape[0]):
            mu_t[int(l_target_labels[i])].append(l_target_learned_features[i])
            mu_st[int(l_target_labels[i])][1].append(l_target_learned_features[i])

        assert u_target_learned_features.shape[0] == len(u_target_pseudo_labels)
        for i in range(u_target_learned_features.shape[0]):
            mu_t[int(u_target_pseudo_labels[i])].append(u_target_learned_features[i])
            mu_st[int(u_target_pseudo_labels[i])][1].append(u_target_learned_features[i])

        error_general = 0
        mseloss_critein = nn.MSELoss(reduction='sum')

        for i in range(class_number):
            source_mean = torch.mean(torch.stack(mu_s[i], 0).float(), 0).float()

            target_mean = torch.mean(torch.stack(mu_t[i], 0).float(), 0).float()

            # Pooled source+target prototype: sum of both groups over total count.
            mu_st_numerator = 0
            mu_st_numerator += torch.sum(torch.stack(mu_st[i][0], 0).float(), 0).float()
            mu_st_numerator += torch.sum(torch.stack(mu_st[i][1], 0).float(), 0).float()
            source_target_mean = torch.div(mu_st_numerator, len(mu_st[i][0]) + len(mu_st[i][1]))

            error_general += mseloss_critein(source_mean, target_mean)
            error_general += mseloss_critein(source_mean, source_target_mean)
            error_general += mseloss_critein(target_mean, source_target_mean)

        return error_general


def knowledge_distillation_loss_func(source_predic, source_label, l_target_predic, l_target_label, args):
    """
    Semantic-level alignment (knowledge distillation) between source and
    labeled target predictions.

    q: the soft label of class k is the average temperature-softened softmax
       over all source examples of class k.
    p: each labeled target sample's softmax output.
    The soft loss is the cross entropy between q and p per class, blended with
    the hard CE loss by args.alpha.

    :param args: provides `alpha` (blend weight) and `temperature` (softmax temperature)
    :param source_predic: source logits (N_s, C)
    :param source_label: source labels
    :param l_target_predic: labeled target logits (N_t, C)
    :param l_target_label: labeled target labels
    :return: (total loss, weighted hard loss, weighted soft loss)
    """
    if args.alpha == 1.0:
        # Pure hard loss; the zero tensors preserve the 3-tuple return shape.
        return classification_loss_func(l_target_predic, l_target_label), \
               torch.Tensor([0.])[0], torch.Tensor([0.])[0]

    assert source_predic.shape[1] == l_target_predic.shape[1]
    class_num = source_predic.shape[1]
    k_categories = torch.zeros((class_num, class_num))
    # Explicit dim=1: the implicit-dim form of F.softmax is deprecated and can
    # silently normalize over the wrong axis.
    source_softmax = F.softmax(source_predic / args.temperature, dim=1)
    l_target_softmax = F.softmax(l_target_predic, dim=1)
    soft_loss = 0

    for k in range(class_num):
        # Average soft label over the source samples of class k.
        k_source_softmax = source_softmax.index_select(dim=0, index=(source_label == k).nonzero().reshape(-1, ))
        k_categories[k] = torch.mean(k_source_softmax, dim=0)

    if torch.cuda.is_available():
        k_categories = k_categories.cuda()

    for k in range(class_num):
        # Cross entropy between the class-k soft label and each class-k target sample.
        k_l_target_softmax = l_target_softmax.index_select(dim=0, index=(l_target_label == k).nonzero().reshape(-1, ))
        soft_loss -= torch.mean(torch.sum(k_categories[k] * torch.log(k_l_target_softmax + 1e-5), 1))

    hard_loss = classification_loss_func(l_target_predic, l_target_label)
    loss = (1 - args.alpha) * hard_loss + args.alpha * soft_loss
    return loss, (1 - args.alpha) * hard_loss, args.alpha * soft_loss


def get_prototype_label(source_learned_features, l_target_learned_features, u_target_learned_features, source_labels,
                        l_target_labels, configuration, combine_pred, epoch):
    """
    Assign prototype ('pseudo') labels to unlabeled target features.

    Class prototypes are the mean of the pooled source + labeled-target
    features of each class; every unlabeled feature is scored against every
    prototype and labeled with the best-scoring class. With a '*_threshold'
    variant of `combine_pred`, low-confidence samples are marked -1 instead.

    :param epoch: training epoch (drives the Cosine_threshold schedule)
    :param combine_pred: 'Euclidean', 'Cosine', 'Euclidean_threshold' or 'Cosine_threshold'
    :param configuration: dataset configuration dict; only 'class_number' is read
    :param source_learned_features: source feature
    :param l_target_learned_features:  labeled target feature
    :param u_target_learned_features:  unlabeled target feature
    :param source_labels: source labels
    :param l_target_labels: labeled target labels
    :return: unlabeled target prototype label (-1 = rejected by threshold)
    """
    def prototype_softmax(features, feature_centers):
        # Score each feature against every class center: negated squared
        # Euclidean distance or cosine similarity, returned as an (n, C) matrix.
        assert features.shape[1] == feature_centers.shape[1]
        n_samples = features.shape[0]
        C, dim = feature_centers.shape
        pred = torch.FloatTensor()
        for i in range(n_samples):
            if combine_pred.find('Euclidean') != -1:
                dis = -torch.sum(torch.pow(features[i].expand(C, dim) - feature_centers, 2), dim=1)
            elif combine_pred.find('Cosine') != -1:
                dis = torch.cosine_similarity(features[i].expand(C, dim), feature_centers)
            if not i:
                pred = dis.reshape(1, -1)
            else:
                pred = torch.cat((pred, dis.reshape(1, -1)), dim=0)
        return pred

    assert source_learned_features.shape[1] == u_target_learned_features.shape[1]
    class_num = configuration['class_number']
    feature_dim = source_learned_features.shape[1]
    feature_centers = torch.zeros((class_num, feature_dim))
    for k in range(class_num):
        # calculate feature center of each class for source and target
        k_source_feature = source_learned_features.index_select(dim=0,
                                                                index=(source_labels == k).nonzero().reshape(-1, ))
        k_l_target_feature = l_target_learned_features.index_select(dim=0, index=(
                l_target_labels == k).nonzero().reshape(-1, ))
        # NOTE(review): a class absent from both source and labeled target
        # yields a NaN center (mean over an empty cat) — presumably every
        # class is guaranteed to occur; confirm against the data loaders.
        feature_centers[k] = torch.mean(torch.cat((k_source_feature, k_l_target_feature), dim=0), dim=0)

    if torch.cuda.is_available():
        feature_centers = feature_centers.cuda()

    # assign 'pseudo label' by Euclidean distance or Cosine similarity between feature and prototype,
    # select the most confident samples in each pseudo class, not confident label=-1
    prototype_pred = prototype_softmax(u_target_learned_features, feature_centers)
    prototype_value, prototype_label = torch.max(prototype_pred.data, 1)

    # add threshold
    if combine_pred.find('threshold') != -1:
        if combine_pred == 'Euclidean_threshold':
            # threshold for Euclidean distance
            select_threshold = 0.2
        elif combine_pred == 'Cosine_threshold':
            # Ref: Progressive Feature Alignment for Unsupervised Domain Adaptation CVPR2019
            # Confidence threshold ramps from ~0.68 at epoch 0 toward 0.99.
            select_threshold = 1. / (1 + np.exp(-0.8 * (epoch + 1))) - 0.01
            # select_threshold = 0.1
        prototype_label[(prototype_value < select_threshold).nonzero()] = -1

    return prototype_label


def k_means(target_features, target, c):
    """Run a few rounds of k-means on the target features, printing the
    clustering accuracy against `target` at every iteration.

    :param target_features: features, shape (n, dim)
    :param target: ground-truth labels (used only for the accuracy printout)
    :param c: initial cluster centers, shape (class_num, dim)
    :return: (final centers, per-sample cluster assignment)

    NOTE(review): this function is defined twice in this module with an
    identical body; the later definition shadows this one at import time —
    deduplicate.
    """
    eps = 1e-6
    cluster_iter = 5
    class_num = c.size(0)
    c_tar = c.data.clone()
    print('target.shape', target.shape)
    for itr in range(cluster_iter):
        # Squared Euclidean distance of every sample to every center: (n, class_num).
        dist_xt_ct_temp = target_features.unsqueeze(1) - c_tar.unsqueeze(0)
        dist_xt_ct = dist_xt_ct_temp.pow(2).sum(2)
        _, idx_sim = (-1 * dist_xt_ct).data.topk(1, 1, True, True)
        print('idx_sim.shape', idx_sim.shape)
        n_correct = (idx_sim.reshape(-1) == target).sum().item()
        acc = float(n_correct) / len(target) * 100.
        print('acc', acc)
        # Allocate accumulators on the features' device instead of the
        # hard-coded torch.cuda.FloatTensor, so CPU tensors also work.
        c_tar_temp = target_features.new_zeros((class_num, c_tar.size(1)))
        count = target_features.new_zeros((class_num, 1))
        for k in range(class_num):
            c_tar_temp[k] += target_features[idx_sim.squeeze(1) == k].sum(0)
            count[k] += (idx_sim.squeeze(1) == k).float().sum()
        # eps avoids division by zero for empty clusters.
        c_tar_temp /= (count + eps)
        c_tar = c_tar_temp.clone()
        del dist_xt_ct_temp
        torch.cuda.empty_cache()  # no-op when CUDA is uninitialized
    return c_tar, idx_sim.reshape(-1)

def k_means(target_features, target, c):
    """Run a few rounds of k-means on the target features, printing the
    clustering accuracy against `target` at every iteration.

    :param target_features: features, shape (n, dim)
    :param target: ground-truth labels (used only for the accuracy printout)
    :param c: initial cluster centers, shape (class_num, dim)
    :return: (final centers, per-sample cluster assignment)

    NOTE(review): identical redefinition of the k_means above — this one wins
    at import time; the duplicate should be removed.
    """
    eps = 1e-6
    cluster_iter = 5
    class_num = c.size(0)
    c_tar = c.data.clone()
    print('target.shape', target.shape)
    for itr in range(cluster_iter):
        # Squared Euclidean distance of every sample to every center: (n, class_num).
        dist_xt_ct_temp = target_features.unsqueeze(1) - c_tar.unsqueeze(0)
        dist_xt_ct = dist_xt_ct_temp.pow(2).sum(2)
        _, idx_sim = (-1 * dist_xt_ct).data.topk(1, 1, True, True)
        print('idx_sim.shape', idx_sim.shape)
        n_correct = (idx_sim.reshape(-1) == target).sum().item()
        acc = float(n_correct) / len(target) * 100.
        print('acc', acc)
        # Allocate accumulators on the features' device instead of the
        # hard-coded torch.cuda.FloatTensor, so CPU tensors also work.
        c_tar_temp = target_features.new_zeros((class_num, c_tar.size(1)))
        count = target_features.new_zeros((class_num, 1))
        for k in range(class_num):
            c_tar_temp[k] += target_features[idx_sim.squeeze(1) == k].sum(0)
            count[k] += (idx_sim.squeeze(1) == k).float().sum()
        # eps avoids division by zero for empty clusters.
        c_tar_temp /= (count + eps)
        c_tar = c_tar_temp.clone()
        del dist_xt_ct_temp
        torch.cuda.empty_cache()  # no-op when CUDA is uninitialized
    return c_tar, idx_sim.reshape(-1)

def k_means_cos(target_features, target, c):
    """Cosine-similarity k-means on the target features, printing clustering
    accuracy against `target` at every iteration.

    :param target_features: features, shape (n, dim)
    :param target: ground-truth labels (used only for the accuracy printout)
    :param c: initial cluster centers, shape (class_num, dim)
    :return: (final un-normalized centers, per-sample cluster assignment)
    """
    # Row-wise L2 normalization, inlined (same maths as the l2_feature helper).
    target_features = target_features / target_features.norm(dim=1, p=2, keepdim=True)
    c = c / c.norm(dim=1, p=2, keepdim=True)
    eps = 1e-6
    cluster_iter = 5
    class_num = c.size(0)
    c_tar = c.data.clone()
    print('target.shape', target.shape)
    for itr in range(cluster_iter):
        # Re-normalize the running centers so the mm below is a true cosine score.
        c = c_tar / c_tar.norm(dim=1, p=2, keepdim=True)
        dist_xt_ct = torch.mm(target_features, c.t())
        _, idx_sim = dist_xt_ct.data.topk(1, 1, True, True)
        print('idx_sim.shape', idx_sim.shape)
        n_correct = (idx_sim.reshape(-1) == target).sum().item()
        acc = float(n_correct) / len(target) * 100.
        print('acc', acc)
        # Allocate accumulators on the features' device instead of the
        # hard-coded torch.cuda.FloatTensor, so CPU tensors also work.
        c_tar_temp = target_features.new_zeros((class_num, c_tar.size(1)))
        count = target_features.new_zeros((class_num, 1))
        for k in range(class_num):
            c_tar_temp[k] += target_features[idx_sim.squeeze(1) == k].sum(0)
            count[k] += (idx_sim.squeeze(1) == k).float().sum()
        # eps avoids division by zero for empty clusters.
        c_tar_temp /= (count + eps)
        c_tar = c_tar_temp.clone()

        torch.cuda.empty_cache()  # no-op when CUDA is uninitialized
    return c_tar, idx_sim.reshape(-1)


def nn_get_label(u_target_feature, l_target_feature, l_target_label):
    """Assign each unlabeled target sample the label of its nearest labeled
    target neighbour under cosine similarity.

    The original body was unfinished: `torch.mm(u_target_feature, )` raised a
    TypeError and nothing was returned. Completed here as 1-NN label transfer
    — presumably the intent, by analogy with knn_get_label; confirm.

    :param u_target_feature: unlabeled features, shape (n, dim)
    :param l_target_feature: labeled features, shape (m, dim)
    :param l_target_label: labels of the labeled features, shape (m,)
    :return: nearest-neighbour label per unlabeled sample, shape (n,)
    """
    # Row-wise L2 normalization, inlined (same maths as the l2_feature helper).
    u_norm = u_target_feature / u_target_feature.norm(dim=1, p=2, keepdim=True)
    l_norm = l_target_feature / l_target_feature.norm(dim=1, p=2, keepdim=True)

    u_l_dis = torch.mm(u_norm, l_norm.t())
    _, nearest = torch.max(u_l_dis, dim=1)
    return l_target_label[nearest]


def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """Multi-bandwidth Gaussian (RBF) kernel matrix over the concatenation of
    source and target samples.

    Returns the sum of `kernel_num` RBF kernels whose bandwidths form a
    geometric series (ratio `kernel_mul`) around a base bandwidth — the mean
    pairwise squared distance unless `fix_sigma` is given.

    :return: kernel matrix of shape (n_source + n_target, n_source + n_target)
    """
    n_samples = int(source.size()[0]) + int(target.size()[0])
    joint = torch.cat([source, target], dim=0)
    n_total, feat_dim = joint.size(0), joint.size(1)
    # Pairwise squared Euclidean distances via broadcasting: (n, n).
    lhs = joint.unsqueeze(0).expand(n_total, n_total, feat_dim)
    rhs = joint.unsqueeze(1).expand(n_total, n_total, feat_dim)
    l2_distance = (lhs - rhs).pow(2).sum(2)
    if fix_sigma:
        bandwidth = fix_sigma
    else:
        bandwidth = torch.sum(l2_distance.data) / (n_samples ** 2 - n_samples)
    # Center the geometric bandwidth series around the base bandwidth.
    bandwidth /= kernel_mul ** (kernel_num // 2)
    kernels = [torch.exp(-l2_distance / (bandwidth * kernel_mul ** i)) for i in range(kernel_num)]
    return sum(kernels)

def CMMD_2(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """Multi-kernel MMD (DAN-style) between source and target features, with
    each quadrant of the joint kernel matrix normalized by its own pair count
    (so unequal batch sizes are handled)."""
    n_s = int(source.size()[0])
    n_t = int(target.size()[0])
    kernels = guassian_kernel(source, target, kernel_mul=kernel_mul,
                              kernel_num=kernel_num, fix_sigma=fix_sigma)
    k_ss = kernels[:n_s, :n_s]
    k_tt = kernels[n_s:, n_s:]
    k_st = kernels[:n_s, n_s:]
    k_ts = kernels[n_s:, :n_s]
    return (torch.sum(k_ss) / (n_s * n_s)
            + torch.sum(k_tt) / (n_t * n_t)
            - torch.sum(k_st) / (n_s * n_t)
            - torch.sum(k_ts) / (n_s * n_t))


def guassian_kernel1(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """Compute the joint Gram/kernel matrix of source and target samples.

    source: (sample_size_1, feature_size) data
    target: (sample_size_2, feature_size) data
    kernel_mul: geometric ratio between consecutive kernel bandwidths
    kernel_num: number of kernels in the mixture
    fix_sigma: fixed base bandwidth; if falsy, the mean pairwise squared
        distance is used instead

    return: (sample_size_1 + sample_size_2) square matrix laid out as
            [ K_ss K_st
              K_ts K_tt ]
    """
    n_samples = int(source.size()[0]) + int(target.size()[0])
    joint = torch.cat([source, target], dim=0)  # stack both domains
    n_total, feat_dim = joint.size(0), joint.size(1)

    # Broadcast to compute |x - y|^2 for every pair: (n, n).
    lhs = joint.unsqueeze(0).expand(n_total, n_total, feat_dim)
    rhs = joint.unsqueeze(1).expand(n_total, n_total, feat_dim)
    l2_distance = (lhs - rhs).pow(2).sum(2)

    # Base bandwidth for the multi-kernel mixture.
    if fix_sigma:
        bandwidth = fix_sigma
    else:
        bandwidth = torch.sum(l2_distance.data) / (n_samples ** 2 - n_samples)
    bandwidth /= kernel_mul ** (kernel_num // 2)

    # Gaussian kernel: exp(-|x-y|^2 / bandwidth), one per bandwidth.
    kernels = [torch.exp(-l2_distance / (bandwidth * kernel_mul ** i))
               for i in range(kernel_num)]

    return sum(kernels)  # merge the multi-kernel mixture


def mmd(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """Multi-kernel MMD between source and target, each quadrant of the joint
    kernel matrix normalized by its own pair count."""
    n_s = int(source.size()[0])
    n_t = int(target.size()[0])
    kernels = guassian_kernel1(source, target,
                               kernel_mul=kernel_mul,
                               kernel_num=kernel_num,
                               fix_sigma=fix_sigma)
    k_ss = kernels[:n_s, :n_s]  # Source<->Source
    k_tt = kernels[n_s:, n_s:]  # Target<->Target
    k_st = kernels[:n_s, n_s:]  # Source<->Target
    k_ts = kernels[n_s:, :n_s]  # Target<->Source
    return (torch.sum(k_ss) / (n_s * n_s) + torch.sum(k_tt) / (n_t * n_t)
            - torch.sum(k_st) / (n_s * n_t) - torch.sum(k_ts) / (n_s * n_t))

def cal_bya_loss(a, b):
    """Bhattacharyya-style agreement loss: pairs of softmax distributions whose
    Bhattacharyya coefficient is >= 0.99 contribute (1 - coefficient),
    normalized by the total number of pairs."""
    p = torch.sqrt(F.softmax(a, dim=1))
    q = torch.sqrt(F.softmax(b, dim=1))
    coeff = torch.mm(p, q.t())
    confident = torch.ge(coeff, 0.99)
    selected = coeff[confident].reshape(-1)
    penalty = torch.sum(1 - selected)
    return penalty / (p.shape[0] * q.shape[0])

def cal_bya_loss_2(a_feature, b_feature, a, b):
    """Like cal_bya_loss, but the contributing pairs are selected by feature
    cosine similarity (>= 0.7 between L2-normalized features) instead of by
    the Bhattacharyya coefficient itself."""
    similar_pairs = torch.ge(torch.mm(l2_feature(a_feature), l2_feature(b_feature).t()), 0.7)

    p = torch.sqrt(F.softmax(a, dim=1))
    q = torch.sqrt(F.softmax(b, dim=1))
    coeff = torch.mm(p, q.t())

    selected = coeff[similar_pairs].reshape(-1)
    penalty = torch.sum(1 - selected)
    return penalty / (p.shape[0] * q.shape[0])