import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np

def calc_hammingDist(B1, B2):
    """Pairwise Hamming distances between two sets of binary codes.

    B1 may be a single 1-D code or an (m, q) batch; B2 is (n, q), with
    entries in {-1, +1}. Uses the identity dist = (q - <b1, b2>) / 2 and
    returns an (m, n) distance matrix.
    """
    code_length = B2.shape[1]
    # Promote a single code vector to a 1-row matrix so mm() is valid.
    if B1.dim() < 2:
        B1 = B1.unsqueeze(0)
    inner = B1.mm(B2.t())
    return 0.5 * (code_length - inner)

def calc_loss(B, F, G, Sim, opt):
    """Compute the DCMH-style cross-modal hashing objective.

    Args:
        B:   binary codes, (n, bit) tensor.
        F:   continuous codes from one modality, (n, bit).
             NOTE: this parameter shadows the module-level alias ``F``
             for ``torch.nn.functional``.
        G:   continuous codes from the other modality, (n, bit).
        Sim: (n, n) similarity matrix in {0, 1}.
        opt: options object providing ``gamma``, ``alphard`` and
             ``batch_size``.

    Returns:
        Scalar loss tensor: negative log-likelihood term + quantization
        term (weighted by gamma) + bit-balance term (weighted by
        alphard), normalized by ``opt.batch_size * B.shape[0]``.
    """
    theta = torch.matmul(F, G.transpose(0, 1)) / 2
    # softplus(theta) == log(1 + exp(theta)) mathematically, but it does
    # not overflow to inf for large theta (the naive form overflows for
    # theta > ~88 in float32). Accessed via the full module path because
    # the parameter F shadows the usual alias.
    term1 = torch.sum(torch.nn.functional.softplus(theta) - Sim * theta)
    term2 = torch.sum(torch.pow(B - F, 2) + torch.pow(B - G, 2))
    term3 = torch.sum(torch.pow(F.sum(dim=0), 2) + torch.pow(G.sum(dim=0), 2))
    loss = term1 + opt.gamma * term2 + opt.alphard * term3
    loss = loss / (opt.batch_size * B.shape[0])
    return loss

def calc_neighbor(opt, label1, label2):
    """Build the binary similarity matrix from multi-hot label vectors.

    Two samples are similar when their label vectors share at least one
    active class. Returns a 0/1 float tensor, placed on the GPU when
    ``opt.use_gpu`` is set.
    """
    shared_any = label1.matmul(label2.transpose(0, 1)) > 0
    if opt.use_gpu:
        return shared_any.type(torch.cuda.FloatTensor)
    return shared_any.type(torch.FloatTensor)

def calc_map_k(qB, rB, query_L, retrieval_L, k=None):
    '''
    Compute mean Average Precision (mAP) over the top-k retrieved items.

    qB: query binary codes, {-1,+1}^{m x q}, where m is the number of
        queries and q the code length.
    rB: retrieval (database) binary codes, {-1,+1}^{n x q}, where n is
        the number of database items.
    query_L: query labels, {0,1}^{m x l}, where l is the number of classes.
    retrieval_L: database labels, {0,1}^{n x l}.
    k: number of top-ranked results considered per query (defaults to the
       full database size n).
    '''
    num_query = query_L.shape[0]
    map = 0
    if k is None:
        k = retrieval_L.shape[0]  # no k given: rank the whole database
    for iter in range(num_query):
        q_L = query_L[iter]
        # Promote the label vector to 2-D so the matmul below is valid.
        if len(q_L.shape) < 2:
            q_L = q_L.unsqueeze(0)
        # gnd[j] = 1 if the query shares at least one label with database
        # item j (i.e. item j is a relevant result), else 0.
        gnd = (q_L.mm(retrieval_L.transpose(0, 1)) > 0).squeeze().type(torch.float32)
        # tsum = total number of relevant database items for this query.
        tsum = torch.sum(gnd)
        if tsum == 0:
            continue  # no relevant item exists: query contributes nothing
        # Hamming distance from this query's code to every database code.
        hamm = calc_hammingDist(qB[iter, :], rB)
        # Rank database items by increasing Hamming distance.
        _, ind = torch.sort(hamm)
        ind.squeeze_()
        # Reorder the relevance flags into ranked order.
        gnd = gnd[ind]
        # Consider at most k hits, and never more than the number of
        # relevant items actually available.
        total = min(k, int(tsum))
        # count = 1..total: numerator ("hits so far") of precision@rank.
        count = torch.arange(1, total + 1).type(torch.float32)
        # tindex = 1-based ranks at which the first `total` relevant items
        # appear; count/tindex is the precision at each hit.
        tindex = torch.nonzero(gnd)[:total].squeeze().type(torch.float32) + 1.0
        if tindex.is_cuda:
            count = count.cuda()  # keep both operands on the same device
        # Average precision of this query, accumulated into the mAP sum.
        map = map + torch.mean(count / tindex)
    map = map / num_query
    return map


def iid(opt, train_label):
    """Partition dataset indices into IID shards, one per client.

    Each of ``opt.num_users`` clients receives an equal-sized random
    sample of indices, drawn without replacement and disjoint across
    clients (leftover indices, if any, are unassigned).
    Returns a dict mapping client id -> numpy array of indices.
    """
    shard_size = int(len(train_label) / opt.num_users)
    remaining = list(range(len(train_label)))
    shards = {}
    for client in range(opt.num_users):
        shards[client] = np.random.choice(remaining, shard_size, replace=False)
        # Remove this client's picks so later clients cannot reuse them.
        remaining = list(set(remaining) - set(shards[client]))
    return shards

