import torch
from data_handler import load_data
from sampling import iid, noniid, noniid_unequal
import numpy as np
from tqdm import tqdm
import os

def get_dataset(opt, n_list, k_list):
    """Load the raw dataset and partition the training labels across clients.

    Args:
        opt: options object with dataset/path settings plus `iid` and `unequal`
            flags controlling the client partitioning strategy.
        n_list, k_list: per-client class-count settings forwarded to `noniid`.

    Returns:
        Tuple (X, Y, L, user_groups) where X/Y/L are dicts keyed by
        'query' / 'train' / 'retrieval' holding images, tags and labels,
        and user_groups maps each client id to its sample indices.
    """
    images, tags, labels = load_data(opt.dataset, opt.num_class2, opt.data_path)
    # Dicts containing the query set, training set and retrieval database.
    X, Y, L = split_data(opt, images, tags, labels)

    # Distribute the training samples among the federated clients.
    if opt.iid:
        # IID: uniform random split.
        user_groups = iid(opt, L['train'])
    elif opt.unequal:
        # Non-IID with unequal shard sizes per client.
        user_groups = noniid_unequal(opt, L['train'])
    else:
        # Non-IID with equal shard sizes per client.
        user_groups, classes_list = noniid(opt, L['train'], n_list, k_list)

    return X, Y, L, user_groups


def calc_hammingDist(B1, B2):
    """Pairwise Hamming distance between rows of two {-1,+1} code matrices.

    Uses the identity <b1, b2> = q - 2 * dH(b1, b2) for q-bit bipolar codes,
    so dH = 0.5 * (q - B1 @ B2^T).

    Args:
        B1: query codes, shape (m, q) or (q,) for a single code.
        B2: database codes, shape (n, q).

    Returns:
        Tensor of shape (m, n) with the Hamming distances.
    """
    code_len = B2.shape[1]
    # Promote a single 1-D code to a one-row matrix.
    if B1.dim() < 2:
        B1 = B1.unsqueeze(0)
    return 0.5 * (code_len - B1.mm(B2.t()))


def calc_map_k(qB, rB, query_L, retrieval_L, k=None):
    """Mean Average Precision at top-k for hash-code retrieval.

    Args:
        qB: query binary codes in {-1,+1}, shape (m, q).
        rB: retrieval (database) binary codes in {-1,+1}, shape (n, q).
        query_L: query labels in {0,1}, shape (m, l).
        retrieval_L: database labels in {0,1}, shape (n, l).
        k: number of top-ranked items to evaluate; defaults to the full
            database size.

    Returns:
        Scalar tensor: AP averaged over all m queries. Queries with no
        relevant database item contribute 0 but still count in the mean
        (matching the original behavior).
    """
    num_query = query_L.shape[0]
    if k is None:
        k = retrieval_L.shape[0]
    # Fix: renamed from `map` / `iter`, which shadowed Python builtins.
    map_sum = 0
    for q_idx in range(num_query):
        q_L = query_L[q_idx]
        if len(q_L.shape) < 2:
            q_L = q_L.unsqueeze(0)
        # A database item is relevant iff it shares at least one label
        # with the query (positive label inner product).
        gnd = (q_L.mm(retrieval_L.transpose(0, 1)) > 0).squeeze().type(torch.float32)
        tsum = torch.sum(gnd)  # number of relevant database items
        if tsum == 0:
            # No relevant item: AP is undefined, skip (counts as 0 in the mean).
            continue
        # Rank database items by Hamming distance to the query code.
        hamm = calc_hammingDist(qB[q_idx, :], rB)
        _, ind = torch.sort(hamm)
        ind.squeeze_()
        gnd = gnd.to(ind.device)
        gnd = gnd[ind]  # relevance flags reordered by rank
        total = min(k, int(tsum))
        count = torch.arange(1, total + 1).type(torch.float32)
        # 1-based ranks at which the first `total` relevant items appear.
        tindex = torch.nonzero(gnd, as_tuple=False)[:total].squeeze().type(torch.float32) + 1.0
        # Fix: follow tindex's device generically instead of assuming CUDA
        # (the original called count.cuda(), breaking for non-default devices).
        count = count.to(tindex.device)
        map_sum = map_sum + torch.mean(count / tindex)
    return map_sum / num_query

def cacl_FedCMR_weight(opt, user_groups, train_L):
    """Compute FedCMR client weights from data volume and class diversity.

    Each client's weight is (local samples / total samples) *
    (local distinct classes / summed distinct classes).

    Args:
        opt: options object providing `num_users`, the number of clients.
        user_groups: mapping from client id to its sample indices.
        train_L: one-hot label matrix of the full training set; argmax over
            axis 1 recovers the class index of each sample.

    Returns:
        List of per-client weights, one per client in [0, num_users).
    """
    # Total number of samples across every client.
    total_data_size = sum(len(user_groups[i]) for i in range(len(user_groups)))

    # Fix: compute each client's distinct-class count once instead of
    # re-running np.unique(np.argmax(...)) twice per client.
    class_sizes = [
        len(np.unique(np.argmax(train_L[user_groups[i]], axis=1)))
        for i in range(opt.num_users)
    ]
    total_class_size = sum(class_sizes)

    # Weight = data share * class-diversity share.
    weights = [
        (len(user_groups[i]) / total_data_size) * (class_sizes[i] / total_class_size)
        for i in range(opt.num_users)
    ]

    return weights

def split_data(opt, images, tags, labels):
    """Slice images, tags and labels into query / train / retrieval splits.

    The first `opt.query_size` samples form the query set; the training set
    and the retrieval database both start right after it (so they overlap by
    construction), with lengths `opt.training_size` and `opt.database_size`.

    Returns:
        Three dicts (X for images, Y for tags, L for labels), each with
        keys 'query', 'train' and 'retrieval'.
    """
    q_end = opt.query_size
    train_end = q_end + opt.training_size
    db_end = q_end + opt.database_size

    def make_splits(data):
        # One helper applied uniformly to each modality.
        return {
            'query': data[0:q_end],
            'train': data[q_end:train_end],
            'retrieval': data[q_end:db_end],
        }

    X = make_splits(images)
    Y = make_splits(tags)
    L = make_splits(labels)
    return X, Y, L


def agg_func(protos):
    """Average each label's list of local prototypes, in place.

    Args:
        protos: dict mapping label -> list of prototype tensors.

    Returns:
        The same dict, where multi-element lists are replaced by the
        element-wise mean tensor and single-element lists by their only item.
    """
    for label, proto_list in protos.items():
        if len(proto_list) > 1:
            # Sum all prototypes for this label, then divide by the count.
            summed = proto_list[0].data.clone()
            for extra in proto_list[1:]:
                summed += extra.data
            protos[label] = summed / len(proto_list)
        else:
            # A single prototype is kept as-is.
            protos[label] = proto_list[0]

    return protos

def avg_model(img_client_model_params_list, txt_client_model_params_list, user_groups):
    """FedAvg-style aggregation of image and text model parameters.

    Each client's parameters are weighted by its share of the total number
    of local training samples.

    Args:
        img_client_model_params_list: per-client lists of image-model tensors.
        txt_client_model_params_list: per-client lists of text-model tensors.
        user_groups: mapping from client id to its sample indices.

    Returns:
        Tuple (avg_img_params, avg_txt_params) of aggregated tensor lists.
    """
    # Total number of samples held by all clients.
    total_samples = sum(len(user_groups[c]) for c in range(len(user_groups)))

    # Zero-initialized accumulators shaped like the first client's parameters.
    agg_img = [torch.zeros_like(p) for p in img_client_model_params_list[0]]
    agg_txt = [torch.zeros_like(p) for p in txt_client_model_params_list[0]]

    for client_idx, (img_params, txt_params) in enumerate(
            zip(img_client_model_params_list, txt_client_model_params_list)):
        # Weight = this client's data share.
        weight = len(user_groups[client_idx]) / total_samples

        for acc, param in zip(agg_img, img_params):
            acc += weight * param
        for acc, param in zip(agg_txt, txt_params):
            acc += weight * param

    return agg_img, agg_txt


def FedCMR_agg_model(img_client_model_params_list, txt_client_model_params_list, client_weights, local_losses, opt):
    """FedCMR aggregation of client image/text model parameters.

    Combines each client's data weight (Sk) with a Gompertz function of its
    normalized local loss (fk), then softmax-normalizes to obtain the final
    aggregation coefficients qk.

    Args:
        img_client_model_params_list: per-client lists of image-model tensors.
        txt_client_model_params_list: per-client lists of text-model tensors.
        client_weights: Sk, per-client data weights (already normalized).
        local_losses: fk, per-client local loss values.
        opt: options object providing the hyper-parameter `q_alpha`.

    Returns:
        Tuple (agg_img_params, agg_txt_params) of aggregated tensor lists.
    """
    alpha = opt.q_alpha

    sk = torch.tensor(client_weights, dtype=torch.float32)
    fk = torch.tensor(local_losses, dtype=torch.float32)

    # Step 1: Gompertz function of the loss normalized by its mean.
    vk = torch.exp(-torch.exp(fk / torch.mean(fk)))

    # Step 2: softmax over Sk + alpha * Vk gives the coefficients qk.
    scores = torch.exp(sk + alpha * vk)
    qk = scores / torch.sum(scores)

    # Step 3: weighted sum of client parameters into zero-initialized buffers.
    agg_img_params = [torch.zeros_like(p) for p in img_client_model_params_list[0]]
    agg_txt_params = [torch.zeros_like(p) for p in txt_client_model_params_list[0]]

    for coeff, img_params, txt_params in zip(
            qk, img_client_model_params_list, txt_client_model_params_list):
        for acc, param in zip(agg_img_params, img_params):
            acc += coeff * param
        for acc, param in zip(agg_txt_params, txt_params):
            acc += coeff * param

    return agg_img_params, agg_txt_params

def FedCMR_local_update(diff_params, global_params, beta):
    """Blend local parameter deltas into the global model.

    Applies result = beta * diff + global element-wise over the two
    parameter lists.

    Args:
        diff_params (list[torch.Tensor]): per-tensor parameter differences
            (e.g. locally updated parameters minus global parameters).
        global_params (list[torch.Tensor]): current global model parameters.
        beta (float): scaling coefficient for the difference term.

    Returns:
        list[torch.Tensor]: the updated parameter tensors.
    """
    return [
        beta * delta + base
        for delta, base in zip(diff_params, global_params)
    ]

def proto_aggregation(local_protos_list):
    """Aggregate per-label prototypes contributed by multiple clients.

    Args:
        local_protos_list: mapping from client id to that client's dict of
            label -> prototype tensor.

    Returns:
        Dict mapping each label to a single-element list containing the
        element-wise mean of all prototypes observed for that label.
    """
    # Group every client's prototype under its label.
    collected = {}
    for client_id in local_protos_list:
        for label, proto in local_protos_list[client_id].items():
            collected.setdefault(label, []).append(proto)

    # Replace each group by the mean prototype (kept in a one-element list).
    for label, proto_list in collected.items():
        if len(proto_list) > 1:
            summed = proto_list[0].data.clone()
            for extra in proto_list[1:]:
                summed += extra.data
            collected[label] = [summed / len(proto_list)]
        else:
            collected[label] = [proto_list[0].data]

    return collected

def calc_PLFedCMH_loss(B, F, G, Y, train_L, loss_img_proto, loss_txt_proto, opt):
    """Compute the PLFedCMH objective (Eq. 2) and its prototype component.

    Args:
        B: binary codes. F, G: real-valued image / text features.
        Y: class embedding matrix; train_L: training label matrix.
        loss_img_proto, loss_txt_proto: precomputed prototype losses
            (terms 3 and 4 of Eq. 2).
        opt: options providing `bit`, `gamma` and `eta`.

    Returns:
        Tuple (loss, loss_proto): the full objective and the summed
        prototype loss.
    """
    target = opt.bit * train_L
    # Terms 1-2 of Eq. (2): label-regression error of both modalities.
    regression = torch.sum((target - F.mm(Y.t())) ** 2 + (target - G.mm(Y.t())) ** 2)

    # Terms 5-6: quantization error between binary codes and features.
    quantization = opt.gamma * torch.sum((B - F) ** 2 + (B - G) ** 2)

    # Terms 3-4: prototype-alignment losses, weighted by eta.
    loss_proto = loss_img_proto + loss_txt_proto

    loss = regression + quantization + opt.eta * loss_proto
    return loss, loss_proto

def calc_SHDCH_loss(B, F, G, Y1, Y2, S, L1, L2, opt):
    """Compute the SHDCH objective over two label hierarchies.

    Args:
        B: binary codes. F, G: real-valued image / text features.
        Y1, Y2: embeddings for the first / second label layer.
        S: similarity matrix between the two label layers.
        L1, L2: label matrices for the first / second layer.
        opt: options providing `bit`, `alpha`, `beta`, `eta`, `gamma`.

    Returns:
        Scalar tensor: the summed loss.
    """
    # O1, first label layer: regression of both modalities onto Y1.
    layer1 = opt.alpha * torch.sum(
        (opt.bit * L1 - F.mm(Y1.t())) ** 2 + (opt.bit * L1 - G.mm(Y1.t())) ** 2)

    # O1, second label layer: regression of both modalities onto Y2.
    layer2 = opt.beta * torch.sum(
        (opt.bit * L2 - F.mm(Y2.t())) ** 2 + (opt.bit * L2 - G.mm(Y2.t())) ** 2)

    # O2: consistency between the two label-layer embeddings.
    hierarchy = opt.eta * torch.sum((opt.bit * S - Y1.mm(Y2.t())) ** 2)

    # O1 quantization: binary codes vs. real features.
    quantization = opt.gamma * torch.sum((B - F) ** 2 + (B - G) ** 2)

    return layer1 + layer2 + hierarchy + quantization


def generate_image_code(opt, img_model, X, bit):
    """Generate binary hash codes for every image in X, batch by batch.

    Args:
        opt: options providing `batch_size` and `use_gpu`.
        img_model: image model; called as img_model(batch) and expected to
            return (features, ...) with features of shape (batch, bit).
        X: image tensor, first dimension indexes samples.
        bit: hash-code length.

    Returns:
        Tensor of shape (num_data, bit) with entries in {-1, 0, +1}
        (sign of the model outputs).
    """
    batch_size = opt.batch_size
    num_data = X.shape[0]
    sample_idx = np.linspace(0, num_data - 1, num_data).astype(int)
    codes = torch.zeros(num_data, bit, dtype=torch.float)
    if opt.use_gpu:
        codes = codes.cuda()
    for batch_no in tqdm(range(num_data // batch_size + 1)):
        batch_idx = sample_idx[batch_no * batch_size: min((batch_no + 1) * batch_size, num_data)]
        if len(batch_idx) == 0:
            # The loop runs one extra iteration when batch_size divides
            # num_data exactly; stop on the empty tail.
            break
        image = X[batch_idx].type(torch.float)
        if opt.use_gpu:
            image = image.cuda()
        features, _ = img_model(image)
        codes[batch_idx, :] = features.data
    return torch.sign(codes)


def generate_text_code(opt, txt_model, Y, bit):
    """Generate binary hash codes for every tag vector in Y, batch by batch.

    Args:
        opt: options providing `batch_size` and `use_gpu`.
        txt_model: text model; receives batches reshaped to
            (batch, 1, tag_dim, 1) and is expected to return
            (features, ...) with features of shape (batch, bit).
        Y: tag tensor, first dimension indexes samples.
        bit: hash-code length.

    Returns:
        Tensor of shape (num_data, bit) with entries in {-1, 0, +1}
        (sign of the model outputs).
    """
    batch_size = opt.batch_size
    num_data = Y.shape[0]
    sample_idx = np.linspace(0, num_data - 1, num_data).astype(int)
    codes = torch.zeros(num_data, bit, dtype=torch.float)
    if opt.use_gpu:
        codes = codes.cuda()
    for batch_no in tqdm(range(num_data // batch_size + 1)):
        batch_idx = sample_idx[batch_no * batch_size: min((batch_no + 1) * batch_size, num_data)]
        if len(batch_idx) == 0:
            # Extra trailing iteration when batch_size divides num_data.
            break
        # Insert channel and width dims: (batch, tag_dim) -> (batch, 1, tag_dim, 1).
        text = Y[batch_idx].unsqueeze(1).unsqueeze(-1).type(torch.float)
        if opt.use_gpu:
            text = text.cuda()
        features, _ = txt_model(text)
        codes[batch_idx, :] = features.data
    return torch.sign(codes)


def write_result(recode_path, result):
    """Write each metric as "<name> <value>" per line to result.txt.

    Args:
        recode_path: directory in which result.txt is (over)written.
        result: dict of metric name -> value; values are stringified.
    """
    lines = [key + ' ' + str(value) + '\n' for key, value in result.items()]
    with open(os.path.join(recode_path, 'result.txt'), 'w') as out:
        out.writelines(lines)

def update_hypernetwork(hypernet, client_model_params_list, client_id, diff, opt):
    """Apply one manual SGD step to the hypernetwork from a client update.

    Backpropagates the selected client's parameter delta through the
    hypernetwork outputs and updates the mlp / fc / embedding parameter
    groups with learning rate `opt.hn_lr`.

    Args:
        hypernet: hypernetwork exposing mlp_parameters(),
            fc_layer_parameters(), emd_parameters() and save_hn().
        client_model_params_list: per-client lists of parameters generated
            by the hypernetwork (graph-connected tensors).
        client_id: index of the client whose update drives this step.
        diff: mapping whose values are the per-parameter deltas used as
            grad_outputs (must align with the client's parameter list).
        opt: options providing the hypernetwork learning rate `hn_lr`.

    Returns:
        The updated hypernetwork (also mutated in place).
    """
    mlp_params = hypernet.mlp_parameters()
    fc_params = hypernet.fc_layer_parameters()
    emd_params = hypernet.emd_parameters()

    # Backprop the client's parameter delta through the hypernet outputs.
    hn_grads = torch.autograd.grad(
        outputs=list(client_model_params_list[client_id]),
        inputs=mlp_params + fc_params + emd_params,
        grad_outputs=[delta for _, delta in diff.items()],
        allow_unused=True,
    )

    # Slice the flat gradient tuple back into the three parameter groups.
    n_mlp = len(mlp_params)
    n_fc = len(fc_params)
    mlp_grads = hn_grads[:n_mlp]
    fc_grads = hn_grads[n_mlp:n_mlp + n_fc]
    emd_grads = hn_grads[n_mlp + n_fc:]

    # Fix: with allow_unused=True any group may contain None gradients for
    # parameters that did not influence the outputs. The original guarded
    # only the fc loop, so a None in mlp/emd grads raised a TypeError.
    for group_params, group_grads in ((mlp_params, mlp_grads),
                                      (fc_params, fc_grads),
                                      (emd_params, emd_grads)):
        for param, grad in zip(group_params, group_grads):
            if grad is not None:
                param.data -= opt.hn_lr * grad

    hypernet.save_hn()

    return hypernet

def valid(opt, img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y, query_L, retrieval_L):
    """Evaluate cross-modal retrieval mAP in both directions.

    Encodes queries and database for both modalities, then scores
    image-to-text and text-to-image retrieval with calc_map_k.

    Returns:
        Tuple (mapi2t, mapt2i): image->text and text->image mAP scores.
    """
    query_img_codes = generate_image_code(opt, img_model, query_x, opt.bit)
    query_txt_codes = generate_text_code(opt, txt_model, query_y, opt.bit)
    db_img_codes = generate_image_code(opt, img_model, retrieval_x, opt.bit)
    db_txt_codes = generate_text_code(opt, txt_model, retrieval_y, opt.bit)

    # Image queries against the text database, and vice versa.
    mapi2t = calc_map_k(query_img_codes, db_txt_codes, query_L, retrieval_L)
    mapt2i = calc_map_k(query_txt_codes, db_img_codes, query_L, retrieval_L)
    return mapi2t, mapt2i


if __name__ == '__main__':
    # Smoke test: mAP on small hand-crafted hash codes and labels.
    query_codes = torch.Tensor([[1, -1, 1, 1],
                                [-1, -1, -1, 1],
                                [1, 1, -1, 1],
                                [1, 1, 1, -1]])
    db_codes = torch.Tensor([[1, -1, 1, -1],
                             [-1, -1, 1, -1],
                             [-1, -1, 1, -1],
                             [1, 1, -1, -1],
                             [-1, 1, -1, -1],
                             [1, 1, -1, 1]])
    query_labels = torch.Tensor([[0, 1, 0, 0],
                                 [1, 1, 0, 0],
                                 [1, 0, 0, 1],
                                 [0, 1, 0, 1]])
    db_labels = torch.Tensor([[1, 0, 0, 1],
                              [1, 1, 0, 0],
                              [0, 1, 1, 0],
                              [0, 0, 1, 0],
                              [1, 0, 0, 0],
                              [0, 0, 1, 0]])

    # Renamed from `map`, which shadowed the builtin.
    map_score = calc_map_k(query_codes, db_codes, query_labels, db_labels)
    print(map_score)
