import copy
import gc
import torch
import numpy as np

from models.hypernetwork_module import ImageHyperNetwork, TextHyperNetwork
from collections import OrderedDict
from update import LocalImgUpdate, LocalTxtUpdate, Local_Hierarchical_ImgUpdate, Local_Hierarchical_TxtUpdate
from utils import agg_func, proto_aggregation, calc_PLFedCMH_loss, calc_SHDCH_loss, write_result, update_hypernetwork, valid, split_data, avg_model, cacl_FedCMR_weight, FedCMR_agg_model, FedCMR_local_update
from data_handler import load_Hierarchical_data


def PLFedCMH_taskheter(opt, X, Y, L, user_groups, img_model, txt_model,  img_client_model_params_list,
                       txt_client_model_params_list, img_all_params_name, txt_all_params_name, recode_path):
    '''Run the PLFedCMH federated training procedure with task heterogeneity.

    Each communication round, a per-client hypernetwork produces layer-wise
    aggregation weights used to blend all clients' parameters into a
    personalized model; the client then trains locally (updating its F/G/B/Y
    hash buffers and its prototypes), and the resulting parameter delta is fed
    back to update both the hypernetwork and the client's stored parameters.

    Args:
        opt: experiment options (num_users, bit, max_epoch, train_ep, use_gpu,
            gamma, s_param, num_class2, hypernetwork dims, valid flag, ...).
        X, Y, L: dicts keyed 'train'/'query'/'retrieval' of numpy arrays
            holding images, texts and labels respectively.
        user_groups: mapping client index -> sample indices of its local data.
        img_model, txt_model: template image / text hashing networks.
        img_client_model_params_list, txt_client_model_params_list: per-client
            lists of parameter tensors for the 10 clients, ordered like
            img_all_params_name / txt_all_params_name.
        img_all_params_name, txt_all_params_name: ordered parameter names
            matching the per-client parameter lists.
        recode_path: directory used for model checkpoints and the result file.
    '''
    train_L = torch.from_numpy(L['train'])
    train_x = torch.from_numpy(X['train'])
    train_y = torch.from_numpy(Y['train'])

    query_L = torch.from_numpy(L['query'])
    query_x = torch.from_numpy(X['query'])
    query_y = torch.from_numpy(Y['query'])

    retrieval_L = torch.from_numpy(L['retrieval'])
    retrieval_x = torch.from_numpy(X['retrieval'])
    retrieval_y = torch.from_numpy(Y['retrieval'])

    if opt.use_gpu:
        train_L = train_L.cuda()

    # Result bookkeeping: global loss curves plus per-client MAP curves.
    result = {
        'loss': [],
        'proto_loss': [],
        'mapi2t': [[] for _ in range(opt.num_users)],
        'mapt2i': [[] for _ in range(opt.num_users)]
    }

    max_mapi2t = [0. for _ in range(opt.num_users)]
    max_mapt2i = [0. for _ in range(opt.num_users)]

    # HyperNetworks that emit per-client, layer-wise aggregation weights.
    img_hypernet = ImageHyperNetwork(
        modal='img',
        embedding_dim=opt.embedding_dim_img,
        client_num=opt.num_users,
        hidden_dim=opt.hidden_dim_img,
        backbone=img_model,
        gpu=opt.use_gpu,
    )
    txt_hypernet = TextHyperNetwork(
        modal='txt',
        embedding_dim=opt.embedding_dim_txt,
        client_num=opt.num_users,
        hidden_dim=opt.hidden_dim_txt,
        backbone=txt_model,
        gpu=opt.use_gpu,
    )

    # FedProto: global prototypes start empty and are aggregated each round.
    global_img_protos, global_txt_protos = [], []
    idxs_users = np.arange(opt.num_users)

    # Identity matrix S acting as the global class matrix.
    # BUG FIX: only move S to the GPU when requested; the original called
    # .cuda() unconditionally, crashing CPU-only runs.
    S = torch.tensor(np.eye(opt.num_class2))
    if opt.use_gpu:
        S = S.cuda()

    # Per-client buffers: F (image features), G (text features), Y (class hash
    # codes) and B (binary codes derived from F and G).
    local_F_buffer_dict, local_G_buffer_dict, local_Y_dict, local_B_dict = {}, {}, {}, {}
    for i in range(opt.num_users):
        num_train = len(user_groups[i])  # number of training samples of client i
        F_buffer = torch.randn(num_train, opt.bit)  # random init of image feature buffer
        G_buffer = torch.randn(num_train, opt.bit)  # random init of text feature buffer
        Y_buffer = torch.sign(torch.randn(opt.num_class2, opt.bit))  # random init of class hash codes
        if opt.use_gpu:
            F_buffer = F_buffer.cuda()
            G_buffer = G_buffer.cuda()
            Y_buffer = Y_buffer.cuda()
        # B is the sign of F + G, used as the initial binary hash codes.
        B = torch.sign(F_buffer + G_buffer)
        local_F_buffer_dict[i] = F_buffer
        local_G_buffer_dict[i] = G_buffer
        local_Y_dict[i] = Y_buffer
        local_B_dict[i] = B

    for rnd in range(opt.max_epoch):
        # Per-round accumulators for local losses and local prototypes.
        local_losses, local_proto_losses = [], []
        local_img_protos, local_txt_protos = {}, {}
        print(f'\n | Global Training Round : {rnd + 1} |\n')

        for idx in idxs_users:
            # Layer-wise dict: parameter name -> tuple of that parameter from
            # every client (used for hypernetwork-weighted aggregation).
            img_layer_params_dict = dict(
                zip(img_all_params_name, list(zip(*img_client_model_params_list))))
            txt_layer_params_dict = dict(
                zip(txt_all_params_name, list(zip(*txt_client_model_params_list))))
            # The hypernetwork emits this client's aggregation weights
            # (img_alpha covers 8 layers, txt_alpha covers 2).
            img_alpha = img_hypernet(idx)
            txt_alpha = txt_hypernet(idx)
            img_aggregated_parameters, txt_aggregated_parameters = {}, {}
            # Normalize the per-layer weights, then blend every client's copy
            # of each parameter into this client's personalized parameters.
            for name in img_all_params_name:
                a = img_alpha[name.split(".")[-2]]
                stacked = torch.stack(img_layer_params_dict[name], dim=-1)
                # BUG FIX: move to GPU only when one is in use (original
                # called .cuda() unconditionally).
                if opt.use_gpu:
                    stacked = stacked.cuda()
                img_aggregated_parameters[name] = torch.sum(a / a.sum() * stacked, dim=-1)
            img_client_model_params_list[idx] = list(img_aggregated_parameters.values())
            for name in txt_all_params_name:
                a = txt_alpha[name.split(".")[-2]]
                stacked = torch.stack(txt_layer_params_dict[name], dim=-1)
                if opt.use_gpu:
                    stacked = stacked.cuda()
                txt_aggregated_parameters[name] = torch.sum(a / a.sum() * stacked, dim=-1)
            txt_client_model_params_list[idx] = list(txt_aggregated_parameters.values())

            # theta * alpha: load the aggregated parameters into fresh local
            # model copies for this client's update.
            img_model_net = copy.deepcopy(img_model)
            img_model_net.load_state_dict(img_aggregated_parameters, strict=True)
            txt_model_net = copy.deepcopy(txt_model)
            txt_model_net.load_state_dict(txt_aggregated_parameters, strict=True)

            img_protos, txt_protos = {}, {}

            # Slice out this client's local training data.
            idxs = user_groups[idx]
            local_train_x = train_x[idxs]
            local_train_y = train_y[idxs]
            local_train_L = train_L[idxs]

            epoch_loss = {'total': [], 'proto': []}
            epoch_weights = {'image': img_model_net.state_dict(),
                             'text': txt_model_net.state_dict()}
            # Snapshot the pre-training weights so the parameter delta can be
            # computed after local training.
            img_frz_model_params = OrderedDict({
                name: param.clone().detach().requires_grad_(param.requires_grad)
                for name, param in epoch_weights['image'].items()})
            txt_frz_model_params = OrderedDict({
                name: param.clone().detach().requires_grad_(param.requires_grad)
                for name, param in epoch_weights['text'].items()})

            for ep in range(opt.train_ep):
                # Reload the latest local weights at the start of every epoch.
                img_model_net.load_state_dict(epoch_weights['image'])
                txt_model_net.load_state_dict(epoch_weights['text'])

                local_img_model = LocalImgUpdate(opt=opt, F_buffer=local_F_buffer_dict[idx], Y=local_Y_dict[idx],
                                                 B=local_B_dict[idx], train_img=local_train_x,
                                                 train_label=local_train_L)
                # Returns: updated weights, updated image feature buffer,
                # updated image prototypes, and the image prototype loss.
                img_w, local_F_buffer, img_protos, loss_img_proto_loss = local_img_model.update_weights_het(
                    global_img_protos, model=img_model_net)
                local_F_buffer_dict[idx] = local_F_buffer

                local_txt_model = LocalTxtUpdate(opt=opt, G_buffer=local_G_buffer_dict[idx], Y=local_Y_dict[idx],
                                                 B=local_B_dict[idx], train_txt=local_train_y,
                                                 train_label=local_train_L)
                txt_w, local_G_buffer, txt_protos, loss_txt_proto_loss = local_txt_model.update_weights_het(
                    global_txt_protos, model=txt_model_net)
                local_G_buffer_dict[idx] = local_G_buffer
                # Update B from the refreshed feature buffers.
                local_B = torch.sign(opt.gamma * (local_F_buffer + local_G_buffer))
                local_B_dict[idx] = local_B

                # Update Y via discrete cyclic coordinate descent over bits.
                local_Q = opt.bit * (torch.matmul(local_F_buffer.t(), local_train_L.to(torch.float32))
                                     + torch.matmul(local_G_buffer.t(), local_train_L.to(torch.float32))
                                     + opt.s_param * torch.matmul(local_Y_dict[idx].t(), S.to(torch.float32)))

                for _ in range(3):
                    # BUG FIX: snapshot Y with .clone(); the original aliased
                    # the live tensor, which is mutated in place below, so the
                    # convergence norm was always zero and the loop exited
                    # after a single pass.
                    local_Y_buffer = local_Y_dict[idx].clone()
                    for k in range(opt.bit):
                        local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
                        local_Y_ = local_Y_dict[idx][:, local_sel_ind]
                        local_Fk = local_F_buffer[:, k]
                        local_F_ = local_F_buffer[:, local_sel_ind]
                        local_Gk = local_G_buffer[:, k]
                        local_G_ = local_G_buffer[:, local_sel_ind]

                        local_y = torch.sign(
                            local_Q[k, :] - (torch.matmul(local_Y_, torch.matmul(local_F_.t(), local_Fk))
                                             + torch.matmul(local_Y_, torch.matmul(local_G_.t(), local_Gk))))
                        local_Y_dict[idx][:, k] = local_y
                    # Stop early once Y has effectively converged.
                    if np.linalg.norm(
                            local_Y_dict[idx].cpu().numpy() - local_Y_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(
                        local_Y_buffer.cpu().numpy()):
                        break

                # Objective of Eq. (2) in the paper.
                loss, loss_proto = calc_PLFedCMH_loss(local_B, local_F_buffer, local_G_buffer, local_Y_dict[idx], local_train_L,
                                             loss_img_proto_loss, loss_txt_proto_loss, opt)
                print(
                    'Local loss...global round: %3d, client: %3d, local epoch: %3d, loss: %3.3f, proto loss: %3.3f, comment: update B'
                    % (rnd + 1, idx + 1, ep + 1, loss.item(), loss_proto.item()))
                epoch_loss['total'].append(loss.item())
                epoch_loss['proto'].append(loss_proto.item())
                epoch_weights['image'] = img_w
                epoch_weights['text'] = txt_w

            # Aggregate the local prototypes produced during local training.
            agg_img_protos = agg_func(img_protos)
            agg_txt_protos = agg_func(txt_protos)
            # Average this client's losses over the local epochs.
            epoch_loss['total'] = sum(epoch_loss['total']) / len(epoch_loss['total'])
            epoch_loss['proto'] = sum(epoch_loss['proto']) / len(epoch_loss['proto'])
            # Record this client's losses and prototypes.
            local_losses.append(copy.deepcopy(epoch_loss['total']))
            local_proto_losses.append(copy.deepcopy(epoch_loss['proto']))
            local_img_protos[idx] = agg_img_protos
            local_txt_protos[idx] = agg_txt_protos
            # Parameter delta of the local update (after - before).
            img_diff = OrderedDict({
                k: p1 - p0
                for (k, p1), p0 in zip(
                    epoch_weights['image'].items(),
                    img_frz_model_params.values(), )})
            txt_diff = OrderedDict({
                k: p1 - p0
                for (k, p1), p0 in zip(
                    epoch_weights['text'].items(),
                    txt_frz_model_params.values(), )})
            # Train the hypernetworks on the observed deltas.
            img_hypernet = update_hypernetwork(img_hypernet, img_client_model_params_list, idx, img_diff, opt)
            txt_hypernet = update_hypernetwork(txt_hypernet, txt_client_model_params_list, idx, txt_diff, opt)
            # Update the stored client parameters by adding the deltas.
            img_updated_params = []
            for param, diff in zip(
                    img_client_model_params_list[idx], img_diff.values()
            ):
                img_updated_params.append((param + diff).detach())
            img_client_model_params_list[idx] = img_updated_params
            txt_updated_params = []
            for param, diff in zip(
                    txt_client_model_params_list[idx], txt_diff.values()
            ):
                txt_updated_params.append((param + diff).detach())
            txt_client_model_params_list[idx] = txt_updated_params

            del img_model_net, txt_model_net
            gc.collect()

        # Average the clients' losses as the global loss for this round.
        loss_avg = sum(local_losses) / len(local_losses)
        loss_proto_avg = sum(local_proto_losses) / len(local_proto_losses)
        print('Global loss...global round: %3d, loss: %3.3f, proto loss: %3.3f' % (rnd + 1, loss_avg, loss_proto_avg))
        result['loss'].append(float(loss_avg))
        result['proto_loss'].append(float(loss_proto_avg))

        # Aggregate the local prototypes into the global prototypes.
        global_img_protos = proto_aggregation(local_img_protos)
        global_txt_protos = proto_aggregation(local_txt_protos)

        # Evaluate every client once per communication round.
        if opt.valid:
            for idx in range(opt.num_users):
                img_parameters = OrderedDict(
                    {
                        k: p
                        for k, p in zip(
                        img_all_params_name,
                        img_client_model_params_list[idx]
                        )
                    }
                )
                txt_parameters = OrderedDict(
                    {
                        k: p
                        for k, p in zip(
                        txt_all_params_name,
                        txt_client_model_params_list[idx]
                        )
                    }
                )
                img_model.load_state_dict(img_parameters)
                txt_model.load_state_dict(txt_parameters)
                mapi2t, mapt2i = valid(opt, img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
                                       query_L, retrieval_L)
                print('...round: %3d, cilent: %3d, valid MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (rnd + 1, idx+1, mapi2t, mapt2i))
                result['mapi2t'][idx].append(mapi2t.item())
                result['mapt2i'][idx].append(mapt2i.item())
                # Checkpoint only when BOTH directions improve for this client.
                if mapt2i >= max_mapt2i[idx] and mapi2t >= max_mapi2t[idx]:
                    max_mapi2t[idx] = mapi2t.item()
                    max_mapt2i[idx] = mapt2i.item()
                    img_model.save(recode_path)
                    txt_model.save(recode_path)

    print('...training procedure finish')
    if opt.valid:
        max_mapi2t_avg = sum(max_mapi2t) / len(max_mapi2t)
        max_mapt2i_avg = sum(max_mapt2i) / len(max_mapt2i)
        print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (max_mapi2t_avg, max_mapt2i_avg))
        result['max_mapi2t_list'] = max_mapi2t
        result['max_mapt2i_list'] = max_mapt2i
        result['max_mapi2t'] = max_mapi2t_avg
        result['max_mapt2i'] = max_mapt2i_avg
    else:
        mapi2t, mapt2i = valid(opt,img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
                               query_L, retrieval_L)
        print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (mapi2t, mapt2i))
        result['max_mapi2t'] = mapi2t
        result['max_mapt2i'] = mapt2i

    write_result(recode_path,result)
    img_hypernet.clean_models()
    txt_hypernet.clean_models()


def FedAvg_taskheter(opt, user_groups, img_model, txt_model, img_client_model_params_list, txt_client_model_params_list, img_all_params_name, txt_all_params_name, recode_path):
    '''FedAvg baseline over hierarchical-label cross-modal hashing (SHDCH-style).

    Each round every client trains its local image/text hashing models and
    updates its discrete buffers (F, G, B, Y1, Y2); the server then averages
    the client parameters (FedAvg) and redistributes the global model.

    Args:
        opt: experiment options (num_users, bit, max_epoch, train_ep, use_gpu,
            gamma, alpha, beta, eta, num_class1, num_class2, valid flag, ...).
        user_groups: mapping client index -> sample indices of its local data.
        img_model, txt_model: template image / text hashing networks.
        img_client_model_params_list, txt_client_model_params_list: per-client
            lists of parameter tensors, ordered like *_all_params_name.
        img_all_params_name, txt_all_params_name: ordered parameter names
            matching the per-client parameter lists.
        recode_path: directory used for model checkpoints and the result file.
    '''
    # Load the hierarchical dataset.
    images, tags, labels = load_Hierarchical_data(opt.dataset, opt.data_path)
    # Build the label association matrix: S[i][j] = 1 when first-level label i
    # is the parent of second-level label j.
    # NOTE(review): this assumes each sample activates exactly one label per
    # level; with several active labels only the last one scanned is kept —
    # confirm against the dataset format.
    S = torch.zeros(opt.num_class1, opt.num_class2)
    index1 = 0
    index2 = 0
    for i in range(len(labels)):
        for j in range(len(labels[0])):
            if labels[i][j] == 1 and j < opt.num_class1:
                index1 = j
            elif labels[i][j] == 1 and j >= opt.num_class1:
                index2 = j
        S[index1][index2 - opt.num_class1] = 1
    if opt.use_gpu:
        S = S.cuda()

    X, Y, L = split_data(opt, images, tags, labels)

    train_L = torch.from_numpy(L['train'])
    train_x = torch.from_numpy(X['train'])
    train_y = torch.from_numpy(Y['train'])

    # Evaluation uses only the second-level labels.
    query_L = torch.from_numpy(L['query'])[:, opt.num_class1:]
    query_x = torch.from_numpy(X['query'])
    query_y = torch.from_numpy(Y['query'])

    retrieval_L = torch.from_numpy(L['retrieval'])[:, opt.num_class1:]
    retrieval_x = torch.from_numpy(X['retrieval'])
    retrieval_y = torch.from_numpy(Y['retrieval'])

    # BUG FIX: move the evaluation tensors to the GPU once, and only when a
    # GPU is in use; the original called .cuda() unconditionally at every
    # valid() call, which crashes on CPU-only runs.
    if opt.use_gpu:
        query_L = query_L.cuda()
        query_x = query_x.cuda()
        query_y = query_y.cuda()
        retrieval_L = retrieval_L.cuda()
        retrieval_x = retrieval_x.cuda()
        retrieval_y = retrieval_y.cuda()

    # Result bookkeeping.
    result = {
        'loss': [],
        'mapi2t': [],
        'mapt2i': []
    }

    max_mapi2t = 0.
    max_mapt2i = 0.

    # Client indices.
    idxs_users = np.arange(opt.num_users)

    # Per-client buffers: F (image features), G (text features), Y1/Y2 (class
    # hash codes of the two label levels) and B (derived binary codes).
    local_F_buffer_dict, local_G_buffer_dict, local_Y1_dict, local_Y2_dict, local_B_dict = {}, {}, {}, {}, {}
    for i in range(opt.num_users):
        num_train = len(user_groups[i])  # number of training samples of client i
        F_buffer = torch.randn(num_train, opt.bit)  # random init of image feature buffer
        G_buffer = torch.randn(num_train, opt.bit)  # random init of text feature buffer
        Y1_buffer = torch.sign(torch.randn(opt.num_class1, opt.bit))  # first-level class hash codes
        Y2_buffer = torch.sign(torch.randn(opt.num_class2, opt.bit))  # second-level class hash codes

        if opt.use_gpu:
            F_buffer = F_buffer.cuda()
            G_buffer = G_buffer.cuda()
            Y1_buffer = Y1_buffer.cuda()
            Y2_buffer = Y2_buffer.cuda()

        # B is the sign of F + G, used as the initial binary hash codes.
        B = torch.sign(F_buffer + G_buffer)
        local_F_buffer_dict[i] = F_buffer
        local_G_buffer_dict[i] = G_buffer
        local_Y1_dict[i] = Y1_buffer
        local_Y2_dict[i] = Y2_buffer
        local_B_dict[i] = B


    for rnd in range(opt.max_epoch):
        # Per-round accumulator for the clients' local losses.
        local_losses = []
        print(f'\n | Global Training Round : {rnd + 1} |\n')

        for idx in idxs_users:
            # Slice out this client's local training data.
            idxs = user_groups[idx]
            local_train_x = train_x[idxs]
            local_train_y = train_y[idxs]
            local_train_L = train_L[idxs]
            # First-level labels.
            local_train_L1 = local_train_L[:, 0:opt.num_class1]
            # Second-level labels.
            local_train_L2 = local_train_L[:, opt.num_class1:]
            if opt.use_gpu:
                local_train_x = local_train_x.cuda()
                local_train_y = local_train_y.cuda()
                local_train_L = local_train_L.cuda()
                local_train_L1 = local_train_L1.cuda()
                local_train_L2 = local_train_L2.cuda()


            # Load this client's stored parameters into fresh model copies.
            client_img_parameters = OrderedDict(
                {
                    k: p
                    for k, p in zip(
                    img_all_params_name,
                    img_client_model_params_list[idx]
                )
                }
            )
            client_txt_parameters = OrderedDict(
                {
                    k: p
                    for k, p in zip(
                    txt_all_params_name,
                    txt_client_model_params_list[idx]
                )
                }
            )
            img_model_net = copy.deepcopy(img_model)
            img_model_net.load_state_dict(client_img_parameters, strict=True)
            txt_model_net = copy.deepcopy(txt_model)
            txt_model_net.load_state_dict(client_txt_parameters, strict=True)

            epoch_loss = {'total': []}
            epoch_weights = {'image': img_model_net.state_dict(),
                             'text': txt_model_net.state_dict()}
            # Snapshot the pre-training weights so the parameter delta can be
            # computed after local training.
            img_frz_model_params = OrderedDict({
                name: param.clone().detach().requires_grad_(param.requires_grad)
                for name, param in epoch_weights['image'].items()})
            txt_frz_model_params = OrderedDict({
                name: param.clone().detach().requires_grad_(param.requires_grad)
                for name, param in epoch_weights['text'].items()})

            for ep in range(opt.train_ep):
                # Reload the latest local weights at the start of every epoch.
                img_model_net.load_state_dict(epoch_weights['image'])
                txt_model_net.load_state_dict(epoch_weights['text'])

                local_img_model = Local_Hierarchical_ImgUpdate(opt=opt, F_buffer=local_F_buffer_dict[idx], Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
                                                 B=local_B_dict[idx], train_img=local_train_x,
                                                 train_label=local_train_L)
                # Returns the updated weights and image feature buffer.
                img_w, local_F_buffer = local_img_model.update_weights_het(model=img_model_net)
                local_F_buffer_dict[idx] = local_F_buffer

                local_txt_model = Local_Hierarchical_TxtUpdate(opt=opt, G_buffer=local_G_buffer_dict[idx], Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
                                                 B=local_B_dict[idx], train_txt=local_train_y,
                                                 train_label=local_train_L)
                txt_w, local_G_buffer = local_txt_model.update_weights_het(model=txt_model_net)
                local_G_buffer_dict[idx] = local_G_buffer

                # Update B from the refreshed feature buffers.
                local_B = torch.sign(opt.gamma * (local_F_buffer + local_G_buffer))
                local_B_dict[idx] = local_B

                # Update Y1 / Y2 via discrete cyclic coordinate descent.
                local_Q1 = opt.bit * ( opt.alpha * (torch.matmul(local_F_buffer.t(), local_train_L1.to(torch.float32)) + torch.matmul(local_G_buffer.t(), local_train_L1.to(torch.float32)))
                                     + opt.eta * torch.matmul(local_Y2_dict[idx].t(), S.to(torch.float32).t()))

                local_Q2 = opt.bit * ( opt.beta * (torch.matmul(local_F_buffer.t(), local_train_L2.to(torch.float32)) + torch.matmul(local_G_buffer.t(), local_train_L2.to(torch.float32)))
                                      + opt.eta * torch.matmul(local_Y1_dict[idx].t(), S.to(torch.float32)))



                for _ in range(3):
                    # BUG FIX: snapshot Y1 with .clone(); the original aliased
                    # the live tensor mutated in place below, so the
                    # convergence norm was always zero and the loop exited
                    # after a single pass.
                    local_Y1_buffer = local_Y1_dict[idx].clone()
                    for k in range(opt.bit):
                        local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
                        local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
                        local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
                        local_y2k = local_Y2_dict[idx][:, k]
                        local_Fk = local_F_buffer[:, k]
                        local_F_ = local_F_buffer[:, local_sel_ind]
                        local_Gk = local_G_buffer[:, k]
                        local_G_ = local_G_buffer[:, local_sel_ind]

                        y1 = torch.sign(
                            local_Q1[k, :]
                            - opt.eta * torch.matmul(local_Y1_, torch.matmul(local_Y2_.t(), local_y2k))
                            - opt.alpha * (torch.matmul(local_Y1_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(local_Y1_, torch.matmul(local_G_.t(), local_Gk)))
                        )
                        local_Y1_dict[idx][:, k] = y1
                    # Stop early once Y1 has effectively converged.
                    if np.linalg.norm(
                            local_Y1_dict[idx].cpu().numpy() - local_Y1_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(local_Y1_buffer.cpu().numpy()):
                        break

                for _ in range(3):
                    # BUG FIX: same aliasing bug as Y1 — snapshot with .clone().
                    local_Y2_buffer = local_Y2_dict[idx].clone()
                    for k in range(opt.bit):
                        local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
                        local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
                        local_y1k = local_Y1_dict[idx][:, k]
                        local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
                        local_Fk = local_F_buffer[:, k]
                        local_F_ = local_F_buffer[:, local_sel_ind]
                        local_Gk = local_G_buffer[:, k]
                        local_G_ = local_G_buffer[:, local_sel_ind]

                        y2 = torch.sign(
                            local_Q2[k, :]
                            - opt.eta * torch.matmul(local_Y2_, torch.matmul(local_Y1_.t(), local_y1k))
                            - opt.beta * (torch.matmul(local_Y2_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(local_Y2_, torch.matmul(local_G_.t(), local_Gk)))
                        )
                        local_Y2_dict[idx][:, k] = y2
                    # Stop early once Y2 has effectively converged.
                    if np.linalg.norm(
                            local_Y2_dict[idx].cpu().numpy() - local_Y2_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(local_Y2_buffer.cpu().numpy()):
                        break

                # Objective of Eq. (2) in the paper.
                loss = calc_SHDCH_loss(local_B, local_F_buffer, local_G_buffer, local_Y1_dict[idx], local_Y2_dict[idx], S, local_train_L1, local_train_L2, opt)
                print(
                    'Local loss...global round: %3d, client: %3d, local epoch: %3d, loss: %3.3f, comment: update B'
                    % (rnd + 1, idx + 1, ep + 1, loss.item()))
                epoch_loss['total'].append(loss.item())
                epoch_weights['image'] = img_w
                epoch_weights['text'] = txt_w


            # Average this client's loss over the local epochs.
            epoch_loss['total'] = sum(epoch_loss['total']) / len(epoch_loss['total'])

            # Record this client's loss.
            local_losses.append(copy.deepcopy(epoch_loss['total']))

            # Parameter delta of the local update (after - before).
            img_diff = OrderedDict({
                k: p1 - p0
                for (k, p1), p0 in zip(
                    epoch_weights['image'].items(),
                    img_frz_model_params.values(), )})
            txt_diff = OrderedDict({
                k: p1 - p0
                for (k, p1), p0 in zip(
                    epoch_weights['text'].items(),
                    txt_frz_model_params.values(), )})

            # Update the stored client parameters by adding the deltas.
            img_updated_params = []
            for param, diff in zip(
                    img_client_model_params_list[idx], img_diff.values()
            ):
                img_updated_params.append((param + diff).detach())
            img_client_model_params_list[idx] = img_updated_params
            txt_updated_params = []
            for param, diff in zip(
                    txt_client_model_params_list[idx], txt_diff.values()
            ):
                txt_updated_params.append((param + diff).detach())
            txt_client_model_params_list[idx] = txt_updated_params

            del img_model_net, txt_model_net

            gc.collect()

        # Average the clients' losses as the global loss for this round.
        loss_avg = sum(local_losses) / len(local_losses)
        print('Global loss...global round: %3d, loss: %3.3f' % (rnd + 1, loss_avg))
        result['loss'].append(float(loss_avg))

        # FedAvg aggregation of the client models.
        img_server_model_params_list, txt_server_model_params_list = avg_model(img_client_model_params_list,txt_client_model_params_list, user_groups)

        # Redistribute the global model to every client. A shallow copy per
        # client avoids all clients sharing one list object (the tensors
        # themselves are never mutated in place, only replaced).
        for client in range(opt.num_users):
            img_client_model_params_list[client] = list(img_server_model_params_list)
            txt_client_model_params_list[client] = list(txt_server_model_params_list)

        # Evaluate the global model once per communication round.
        if opt.valid:
            img_parameters = OrderedDict(
                {
                    k: p
                    for k, p in zip(
                    img_all_params_name,
                    img_server_model_params_list
                )
                }
            )
            txt_parameters = OrderedDict(
                {
                    k: p
                    for k, p in zip(
                    txt_all_params_name,
                    txt_server_model_params_list
                )
                }
            )
            img_model.load_state_dict(img_parameters)
            txt_model.load_state_dict(txt_parameters)
            mapi2t, mapt2i = valid(opt, img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
                                   query_L, retrieval_L)
            print('...round: %3d, valid MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (rnd + 1, mapi2t, mapt2i))
            result['mapi2t'].append(mapi2t.item())
            result['mapt2i'].append(mapt2i.item())
            # Checkpoint only when BOTH directions improve.
            if mapt2i >= max_mapt2i and mapi2t >= max_mapi2t:
                max_mapi2t = mapi2t.item()
                max_mapt2i = mapt2i.item()
                img_model.save(recode_path)
                txt_model.save(recode_path)

    print('...training procedure finish')
    if opt.valid:
        print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (max_mapi2t, max_mapt2i))
        result['max_mapi2t'] = max_mapi2t
        result['max_mapt2i'] = max_mapt2i
    else:
        mapi2t, mapt2i = valid(opt, img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
                               query_L, retrieval_L)
        print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (mapi2t, mapt2i))
        result['max_mapi2t'] = mapi2t.item()
        result['max_mapt2i'] = mapt2i.item()

    write_result(recode_path, result)

# def FedCMR_taskheter(opt, user_groups, img_model, txt_model, img_client_model_params_list, txt_client_model_params_list, img_all_params_name, txt_all_params_name, recode_path):
#     #加载层次数据
#     images, tags, labels = load_Hierarchical_data(opt.dataset, opt.data_path)
#     #创建标签的关联矩阵，如果某个第一层标签i是某个第二层标签j的父标签，则S[i][j] = 1
#     S = torch.zeros(opt.num_class1, opt.num_class2)
#     index1 = 0
#     index2 = 0
#     for i in range(0, len(labels)):
#         for j in range(0, len(labels[0])):
#             if (labels[i][j] == 1 and j < opt.num_class1):
#                 index1 = j
#             elif labels[i][j] == 1 and j >= opt.num_class1:
#                 index2 = j
#         S[index1][index2 - opt.num_class1] = 1
#     if opt.use_gpu:
#         S = S.cuda()
#
#     X, Y, L = split_data(opt, images, tags, labels)
#
#     train_L = torch.from_numpy(L['train'])
#     train_x = torch.from_numpy(X['train'])
#     train_y = torch.from_numpy(Y['train'])
#
#     query_L = torch.from_numpy(L['query'])[:,opt.num_class1:] #使用第二层的标签进行评估
#     query_x = torch.from_numpy(X['query'])
#     query_y = torch.from_numpy(Y['query'])
#
#     retrieval_L = torch.from_numpy(L['retrieval'])[:,opt.num_class1:] #使用第二层的标签进行评估
#     retrieval_x = torch.from_numpy(X['retrieval'])
#     retrieval_y = torch.from_numpy(Y['retrieval'])
#
#
#
#
#     #结果记录
#     result = {
#         'loss': [],
#         'mapi2t': [[] for idx in range(opt.num_users)],
#         'mapt2i': [[] for idx in range(opt.num_users)]
#     }
#
#     max_mapi2t = [0. for idx in range(opt.num_users)]
#     max_mapt2i = [0. for idx in range(opt.num_users)]
#
#     # 获得客户端索引
#     idxs_users = np.arange(opt.num_users)
#
#     # 初始化客户端的缓冲区：F_buffer, G_buffer, Y_buffer, B（B是从F和G计算得到的）  先计算80%的数据
#     local_F_buffer_dict, local_G_buffer_dict, local_Y1_dict, local_Y2_dict, local_B_dict, local_F_train_buffer_dict , local_G_train_buffer_dict, local_B_train_dict= {}, {}, {}, {}, {}, {}, {}, {}
#     for i in range(opt.num_users):
#         num_train = len(user_groups[i])  # 获取每个客户端的训练样本数
#         selected_num = int(num_train * 0.8)
#         F_buffer = torch.randn(selected_num, opt.bit)  # 随机初始化图像特征缓冲区
#         G_buffer = torch.randn(selected_num, opt.bit)  # 随机初始化文本特征缓冲区
#         F_train_buffer = torch.randn(num_train-selected_num, opt.bit) #后续训练使用的缓冲区
#         G_train_buffer = torch.randn(num_train-selected_num, opt.bit) #后续训练使用的缓冲区
#         Y1_buffer = torch.sign(torch.randn(opt.num_class1, opt.bit))  # 随机初始化第一层类标签的哈希码
#         Y2_buffer = torch.sign(torch.randn(opt.num_class2, opt.bit))  # 随机初始化第一层类标签的哈希码
#
#         if opt.use_gpu:
#             F_buffer = F_buffer.cuda()
#             G_buffer = G_buffer.cuda()
#             Y1_buffer = Y1_buffer.cuda()
#             Y2_buffer = Y2_buffer.cuda()
#             F_train_buffer = F_train_buffer.cuda()
#             G_train_buffer = G_train_buffer.cuda()
#
#         # 计算 B，B 是 F 和 G 的和的符号（sign）作为初始的哈希码
#         B = torch.sign(F_buffer + G_buffer)
#         B_train = torch.sign(F_train_buffer + G_train_buffer)
#         # 将 F, G, Y1, Y2, B 存入字典中
#         local_F_buffer_dict[i] = F_buffer
#         local_G_buffer_dict[i] = G_buffer
#         local_Y1_dict[i] = Y1_buffer
#         local_Y2_dict[i] = Y2_buffer
#         local_B_dict[i] = B
#
#         local_F_train_buffer_dict[i] = F_train_buffer
#         local_G_train_buffer_dict[i] = G_train_buffer
#         local_B_train_dict[i] = B_train
#
#     #计算每个客户端的权重，这是文章中的Sk，对于每个客户端来说是固定的
#     cilent_weights = cacl_FedCMR_weight(opt, user_groups, train_L[:,opt.num_class1:])
#
#
#     for round in range(opt.max_epoch):
#         #初始化损失
#         local_losses = []
#         print(f'\n | Global Training Round : {round + 1} |\n')
#         print(f'\n | local Training Round : {round + 1} |\n')
#         # 记录更新差异，方便后续计算
#         img_update_diff = [[] for _ in range(opt.num_users)]
#         txt_update_diff = [[] for _ in range(opt.num_users)]
#         for idx in idxs_users:
#             # 为当前客户端准备数据
#             idxs = user_groups[idx]
#             selected_idxs = int(len(idxs) * 0.8)
#             # pick up the certain client's training image, text and label dataset，先使用80%的数据进行训练，文章是随机采取，这里已经随机过了，所以使用前80%的数据
#             local_train_x = train_x[idxs][:selected_idxs]
#             local_train_y = train_y[idxs][:selected_idxs]
#             local_train_L = train_L[idxs][:selected_idxs]
#             #第一层标签
#             local_train_L1 = local_train_L[:,0:opt.num_class1]
#             #第二层标签
#             local_train_L2 = local_train_L[:,opt.num_class1:]
#             if opt.use_gpu:
#                 local_train_x = local_train_x.cuda()
#                 local_train_y = local_train_y.cuda()
#                 local_train_L = local_train_L.cuda()
#                 local_train_L1 = local_train_L1.cuda()
#                 local_train_L2 = local_train_L2.cuda()
#
#
#             # 加载这个客户端的模型,也就是 W0
#             client_img_parameters = OrderedDict(
#                 {
#                     k: p
#                     for k, p in zip(
#                     img_all_params_name,
#                     img_client_model_params_list[idx]
#                 )
#                 }
#             )
#             client_txt_parameters = OrderedDict(
#                 {
#                     k: p
#                     for k, p in zip(
#                     txt_all_params_name,
#                     txt_client_model_params_list[idx]
#                 )
#                 }
#             )
#             img_model_net = copy.deepcopy(img_model)
#             img_model_net.load_state_dict(client_img_parameters, strict=True)
#             txt_model_net = copy.deepcopy(txt_model)
#             txt_model_net.load_state_dict(client_txt_parameters, strict=True)
#
#             epoch_loss = {'total': []}
#             epoch_weights = {'image': img_model_net.state_dict(),
#                              'text': txt_model_net.state_dict()}
#             # the weights before train  # 保存当前模型参数W0，供后续计算参数变化
#             img_frz_model_params = OrderedDict({
#                 name: param.clone().detach().requires_grad_(param.requires_grad)
#                 for name, param in epoch_weights['image'].items()})
#             txt_frz_model_params = OrderedDict({
#                 name: param.clone().detach().requires_grad_(param.requires_grad)
#                 for name, param in epoch_weights['text'].items()})
#
#
#             #用80%的数据进行训练
#             for iter in range(opt.train_ep):
#                 # update local weights every epoch
#                 img_model_net.load_state_dict(epoch_weights['image'])
#                 txt_model_net.load_state_dict(epoch_weights['text'])
#
#                 local_img_model = Local_Hierarchical_ImgUpdate(opt=opt, F_buffer=local_F_buffer_dict[idx], Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
#                                                  B=local_B_dict[idx], train_img=local_train_x,
#                                                  train_label=local_train_L)
#                 #img_w是更新后的模型参数，local_F_buffer是更新后的图像特征缓冲区
#                 img_w, local_F_buffer = local_img_model.update_weights_het(model=img_model_net)
#                 local_F_buffer_dict[idx] = local_F_buffer
#
#                 local_txt_model = Local_Hierarchical_TxtUpdate(opt=opt, G_buffer=local_G_buffer_dict[idx], Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
#                                                  B=local_B_dict[idx], train_txt=local_train_y,
#                                                  train_label=local_train_L)
#                 txt_w, local_G_buffer = local_txt_model.update_weights_het(model=txt_model_net)
#                 local_G_buffer_dict[idx] = local_G_buffer
#
#                 # update B
#                 local_B = torch.sign(opt.gamma * (local_F_buffer + local_G_buffer))
#                 local_B_dict[idx] = local_B
#
#                 # update Y
#                 local_Q1 = opt.bit * ( opt.alpha * (torch.matmul(local_F_buffer.t(), local_train_L1.to(torch.float32)) + torch.matmul(local_G_buffer.t(), local_train_L1.to(torch.float32)))
#                                      + opt.eta * torch.matmul(local_Y2_dict[idx].t(), S.to(torch.float32).t()))
#
#                 local_Q2 = opt.bit * ( opt.beta * (torch.matmul(local_F_buffer.t(), local_train_L2.to(torch.float32)) + torch.matmul(local_G_buffer.t(), local_train_L2.to(torch.float32)))
#                                       + opt.eta * torch.matmul(local_Y1_dict[idx].t(), S.to(torch.float32)))
#
#
#                 # 更新Y1
#                 for i in range(3):
#                     local_Y1_buffer = local_Y1_dict[idx]
#                     for k in range(opt.bit):
#                         local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
#                         local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
#                         local_y1k = local_Y1_dict[idx][:, k]
#                         local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
#                         local_y2k = local_Y2_dict[idx][:, k]
#                         local_Fk = local_F_buffer[:, k]
#                         local_F_ = local_F_buffer[:, local_sel_ind]
#                         local_Gk = local_G_buffer[:, k]
#                         local_G_ = local_G_buffer[:, local_sel_ind]
#
#                         y1 = torch.sign(
#                             local_Q1[k, :]
#                             - opt.eta * torch.matmul(local_Y1_, torch.matmul(local_Y2_.t(), local_y2k))
#                             - opt.alpha * (torch.matmul(local_Y1_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(local_Y1_, torch.matmul(local_G_.t(), local_Gk)))
#                         )
#                         local_Y1_dict[idx][:, k] = y1
#                     if np.linalg.norm(
#                             local_Y1_dict[idx].cpu().numpy() - local_Y1_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(local_Y1_buffer.cpu().numpy()):
#                         break
#
#                 # 更新Y2
#                 for i in range(3):
#                     local_Y2_buffer = local_Y2_dict[idx]
#                     for k in range(opt.bit):
#                         local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
#                         local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
#                         local_y1k = local_Y1_dict[idx][:, k]
#                         local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
#                         local_y2k = local_Y2_dict[idx][:, k]
#                         local_Fk = local_F_buffer[:, k]
#                         local_F_ = local_F_buffer[:, local_sel_ind]
#                         local_Gk = local_G_buffer[:, k]
#                         local_G_ = local_G_buffer[:, local_sel_ind]
#
#                         y2 = torch.sign(
#                             local_Q2[k, :]
#                             - opt.eta * torch.matmul(local_Y2_, torch.matmul(local_Y1_.t(), local_y1k))
#                             - opt.beta * (torch.matmul(local_Y2_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(local_Y2_, torch.matmul(local_G_.t(), local_Gk)))
#                         )
#                         local_Y2_dict[idx][:, k] = y2
#                     if np.linalg.norm(
#                             local_Y2_dict[idx].cpu().numpy() - local_Y2_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(local_Y2_buffer.cpu().numpy()):
#                         break
#
#                 #计算的公式2
#                 loss = calc_SHDCH_loss(local_B, local_F_buffer, local_G_buffer, local_Y1_dict[idx], local_Y2_dict[idx], S, local_train_L1, local_train_L2, opt)
#                 print(
#                     'Local loss...global round: %3d, client: %3d, local epoch: %3d, loss: %3.3f, comment: update B'
#                     % (round + 1, idx + 1, iter + 1, loss.item()))
#                 epoch_loss['total'].append(loss.item())
#                 epoch_weights['image'] = img_w
#                 epoch_weights['text'] = txt_w
#
#
#             # 计算这个客户端的损失
#             # epoch_loss['total'] = sum(epoch_loss['total']) / len(epoch_loss['total'])
#             epoch_loss['total'] = sum(epoch_loss['total'])
#
#             # 记录这个客户端的损失
#             local_losses.append(copy.deepcopy(epoch_loss['total']))
#
#             # calculate delta for local update
#             img_diff = OrderedDict({
#                 k: p1 - p0
#                 for (k, p1), p0 in zip(
#                     epoch_weights['image'].items(),
#                     img_frz_model_params.values(), )})
#             txt_diff = OrderedDict({
#                 k: p1 - p0
#                 for (k, p1), p0 in zip(
#                     epoch_weights['text'].items(),
#                     txt_frz_model_params.values(), )})
#
#             # update client model  使用加差异的方法更新客户端模型,这个就是wt
#             img_updated_params = []
#             for param, diff in zip(
#                     img_client_model_params_list[idx], img_diff.values()
#             ):
#                 img_updated_params.append((param + diff).detach())
#                 img_update_diff[idx].append(diff.detach())
#             # img_updated_diff就是wt-w0
#             img_client_model_params_list[idx] = img_updated_params
#             txt_updated_params = []
#             for param, diff in zip(
#                     txt_client_model_params_list[idx], txt_diff.values()
#             ):
#                 txt_updated_params.append((param + diff).detach())
#                 txt_update_diff[idx].append(diff.detach())
#             txt_client_model_params_list[idx] = txt_updated_params
#
#
#             del img_model_net, txt_model_net
#
#             gc.collect()
#
#         # calculate avg clients' losses as global  计算这个轮次的全局平均损失
#         loss_avg = sum(local_losses) / len(local_losses)
#         print('Global loss...global round: %3d, loss: %3.3f' % (round + 1, loss_avg))
#         result['loss'].append(float(loss_avg))
#
#         # 聚合全局权重Wt
#         img_global_model_params_list, txt_global_model_params_list = FedCMR_agg_model(img_client_model_params_list, txt_client_model_params_list, cilent_weights, local_losses, opt)
#
#         # 为每个客户端进行本地更新模型
#         for client in range(opt.num_users):
#             img_client_model_params_list[client] = FedCMR_local_update(img_update_diff[client], img_global_model_params_list, opt.q_beta)
#             txt_client_model_params_list[client] = FedCMR_local_update(txt_update_diff[client], txt_global_model_params_list, opt.q_beta)
#
#         print(f'\n | local update Round : {round + 1} |\n')
#         # 继续用剩余的20%数据进行本地迭代  并进行验证
#         for idx in idxs_users:
#             # 为当前客户端准备数据
#             idxs = user_groups[idx]
#             selected_idxs = int(len(idxs) * 0.8)
#             # pick up the certain client's training image, text and label dataset，继续用剩余的20%数据进行本地迭代
#             local_train_x = train_x[idxs][selected_idxs:]
#             local_train_y = train_y[idxs][selected_idxs:]
#             local_train_L = train_L[idxs][selected_idxs:]
#             # 第一层标签
#             local_train_L1 = local_train_L[:, 0:opt.num_class1]
#             # 第二层标签
#             local_train_L2 = local_train_L[:, opt.num_class1:]
#             if opt.use_gpu:
#                 local_train_x = local_train_x.cuda()
#                 local_train_y = local_train_y.cuda()
#                 local_train_L = local_train_L.cuda()
#                 local_train_L1 = local_train_L1.cuda()
#                 local_train_L2 = local_train_L2.cuda()
#
#             # 加载这个客户端的模型,也就是刚刚聚合的全局模型
#             client_img_parameters = OrderedDict(
#                 {
#                     k: p
#                     for k, p in zip(
#                     img_all_params_name,
#                     img_client_model_params_list[idx]
#                 )
#                 }
#             )
#             client_txt_parameters = OrderedDict(
#                 {
#                     k: p
#                     for k, p in zip(
#                     txt_all_params_name,
#                     txt_client_model_params_list[idx]
#                 )
#                 }
#             )
#             img_model_net = copy.deepcopy(img_model)
#             img_model_net.load_state_dict(client_img_parameters, strict=True)
#             txt_model_net = copy.deepcopy(txt_model)
#             txt_model_net.load_state_dict(client_txt_parameters, strict=True)
#
#
#             epoch_weights = {'image': img_model_net.state_dict(),
#                              'text': txt_model_net.state_dict()}
#             # the weights before train  # 保存当前模型参数W0，供后续计算参数变化
#             img_frz_model_params = OrderedDict({
#                 name: param.clone().detach().requires_grad_(param.requires_grad)
#                 for name, param in epoch_weights['image'].items()})
#             txt_frz_model_params = OrderedDict({
#                 name: param.clone().detach().requires_grad_(param.requires_grad)
#                 for name, param in epoch_weights['text'].items()})
#
#             # 用20%的数据进行训练
#             # for iter in range(opt.train_ep):
#             for iter in range(2):
#                 # update local weights every epoch
#                 img_model_net.load_state_dict(epoch_weights['image'])
#                 txt_model_net.load_state_dict(epoch_weights['text'])
#
#                 local_img_model = Local_Hierarchical_ImgUpdate(opt=opt, F_buffer=local_F_train_buffer_dict[idx],
#                                                                Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
#                                                                B=local_B_train_dict[idx], train_img=local_train_x,
#                                                                train_label=local_train_L)
#                 # img_w是更新后的模型参数，local_F_buffer是更新后的图像特征缓冲区
#                 img_w, local_F_buffer = local_img_model.update_weights_het(model=img_model_net)
#                 local_F_train_buffer_dict[idx] = local_F_buffer
#
#                 local_txt_model = Local_Hierarchical_TxtUpdate(opt=opt, G_buffer=local_G_train_buffer_dict[idx],
#                                                                Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
#                                                                B=local_B_train_dict[idx], train_txt=local_train_y,
#                                                                train_label=local_train_L)
#                 txt_w, local_G_buffer = local_txt_model.update_weights_het(model=txt_model_net)
#                 local_G_train_buffer_dict[idx] = local_G_buffer
#
#                 # update B
#                 local_B = torch.sign(opt.gamma * (local_F_buffer + local_G_buffer))
#                 local_B_train_dict[idx] = local_B
#
#                 # update Y
#                 local_Q1 = opt.bit * (opt.alpha * (
#                             torch.matmul(local_F_buffer.t(), local_train_L1.to(torch.float32)) + torch.matmul(
#                         local_G_buffer.t(), local_train_L1.to(torch.float32)))
#                                       + opt.eta * torch.matmul(local_Y2_dict[idx].t(), S.to(torch.float32).t()))
#
#                 local_Q2 = opt.bit * (opt.beta * (
#                             torch.matmul(local_F_buffer.t(), local_train_L2.to(torch.float32)) + torch.matmul(
#                         local_G_buffer.t(), local_train_L2.to(torch.float32)))
#                                       + opt.eta * torch.matmul(local_Y1_dict[idx].t(), S.to(torch.float32)))
#
#                 for i in range(3):
#                     local_Y1_buffer = local_Y1_dict[idx]
#                     for k in range(opt.bit):
#                         local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
#                         local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
#                         local_y1k = local_Y1_dict[idx][:, k]
#                         local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
#                         local_y2k = local_Y2_dict[idx][:, k]
#                         local_Fk = local_F_buffer[:, k]
#                         local_F_ = local_F_buffer[:, local_sel_ind]
#                         local_Gk = local_G_buffer[:, k]
#                         local_G_ = local_G_buffer[:, local_sel_ind]
#
#                         y1 = torch.sign(
#                             local_Q1[k, :]
#                             - opt.eta * torch.matmul(local_Y1_, torch.matmul(local_Y2_.t(), local_y2k))
#                             - opt.alpha * (torch.matmul(local_Y1_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(
#                                 local_Y1_, torch.matmul(local_G_.t(), local_Gk)))
#                         )
#                         local_Y1_dict[idx][:, k] = y1
#                     if np.linalg.norm(
#                             local_Y1_dict[idx].cpu().numpy() - local_Y1_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(
#                         local_Y1_buffer.cpu().numpy()):
#                         break
#
#                 for i in range(3):
#                     local_Y2_buffer = local_Y2_dict[idx]
#                     for k in range(opt.bit):
#                         local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
#                         local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
#                         local_y1k = local_Y1_dict[idx][:, k]
#                         local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
#                         local_y2k = local_Y2_dict[idx][:, k]
#                         local_Fk = local_F_buffer[:, k]
#                         local_F_ = local_F_buffer[:, local_sel_ind]
#                         local_Gk = local_G_buffer[:, k]
#                         local_G_ = local_G_buffer[:, local_sel_ind]
#
#                         y2 = torch.sign(
#                             local_Q2[k, :]
#                             - opt.eta * torch.matmul(local_Y2_, torch.matmul(local_Y1_.t(), local_y1k))
#                             - opt.beta * (torch.matmul(local_Y2_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(
#                                 local_Y2_, torch.matmul(local_G_.t(), local_Gk)))
#                         )
#                         local_Y2_dict[idx][:, k] = y2
#                     if np.linalg.norm(
#                             local_Y2_dict[idx].cpu().numpy() - local_Y2_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(
#                         local_Y2_buffer.cpu().numpy()):
#                         break
#
#                 # 记录更新后的参数
#                 epoch_weights['image'] = img_w
#                 epoch_weights['text'] = txt_w
#
#         # calculate delta for local update
#         img_diff = OrderedDict({
#             k: p1 - p0
#             for (k, p1), p0 in zip(
#                 epoch_weights['image'].items(),
#                 img_frz_model_params.values(), )})
#         txt_diff = OrderedDict({
#             k: p1 - p0
#             for (k, p1), p0 in zip(
#                 epoch_weights['text'].items(),
#                 txt_frz_model_params.values(), )})
#
#         # update client model  使用加差异的方法更新客户端模型,这个就是wt
#         img_updated_params = []
#         for param, diff in zip(
#                 img_client_model_params_list[idx], img_diff.values()
#         ):
#             img_updated_params.append((param + diff).detach())
#         img_client_model_params_list[idx] = img_updated_params
#         txt_updated_params = []
#         for param, diff in zip(
#                 txt_client_model_params_list[idx], txt_diff.values()
#         ):
#             txt_updated_params.append((param + diff).detach())
#         txt_client_model_params_list[idx] = txt_updated_params
#
#         print('Local update...global round: %3d, client: %3d,  comment: update W' % (round + 1, idx + 1,))
#         del img_model_net, txt_model_net
#
#         gc.collect()
#
#         # valid  每5个通讯轮次进行一次评估
#         if (round + 1) % 5 == 0 or round == 0:
#             if opt.valid:
#                 for idx in range(opt.num_users):
#                     img_parameters = OrderedDict(
#                         {
#                             k: p
#                             for k, p in zip(
#                             img_all_params_name,
#                             img_client_model_params_list[idx]
#                         )
#                         }
#                     )
#                     txt_parameters = OrderedDict(
#                         {
#                             k: p
#                             for k, p in zip(
#                             txt_all_params_name,
#                             txt_client_model_params_list[idx]
#                         )
#                         }
#                     )
#                     img_model.load_state_dict(img_parameters)
#                     txt_model.load_state_dict(txt_parameters)
#                     mapi2t, mapt2i = valid(opt, img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
#                                            query_L, retrieval_L)
#                     print('...round: %3d, client: %3d, valid MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (
#                     round + 1, idx + 1, mapi2t, mapt2i))
#                     result['mapi2t'][idx].append(mapi2t.item())
#                     result['mapt2i'][idx].append(mapt2i.item())
#                     if mapt2i >= max_mapt2i[idx] and mapi2t >= max_mapi2t[idx]:
#                         max_mapi2t[idx] = mapi2t.item()
#                         max_mapt2i[idx] = mapt2i.item()
#                         img_model.save(recode_path)
#                         txt_model.save(recode_path)
#
#
#     print('...training procedure finish')
#     if opt.valid:
#         max_mapi2t_avg = sum(max_mapi2t) / len(max_mapi2t)
#         max_mapt2i_avg = sum(max_mapt2i) / len(max_mapt2i)
#         print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (max_mapi2t_avg, max_mapt2i_avg))
#         result['max_mapi2t_list'] = max_mapi2t
#         result['max_mapt2i_list'] = max_mapt2i
#         result['max_mapi2t'] = max_mapi2t_avg
#         result['max_mapt2i'] = max_mapt2i_avg
#     else:
#         mapi2t, mapt2i = valid(opt,img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
#                                query_L, retrieval_L)
#         print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (mapi2t, mapt2i))
#         result['max_mapi2t'] = mapi2t
#         result['max_mapt2i'] = mapt2i
#
#     write_result(recode_path,result)


def FedCMR_taskheter(opt, user_groups, img_model, txt_model, img_client_model_params_list, txt_client_model_params_list, img_all_params_name, txt_all_params_name, recode_path):
    #加载层次数据
    images, tags, labels = load_Hierarchical_data(opt.dataset, opt.data_path)
    #创建标签的关联矩阵，如果某个第一层标签i是某个第二层标签j的父标签，则S[i][j] = 1
    S = torch.zeros(opt.num_class1, opt.num_class2)
    index1 = 0
    index2 = 0
    for i in range(0, len(labels)):
        for j in range(0, len(labels[0])):
            if (labels[i][j] == 1 and j < opt.num_class1):
                index1 = j
            elif labels[i][j] == 1 and j >= opt.num_class1:
                index2 = j
        S[index1][index2 - opt.num_class1] = 1
    if opt.use_gpu:
        S = S.cuda()

    X, Y, L = split_data(opt, images, tags, labels)

    train_L = torch.from_numpy(L['train'])
    train_x = torch.from_numpy(X['train'])
    train_y = torch.from_numpy(Y['train'])

    query_L = torch.from_numpy(L['query'])[:,opt.num_class1:] #使用第二层的标签进行评估
    query_x = torch.from_numpy(X['query'])
    query_y = torch.from_numpy(Y['query'])

    retrieval_L = torch.from_numpy(L['retrieval'])[:,opt.num_class1:] #使用第二层的标签进行评估
    retrieval_x = torch.from_numpy(X['retrieval'])
    retrieval_y = torch.from_numpy(Y['retrieval'])




    #结果记录
    result = {
        'loss': [],
        'mapi2t': [[] for idx in range(opt.num_users)],
        'mapt2i': [[] for idx in range(opt.num_users)]
    }

    max_mapi2t = [0. for idx in range(opt.num_users)]
    max_mapt2i = [0. for idx in range(opt.num_users)]

    # 获得客户端索引
    idxs_users = np.arange(opt.num_users)

    # 初始化客户端的缓冲区：F_buffer, G_buffer, Y_buffer, B（B是从F和G计算得到的）  先计算80%的数据
    local_F_buffer_dict, local_G_buffer_dict, local_Y1_dict, local_Y2_dict, local_B_dict, local_F_train_buffer_dict , local_G_train_buffer_dict, local_B_train_dict= {}, {}, {}, {}, {}, {}, {}, {}
    for i in range(opt.num_users):
        num_train = len(user_groups[i])  # 获取每个客户端的训练样本数
        F_buffer = torch.randn(num_train, opt.bit)  # 随机初始化图像特征缓冲区
        G_buffer = torch.randn(num_train, opt.bit)  # 随机初始化文本特征缓冲区
        Y1_buffer = torch.sign(torch.randn(opt.num_class1, opt.bit))  # 随机初始化第一层类标签的哈希码
        Y2_buffer = torch.sign(torch.randn(opt.num_class2, opt.bit))  # 随机初始化第一层类标签的哈希码

        if opt.use_gpu:
            F_buffer = F_buffer.cuda()
            G_buffer = G_buffer.cuda()
            Y1_buffer = Y1_buffer.cuda()
            Y2_buffer = Y2_buffer.cuda()

        # 计算 B，B 是 F 和 G 的和的符号（sign）作为初始的哈希码
        B = torch.sign(F_buffer + G_buffer)
        # 将 F, G, Y1, Y2, B 存入字典中
        local_F_buffer_dict[i] = F_buffer
        local_G_buffer_dict[i] = G_buffer
        local_Y1_dict[i] = Y1_buffer
        local_Y2_dict[i] = Y2_buffer
        local_B_dict[i] = B


    #计算每个客户端的权重，这是文章中的Sk，对于每个客户端来说是固定的
    cilent_weights = cacl_FedCMR_weight(opt, user_groups, train_L[:,opt.num_class1:])


    for round in range(opt.max_epoch):
        #初始化损失
        local_losses = []
        print(f'\n | Global Training Round : {round + 1} |\n')
        print(f'\n | local Training Round : {round + 1} |\n')
        # 记录更新差异，方便后续计算
        img_update_diff = [[] for _ in range(opt.num_users)]
        txt_update_diff = [[] for _ in range(opt.num_users)]
        for idx in idxs_users:
            # 为当前客户端准备数据
            idxs = user_groups[idx]
            # pick up the certain client's training image, text and label dataset，先使用80%的数据进行训练，文章是随机采取，这里已经随机过了，所以使用前80%的数据
            local_train_x = train_x[idxs]
            local_train_y = train_y[idxs]
            local_train_L = train_L[idxs]
            #第一层标签
            local_train_L1 = local_train_L[:,0:opt.num_class1]
            #第二层标签
            local_train_L2 = local_train_L[:,opt.num_class1:]
            if opt.use_gpu:
                local_train_x = local_train_x.cuda()
                local_train_y = local_train_y.cuda()
                local_train_L = local_train_L.cuda()
                local_train_L1 = local_train_L1.cuda()
                local_train_L2 = local_train_L2.cuda()


            # 加载这个客户端的模型,也就是 W0
            client_img_parameters = OrderedDict(
                {
                    k: p
                    for k, p in zip(
                    img_all_params_name,
                    img_client_model_params_list[idx]
                )
                }
            )
            client_txt_parameters = OrderedDict(
                {
                    k: p
                    for k, p in zip(
                    txt_all_params_name,
                    txt_client_model_params_list[idx]
                )
                }
            )
            img_model_net = copy.deepcopy(img_model)
            img_model_net.load_state_dict(client_img_parameters, strict=True)
            txt_model_net = copy.deepcopy(txt_model)
            txt_model_net.load_state_dict(client_txt_parameters, strict=True)

            epoch_loss = {'total': []}
            epoch_weights = {'image': img_model_net.state_dict(),
                             'text': txt_model_net.state_dict()}
            # the weights before train  # 保存当前模型参数W0，供后续计算参数变化
            img_frz_model_params = OrderedDict({
                name: param.clone().detach().requires_grad_(param.requires_grad)
                for name, param in epoch_weights['image'].items()})
            txt_frz_model_params = OrderedDict({
                name: param.clone().detach().requires_grad_(param.requires_grad)
                for name, param in epoch_weights['text'].items()})


            #进行训练
            for iter in range(opt.train_ep):
                # update local weights every epoch
                img_model_net.load_state_dict(epoch_weights['image'])
                txt_model_net.load_state_dict(epoch_weights['text'])

                local_img_model = Local_Hierarchical_ImgUpdate(opt=opt, F_buffer=local_F_buffer_dict[idx], Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
                                                 B=local_B_dict[idx], train_img=local_train_x,
                                                 train_label=local_train_L)
                #img_w是更新后的模型参数，local_F_buffer是更新后的图像特征缓冲区
                img_w, local_F_buffer = local_img_model.update_weights_het(model=img_model_net)
                local_F_buffer_dict[idx] = local_F_buffer

                local_txt_model = Local_Hierarchical_TxtUpdate(opt=opt, G_buffer=local_G_buffer_dict[idx], Y1=local_Y1_dict[idx], Y2=local_Y2_dict[idx],
                                                 B=local_B_dict[idx], train_txt=local_train_y,
                                                 train_label=local_train_L)
                txt_w, local_G_buffer = local_txt_model.update_weights_het(model=txt_model_net)
                local_G_buffer_dict[idx] = local_G_buffer

                # update B
                local_B = torch.sign(opt.gamma * (local_F_buffer + local_G_buffer))
                local_B_dict[idx] = local_B

                # update Y
                local_Q1 = opt.bit * ( opt.alpha * (torch.matmul(local_F_buffer.t(), local_train_L1.to(torch.float32)) + torch.matmul(local_G_buffer.t(), local_train_L1.to(torch.float32)))
                                     + opt.eta * torch.matmul(local_Y2_dict[idx].t(), S.to(torch.float32).t()))

                local_Q2 = opt.bit * ( opt.beta * (torch.matmul(local_F_buffer.t(), local_train_L2.to(torch.float32)) + torch.matmul(local_G_buffer.t(), local_train_L2.to(torch.float32)))
                                      + opt.eta * torch.matmul(local_Y1_dict[idx].t(), S.to(torch.float32)))


                # Update Y1 (fine-level label embedding) by discrete cyclic
                # coordinate descent: optimize one hash-bit column at a time,
                # for up to 3 sweeps or until the update converges.
                for i in range(3):
                    # BUG FIX: the original did
                    #   local_Y1_buffer = local_Y1_dict[idx]
                    # which aliases the tensor instead of snapshotting it; the
                    # in-place column writes below then mutated the "buffer" as
                    # well, so the convergence check compared the tensor with
                    # itself (difference always 0) and broke after a single
                    # sweep. Clone to take a true pre-sweep snapshot.
                    local_Y1_buffer = local_Y1_dict[idx].clone()
                    for k in range(opt.bit):
                        # all bit columns except the one being updated
                        local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
                        local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
                        local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
                        local_y2k = local_Y2_dict[idx][:, k]
                        local_Fk = local_F_buffer[:, k]
                        local_F_ = local_F_buffer[:, local_sel_ind]
                        local_Gk = local_G_buffer[:, k]
                        local_G_ = local_G_buffer[:, local_sel_ind]

                        # Closed-form sign update for bit column k of Y1.
                        y1 = torch.sign(
                            local_Q1[k, :]
                            - opt.eta * torch.matmul(local_Y1_, torch.matmul(local_Y2_.t(), local_y2k))
                            - opt.alpha * (torch.matmul(local_Y1_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(local_Y1_, torch.matmul(local_G_.t(), local_Gk)))
                        )
                        local_Y1_dict[idx][:, k] = y1
                    # Stop sweeping once Y1 changes negligibly relative to the
                    # norm of its pre-sweep snapshot.
                    if np.linalg.norm(
                            local_Y1_dict[idx].cpu().numpy() - local_Y1_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(local_Y1_buffer.cpu().numpy()):
                        break

                # Update Y2 (coarse-level label embedding) by discrete cyclic
                # coordinate descent, mirroring the Y1 update above: one bit
                # column at a time, up to 3 sweeps or until convergence.
                for i in range(3):
                    # BUG FIX: the original did
                    #   local_Y2_buffer = local_Y2_dict[idx]
                    # which aliases the tensor instead of snapshotting it; the
                    # in-place column writes below then mutated the "buffer" as
                    # well, so the convergence check compared the tensor with
                    # itself (difference always 0) and broke after a single
                    # sweep. Clone to take a true pre-sweep snapshot.
                    local_Y2_buffer = local_Y2_dict[idx].clone()
                    for k in range(opt.bit):
                        # all bit columns except the one being updated
                        local_sel_ind = np.setdiff1d([ii for ii in range(opt.bit)], k)
                        local_Y1_ = local_Y1_dict[idx][:, local_sel_ind]
                        local_y1k = local_Y1_dict[idx][:, k]
                        local_Y2_ = local_Y2_dict[idx][:, local_sel_ind]
                        local_Fk = local_F_buffer[:, k]
                        local_F_ = local_F_buffer[:, local_sel_ind]
                        local_Gk = local_G_buffer[:, k]
                        local_G_ = local_G_buffer[:, local_sel_ind]

                        # Closed-form sign update for bit column k of Y2.
                        y2 = torch.sign(
                            local_Q2[k, :]
                            - opt.eta * torch.matmul(local_Y2_, torch.matmul(local_Y1_.t(), local_y1k))
                            - opt.beta * (torch.matmul(local_Y2_, torch.matmul(local_F_.t(), local_Fk)) + torch.matmul(local_Y2_, torch.matmul(local_G_.t(), local_Gk)))
                        )
                        local_Y2_dict[idx][:, k] = y2
                    # Stop sweeping once Y2 changes negligibly relative to the
                    # norm of its pre-sweep snapshot.
                    if np.linalg.norm(
                            local_Y2_dict[idx].cpu().numpy() - local_Y2_buffer.cpu().numpy()) < 1e-6 * np.linalg.norm(local_Y2_buffer.cpu().numpy()):
                        break

                # Compute the local SHDCH loss (objective Eq. 2 of the paper)
                # for logging.
                loss = calc_SHDCH_loss(local_B, local_F_buffer, local_G_buffer, local_Y1_dict[idx], local_Y2_dict[idx], S, local_train_L1, local_train_L2, opt)
                print(
                    'Local loss...global round: %3d, client: %3d, local epoch: %3d, loss: %3.3f, comment: update B'
                    % (round + 1, idx + 1, iter + 1, loss.item()))
                epoch_loss['total'].append(loss.item())
                # NOTE(review): overwritten every local epoch — only the final
                # local epoch's weights feed the delta computation below.
                epoch_weights['image'] = img_w
                epoch_weights['text'] = txt_w


            # This client's loss over all local epochs (summed, not averaged;
            # the averaged variant is deliberately kept commented out).
            # epoch_loss['total'] = sum(epoch_loss['total']) / len(epoch_loss['total'])
            epoch_loss['total'] = sum(epoch_loss['total'])

            # Record this client's loss for the current round.
            local_losses.append(copy.deepcopy(epoch_loss['total']))

            # calculate delta for local update: per-parameter difference between
            # the locally trained weights and the frozen pre-training snapshot.
            # Assumes *_frz_model_params iterate in the same key order as the
            # state dicts — TODO confirm.
            img_diff = OrderedDict({
                k: p1 - p0
                for (k, p1), p0 in zip(
                    epoch_weights['image'].items(),
                    img_frz_model_params.values(), )})
            txt_diff = OrderedDict({
                k: p1 - p0
                for (k, p1), p0 in zip(
                    epoch_weights['text'].items(),
                    txt_frz_model_params.values(), )})

            # update client model: add the delta onto the stored client
            # parameters, yielding the client's new weights w_t.
            img_updated_params = []
            for param, diff in zip(
                    img_client_model_params_list[idx], img_diff.values()
            ):
                img_updated_params.append((param + diff).detach())
                img_update_diff[idx].append(diff.detach())
            # img_update_diff accumulates the deltas (w_t - w_0) per client
            img_client_model_params_list[idx] = img_updated_params
            txt_updated_params = []
            for param, diff in zip(
                    txt_client_model_params_list[idx], txt_diff.values()
            ):
                txt_updated_params.append((param + diff).detach())
                txt_update_diff[idx].append(diff.detach())
            txt_client_model_params_list[idx] = txt_updated_params


            # Release this client's working models before moving on.
            del img_model_net, txt_model_net

            gc.collect()

        # calculate avg clients' losses as global: mean of this round's
        # per-client losses.
        loss_avg = sum(local_losses) / len(local_losses)
        print('Global loss...global round: %3d, loss: %3.3f' % (round + 1, loss_avg))
        result['loss'].append(float(loss_avg))

        # Aggregate the clients' parameters into global weights W_t.
        # NOTE(review): `cilent_weights` [sic] is defined before this chunk —
        # presumably per-client data-size weights; verify against caller.
        img_global_model_params_list, txt_global_model_params_list = FedCMR_agg_model(img_client_model_params_list, txt_client_model_params_list, cilent_weights, local_losses, opt)

        # Personalized local update for every client: combine the client's
        # accumulated delta with the new global weights (mixing factor
        # opt.q_beta).
        for client in range(opt.num_users):
            img_client_model_params_list[client] = FedCMR_local_update(img_update_diff[client], img_global_model_params_list, opt.q_beta)
            txt_client_model_params_list[client] = FedCMR_local_update(txt_update_diff[client], txt_global_model_params_list, opt.q_beta)

        # valid: evaluate every 5 communication rounds (and after round 1).
        if (round + 1) % 5 == 0 or round == 0:
            if opt.valid:
                for idx in range(opt.num_users):
                    # Rebuild a state dict for this client from the flat
                    # parameter list, keyed by the model's parameter names.
                    img_parameters = OrderedDict(
                        {
                            k: p
                            for k, p in zip(
                            img_all_params_name,
                            img_client_model_params_list[idx]
                        )
                        }
                    )
                    txt_parameters = OrderedDict(
                        {
                            k: p
                            for k, p in zip(
                            txt_all_params_name,
                            txt_client_model_params_list[idx]
                        )
                        }
                    )
                    img_model.load_state_dict(img_parameters)
                    txt_model.load_state_dict(txt_parameters)
                    mapi2t, mapt2i = valid(opt, img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
                                           query_L, retrieval_L)
                    print('...round: %3d, cilent: %3d, valid MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (
                        round + 1, idx + 1, mapi2t, mapt2i))
                    result['mapi2t'][idx].append(mapi2t.item())
                    result['mapt2i'][idx].append(mapt2i.item())
                    # Checkpoint only when BOTH retrieval directions improve
                    # (or tie) for this client.
                    if mapt2i >= max_mapt2i[idx] and mapi2t >= max_mapi2t[idx]:
                        max_mapi2t[idx] = mapi2t.item()
                        max_mapt2i[idx] = mapt2i.item()
                        img_model.save(recode_path)
                        txt_model.save(recode_path)




    print('...training procedure finish')
    if opt.valid:
        # Report the mean over clients of their per-client best MAP scores,
        # and keep both the per-client lists and the averages in the result.
        max_mapi2t_avg = sum(max_mapi2t) / len(max_mapi2t)
        max_mapt2i_avg = sum(max_mapt2i) / len(max_mapt2i)
        print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (max_mapi2t_avg, max_mapt2i_avg))
        result['max_mapi2t_list'] = max_mapi2t
        result['max_mapt2i_list'] = max_mapt2i
        result['max_mapi2t'] = max_mapi2t_avg
        result['max_mapt2i'] = max_mapt2i_avg
    else:
        # No per-round validation was run: evaluate once now.
        # NOTE(review): img_model/txt_model still hold the weights of the last
        # client loaded above (or the initial weights if validation never ran)
        # — confirm this is the intended model to evaluate.
        mapi2t, mapt2i = valid(opt,img_model, txt_model, query_x, retrieval_x, query_y, retrieval_y,
                               query_L, retrieval_L)
        print('   max MAP: MAP(i->t): %3.4f, MAP(t->i): %3.4f' % (mapi2t, mapt2i))
        result['max_mapi2t'] = mapi2t
        result['max_mapt2i'] = mapt2i

    # Persist the accumulated results (loss curve, per-client MAP history,
    # best scores) to the record path.
    write_result(recode_path,result)