import os
import datetime
import sys
from config import DefaultConfig
import numpy as np
import torch


from datasets import Priv_NAMES as DATASET_NAMES
from datasets import get_prive_dataset
from data_handler import *



class Logger(object):
    """Tee-style logger: mirrors everything written to *stream* into a file.

    Intended to be assigned to ``sys.stdout`` so that ``print`` output goes
    both to the console and to a persistent log file.
    """

    def __init__(self, filename='default.txt', stream=sys.stdout):
        # Ensure the target directory exists.  dirname() is '' for a bare
        # filename, and os.makedirs('') raises -- so only create when needed.
        log_dir = os.path.dirname(filename)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)

        self.terminal = stream
        self.log = open(filename, 'w')

    def write(self, message):
        """Write *message* to both the original stream and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both underlying streams so no output is lost on exit."""
        self.terminal.flush()
        self.log.flush()


def train(method, dataset, **kwargs):
    """Run one federated training experiment.

    Parameters
    ----------
    method : str
        Name of the federated algorithm (e.g. ``'FedAvg'``); forwarded to
        :class:`DefaultConfig`, which turns it into the ``opt`` namespace.
    dataset : str
        Dataset identifier (e.g. ``'Digits'``); forwarded to DefaultConfig.
    **kwargs
        Extra configuration overrides passed through to DefaultConfig
        (e.g. ``bit``, ``Beta``, ``dirichlet``).

    Side effects: seeds all RNGs, loads the private dataset, prints the
    resolved configuration and the total wall-clock runtime.
    """
    # Record wall-clock start time so we can report total runtime at the end.
    start_time = datetime.datetime.now()
    opt = DefaultConfig(method, dataset, **kwargs).opt

    # Seed every RNG (CUDA, CPU torch, numpy) for reproducibility.
    if opt.use_gpu:
        torch.cuda.set_device(opt.gpu)
        torch.cuda.manual_seed(opt.seed)
    torch.manual_seed(opt.seed)
    np.random.seed(opt.seed)

    # Build the private (federated) dataset object -- e.g. a FedLeaDigits
    # instance -- then split it into per-client train loaders + test loaders.
    priv_dataset = get_prive_dataset(opt)
    pri_train_loaders, test_loaders = load_data(priv_dataset, opt)

    # NOTE(review): the per-method model construction / dispatch code that
    # used to live here (FedCAFE, CAFE_*, AEPFCMH, PT_FUCH, FedAvg, ...)
    # was dead commented-out code and has been removed; recover it from
    # version control if an algorithm branch needs to be reinstated.

    # Print the resolved configuration.
    print(opt)
    print('...loading and splitting data finish')

    # Report total runtime.  total_seconds() is used instead of .seconds,
    # which silently drops whole days and truncates fractions.
    end_time = datetime.datetime.now()
    process_time = end_time - start_time
    print('The program runs ', process_time.total_seconds(), 'seconds')

    # Restore the original stdout only if it was actually redirected to a
    # Logger (the redirect is currently disabled); unconditionally accessing
    # ``.terminal`` on the real sys.stdout raises AttributeError.
    if isinstance(sys.stdout, Logger):
        sys.stdout = sys.stdout.terminal





# def test(**kwargs):
#     opt = DefaultConfig(**kwargs).opt
#
#     images, tags, labels = load_data(opt.datasets, opt.num_class2, opt.data_path)
#     y_dim = tags.shape[1]
#
#     X, Y, L = split_data(opt, images, tags, labels)
#     print('...loading and splitting data finish')
#
#     img_model = ImgModule(opt.bit)
#     txt_model = TxtModule(y_dim, opt.bit)
#
#     if opt.load_img_path:
#         img_model.load(opt.load_img_path)
#
#     if opt.load_txt_path:
#         txt_model.load(opt.load_txt_path)
#
#     if opt.use_gpu:
#         img_model = img_model.cuda()
#         txt_model = txt_model.cuda()
#
#     query_L = torch.from_numpy(L['query'])
#     query_x = torch.from_numpy(X['query'])
#     query_y = torch.from_numpy(Y['query'])
#
#     retrieval_L = torch.from_numpy(L['retrieval'])
#     retrieval_x = torch.from_numpy(X['retrieval'])
#     retrieval_y = torch.from_numpy(Y['retrieval'])
#
#     qBX = generate_image_code(opt,img_model, query_x, opt.bit)
#     qBY = generate_text_code(opt,txt_model, query_y, opt.bit)
#     rBX = generate_image_code(opt,img_model, retrieval_x, opt.bit)
#     rBY = generate_text_code(opt,txt_model, retrieval_y, opt.bit)
#
#     if opt.use_gpu:
#         query_L = query_L.cuda()
#         retrieval_L = retrieval_L.cuda()
#
#     mapi2t = calc_map_k(qBX, rBY, query_L, retrieval_L)
#     mapt2i = calc_map_k(qBY, rBX, query_L, retrieval_L)
#     print('...test MAP: MAP(i->t): %3.3f, MAP(t->i): %3.3f' % (mapi2t, mapt2i))






if __name__ == '__main__':
    # Entry point: launch federated training with the default experiment
    # settings (FedAvg on Digits, 32-bit codes, Dirichlet split, beta=0.5).
    train(
        method='FedAvg',
        dataset='Digits',
        dirichlet=True,
        bit=32,
        Beta=0.5,
    )




