from config import DefaultConfig
from data_handler import *
import numpy as np
import torch
from models import ImgModule, TxtModule
from utils import get_dataset, calc_map_k, split_data, generate_text_code, generate_image_code
from comparison import PLFedCMH_taskheter, FedAvg_taskheter, FedCMR_taskheter
import os
import datetime
import sys


class Logger(object):
    """Tee-style logger that duplicates everything written to it to both
    the original stream (e.g. ``sys.stdout``) and a log file.

    Intended usage: ``sys.stdout = Logger(path, sys.stdout)``.
    """

    def __init__(self, filename='default.txt', stream=sys.stdout):
        # Ensure the target directory exists.  Skip when the filename has
        # no directory component: os.makedirs('') raises FileNotFoundError,
        # which would crash the default 'default.txt' case.
        log_dir = os.path.dirname(filename)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)

        self.terminal = stream
        self.log = open(filename, 'w')

    def write(self, message):
        """Write *message* to both the terminal stream and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both underlying streams.

        The original no-op here meant buffered output could be lost on a
        crash and ``print(..., flush=True)`` had no effect.
        """
        self.terminal.flush()
        self.log.flush()

    def close(self):
        """Close the log file (the terminal stream is left open)."""
        self.log.close()


def train(method, dataset, **kwargs):
    """Run one federated cross-modal hashing training experiment.

    Args:
        method:  federated method name ('PLFedCMH', 'FedAvg' or 'FedCMR').
        dataset: dataset name forwarded to the config.
        **kwargs: extra config overrides forwarded to DefaultConfig.

    Raises:
        ValueError: if ``opt.method`` is not one of the supported methods
            (the original code silently did nothing in that case).
    """
    # Record the wall-clock start time of the run.
    start_time = datetime.datetime.now()
    opt = DefaultConfig(method, dataset, **kwargs).opt

    # Seed all RNGs for reproducibility; select the CUDA device when GPU
    # training is enabled.  (torch.manual_seed is needed in both cases, so
    # it is hoisted out of the original duplicated if/else.)
    torch.manual_seed(opt.seed)
    if opt.use_gpu:
        torch.cuda.set_device(opt.gpu)
        torch.cuda.manual_seed(opt.seed)
    np.random.seed(opt.seed)

    # Redirect stdout to a tee logger so the training log is also saved to
    # result_dir/method/dataset/<bit>bit/<iid-tag>/train_log.txt.
    iid_unequal = 'iid' if opt.iid else 'noniid_unequal' if opt.unequal else 'noniid_equal'
    recode_path = os.path.join('.', opt.result_dir, opt.method, opt.dataset, str(opt.bit) + 'bit', iid_unequal)
    logger = Logger(os.path.join(recode_path, 'train_log.txt'), sys.stdout)
    sys.stdout = logger

    try:
        # Print the full configuration for the record.
        print(opt)

        # Randomly draw, per client, the number of classes (n_list) and the
        # number of samples per class (k_list).
        n_list = np.random.randint(max(2, opt.ways - opt.stdev), min(opt.num_class2, opt.ways + opt.stdev + 1),
                                   opt.num_users)
        k_list = np.random.randint(opt.shots - opt.stdev + 1, opt.shots + opt.stdev - 1, opt.num_users)

        # Load the pretrained backbone weights for the image model.
        pretrain_model = load_pretrain_model(opt.pretrain_model_path)

        # user_groups is a dict: client id -> indices of that client's
        # training data.
        X, Y, L, user_groups = get_dataset(opt, n_list, k_list)
        y_dim = Y['train'].shape[1]  # label / text-feature dimensionality
        print('...loading and splitting data finish')

        # Layer-wise: one parameter list per client.
        img_client_model_params_list = []
        txt_client_model_params_list = []

        # Build the image and text models whose weights serve as every
        # client's initial parameters.
        img_model = ImgModule(opt.bit, pretrain_model)
        txt_model = TxtModule(y_dim, opt.bit)
        if opt.use_gpu:
            img_model = img_model.cuda()
            txt_model = txt_model.cuda()

        # All parameter names of both models (used for layer-wise handling).
        img_all_params_name = list(img_model.state_dict().keys())
        txt_all_params_name = list(txt_model.state_dict().keys())

        # Give every client a copy of the initial model parameters.
        for _ in range(opt.num_users):
            img_client_model_params_list.append(list(img_model.state_dict().values()))
            txt_client_model_params_list.append(list(txt_model.state_dict().values()))

        if opt.method == 'PLFedCMH':
            PLFedCMH_taskheter(opt, X, Y, L, user_groups, img_model, txt_model,
                               img_client_model_params_list, txt_client_model_params_list,
                               img_all_params_name, txt_all_params_name, recode_path)
        elif opt.method == 'FedAvg':
            del X, Y, L  # free the non-hierarchical data before training
            FedAvg_taskheter(opt, user_groups, img_model, txt_model,
                             img_client_model_params_list, txt_client_model_params_list,
                             img_all_params_name, txt_all_params_name, recode_path)
        elif opt.method == 'FedCMR':
            del X, Y, L  # free the non-hierarchical data before training
            FedCMR_taskheter(opt, user_groups, img_model, txt_model,
                             img_client_model_params_list, txt_client_model_params_list,
                             img_all_params_name, txt_all_params_name, recode_path)
        else:
            # Fail loudly instead of silently skipping training.
            raise ValueError('unknown method: %s' % opt.method)

        # Report the total run time.
        end_time = datetime.datetime.now()
        process_time = end_time - start_time
        print('The program runs ', process_time.seconds, 'seconds')
    finally:
        # Always restore stdout and close the log file, even when training
        # raises — the original leaked the redirection on any exception.
        sys.stdout = logger.terminal
        logger.log.close()





def test(**kwargs):
    """Evaluate the trained image/text hashing models.

    Generates binary hash codes for the query and retrieval splits in both
    modalities and prints the cross-modal MAP scores (image->text and
    text->image).
    """
    opt = DefaultConfig(**kwargs).opt

    images, tags, labels = load_data(opt.dataset, opt.num_class2, opt.data_path)
    y_dim = tags.shape[1]

    X, Y, L = split_data(opt, images, tags, labels)
    print('...loading and splitting data finish')

    # Build the two modality models and optionally restore saved weights.
    img_model = ImgModule(opt.bit)
    txt_model = TxtModule(y_dim, opt.bit)
    if opt.load_img_path:
        img_model.load(opt.load_img_path)
    if opt.load_txt_path:
        txt_model.load(opt.load_txt_path)
    if opt.use_gpu:
        img_model, txt_model = img_model.cuda(), txt_model.cuda()

    # Convert the query and retrieval splits to tensors.
    query_x = torch.from_numpy(X['query'])
    query_y = torch.from_numpy(Y['query'])
    query_L = torch.from_numpy(L['query'])
    retrieval_x = torch.from_numpy(X['retrieval'])
    retrieval_y = torch.from_numpy(Y['retrieval'])
    retrieval_L = torch.from_numpy(L['retrieval'])

    # Hash codes for each modality on each split: q* = query, r* = retrieval,
    # *BX = image codes, *BY = text codes.
    qBX = generate_image_code(opt, img_model, query_x, opt.bit)
    qBY = generate_text_code(opt, txt_model, query_y, opt.bit)
    rBX = generate_image_code(opt, img_model, retrieval_x, opt.bit)
    rBY = generate_text_code(opt, txt_model, retrieval_y, opt.bit)

    if opt.use_gpu:
        query_L, retrieval_L = query_L.cuda(), retrieval_L.cuda()

    # Cross-modal retrieval quality in both directions.
    mapi2t = calc_map_k(qBX, rBY, query_L, retrieval_L)
    mapt2i = calc_map_k(qBY, rBX, query_L, retrieval_L)
    print('...test MAP: MAP(i->t): %3.3f, MAP(t->i): %3.3f' % (mapi2t, mapt2i))






if __name__ == '__main__':
    # Sweep hash-code lengths for FedCMR on FashionVC: first the iid split,
    # then both non-iid partitions (unequal and equal client sizes).
    # The call order matches the original nested-loop version exactly.
    bit_lengths = [16, 32, 64]
    for bit_ in bit_lengths:
        train(method='FedCMR', dataset='FashionVC', iid=True, bit=bit_)
    for unequal_ in (True, False):
        for bit_ in bit_lengths:
            train(method='FedCMR', dataset='FashionVC', iid=False, unequal=unequal_, bit=bit_)