import argparse
import torch
import random
import numpy as np

from algorithm.ClientTrainer import ClientTrainer
from utils.config import Config

import os
import time
import logging

from model.model import MMModel
from dataloader.datasets import MMDataset, split_datasets, get_data_ratio
from utils.dist_tools import init_distributed_mode, cleanup

from collections import defaultdict
from torch.utils.data import ConcatDataset
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from model.MultiModalAttentionModel import MultiModalAttentionModel
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch.nn import MultiheadAttention

def parse_args():
    """Build the CLI argument parser and return the parsed arguments.

    Returns:
        argparse.Namespace carrying dataset, directory, training, model and
        distributed-launch options.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--dataset', type=str, default='N24News',
                        help='support N24News/Food101')

    # Fix: default was the string '12' with type=int; use a plain int so the
    # default already has the type the option produces.
    parser.add_argument('--num_clients', type=int, default=12)

    # --------------directory configuration----------------
    parser.add_argument('--pretrained_dir', type=str, default='pretrained',
                        help='path to pretrained models from Hugging Face.')
    parser.add_argument('--data_dir', type=str, default='./data',
                        help='support wmsa')
    parser.add_argument('--logs_dir', type=str, default='./logs',
                        help='path to log results.')
    parser.add_argument('--model_save_dir', type=str, default='./pths',
                        help='path to save model parameters.')

    # --------------training configuration----------------
    # NOTE(review): with nargs='+' a user-supplied --seed produces a list,
    # while the default is a bare int; setup_seed() expects a single int —
    # confirm how multi-seed runs are meant to be handled.
    parser.add_argument('--seed', nargs='+', type=int, default=0,
                        help='set seeds for multiple runs!')
    parser.add_argument('--um_global_epochs', type=int, default=2)
    parser.add_argument('--mm_global_epochs', type=int, default=2)

    # --------------model configuration----------------
    parser.add_argument('--text_encoder', type=str, default='bert_base',
                        help='bert_base/roberta_base/bert_large')
    parser.add_argument('--image_encoder', type=str, default='vit_base',
                        help='vit_base/vit_large')
    parser.add_argument('--text_out', type=int, default=768,
                        help='text_out')
    parser.add_argument('--img_out', type=int, default=768,
                        help='img_out')

    # --------------distribution configuration----------------
    parser.add_argument('--dist_on_itp', action='store_true')
    # Filled in automatically by the distributed launcher.
    parser.add_argument('--local-rank', type=int, default=0)
    parser.add_argument('--dist_url', default='env://')

    parser.add_argument('--data_type', type=str, default='it',
                        help='image/text/it')

    return parser.parse_args()

def setup_seed(seed):
    """Seed every RNG in use for reproducibility.

    Seeds Python's `random`, NumPy, and torch (CPU and all CUDA devices),
    and forces deterministic cuDNN kernels.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

def set_log(args):
    """Configure the root logger with a file handler and a stream handler.

    The log file is '<dataset>-<str_time>.log' inside args.logs_dir (created
    if missing). Any previously attached handlers are removed first so
    repeated calls do not duplicate output.

    Returns:
        The configured root logger.
    """
    if not os.path.exists(args.logs_dir):
        os.makedirs(args.logs_dir)
    log_file_path = os.path.join(args.logs_dir, f'{args.dataset}-{str_time}.log')
    # set logging
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # Silence PIL's very chatty DEBUG output.
    logging.getLogger('PIL').setLevel(logging.WARNING)

    # Fix: iterate over a copy — removing handlers while iterating the live
    # `logger.handlers` list skips every other handler.
    for ph in list(logger.handlers):
        logger.removeHandler(ph)
    # add FileHandler to log file (timestamped records)
    formatter_file = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    fh = logging.FileHandler(log_file_path)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter_file)
    logger.addHandler(fh)
    # add StreamHandler for terminal output (bare messages)
    formatter_stream = logging.Formatter('%(message)s')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter_stream)
    logger.addHandler(ch)
    return logger

def visualize_pca_tsne(features, labels, pca_components=50, perplexity=30, random_state=42, type = 'text'):
    """Project features to 2-D with PCA followed by t-SNE and save a scatter plot.

    :param features: feature matrix (samples x dimensions)
    :param labels: per-sample class labels used for point colors
    :param pca_components: intermediate dimensionality for the PCA step
    :param perplexity: t-SNE perplexity parameter
    :param random_state: seed shared by PCA and t-SNE
    :param type: tag used as the output file prefix ('<type>-tsne');
        NOTE(review): shadows the builtin, kept for keyword-arg callers
    """
    # Stage 1: PCA to shrink dimensionality before the expensive t-SNE step.
    print("Performing PCA...")
    reduced = PCA(n_components=pca_components, random_state=random_state).fit_transform(features)
    print(f"PCA 完成，降维到 {pca_components} 维。")

    # Stage 2: t-SNE down to two dimensions for plotting.
    print("Performing t-SNE...")
    embedded = TSNE(n_components=2, perplexity=perplexity, random_state=random_state).fit_transform(reduced)
    print("t-SNE 完成，降维到 2 维。")

    # Fixed palette of 30 distinct colors, one per class.
    custom_colors = [
        '#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#FF00FF', '#00FFFF',
        '#800000', '#808000', '#008000', '#800080', '#008080', '#000080',
        '#FFA500', '#A52A2A', '#8A2BE2', '#5F9EA0', '#7FFF00', '#D2691E',
        '#FF7F50', '#6495ED', '#DC143C', '#00FFFF', '#00008B', '#008B8B',
        '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F'
    ]
    palette = ListedColormap(custom_colors)

    # Render and save the scatter plot.
    plt.figure(figsize=(10, 8))
    points = plt.scatter(embedded[:, 0], embedded[:, 1], c=labels, cmap=palette, alpha=0.7)
    plt.colorbar(points, label='Class')
    plt.title('PCA + t-SNE Visualization')
    plt.xlabel('Dimension 1')
    plt.ylabel('Dimension 2')
    plt.savefig(type + '-tsne', format='png', dpi=300)
    plt.show()


# Timestamp tag for log file names: "<month>-<day>-<hour>-<minute>"
# (fields are not zero-padded, matching the raw struct_time attributes).
localtime = time.localtime(time.time())
str_time = f'{localtime.tm_mon}-{localtime.tm_mday}-{localtime.tm_hour}-{localtime.tm_min}'
def main():
    """Load a pretrained MultiModalAttentionModel, encode a class-balanced
    sample of the dataset, append the model's attention q/k/v projection
    rows as extra pseudo-classes, and save PCA+t-SNE scatter plots of the
    text and image feature spaces.

    NOTE(review): the function returns right after the visualization; the
    federated training loop below the `return` is unreachable dead code,
    presumably disabled intentionally.
    """
    torch.autograd.set_detect_anomaly(True)
    torch.cuda.empty_cache()

    args = parse_args()

    config = Config(args)
    args = config.get_config()
    setup_seed(args.seed)
    if torch.cuda.device_count() > 1:
        init_distributed_mode(args)
    else:
        args.gpu = 0
    torch.cuda.set_device(args.gpu)
    args.device = torch.device("cuda", args.gpu)
    
    logger = set_log(args)

    args.data_dir = os.path.join(args.data_dir, args.dataset)

    logger.info("Pytorch version: " + torch.__version__)
    logger.info("CUDA version: " + torch.version.cuda)
    # NOTE(review): stray '+' inside the f-string below — cosmetic only.
    logger.info(f"CUDA device: + {torch.cuda.current_device()}")
    logger.info(f"CUDNN version: {torch.backends.cudnn.version()}")
    logger.info("GPU name: " + torch.cuda.get_device_name())
    logger.info("Current Hyper-Parameters:")
    logger.info(args)

    if not os.path.exists(args.model_save_dir):
        os.makedirs(args.model_save_dir)
    print('setting seed:', args.seed)
    args.seed = str(args.seed)

    # NOTE(review): num_clients is forced to 1 for this visualization run.
    args.num_clients = 1

    # train_sets, valid_sets, test_set = split_datasets(args)

    if args.dataset in ['N24News']:
        train_set = MMDataset(args, 'news/nytimes_train.json')
        valid_set = MMDataset(args, 'news/nytimes_dev.json')
        test_set = MMDataset(args, 'news/nytimes_test.json')
        whole_dataset = ConcatDataset([train_set, valid_set, test_set])
    

    # Group sample indices by class label.
    class_to_indices = defaultdict(list)
    for idx in range(len(whole_dataset)):
        _, _, _, _, label = whole_dataset[idx]
        class_to_indices[label].append(idx)

    # Draw a class-balanced subsample: up to 200 samples per class.
    num_samples_per_class = 200
    sampled_indices = []
    for label, indices in class_to_indices.items():
        if len(indices) >= num_samples_per_class:
            sampled_indices.extend(random.sample(indices, num_samples_per_class))
        else:
            sampled_indices.extend(indices)  # not enough samples: keep them all
    
    
    model = MultiModalAttentionModel(args)
    model.load_state_dict(torch.load(os.path.join(args.model_save_dir, f'{args.dataset}-0-text-{args.seed}-best-torchattn.pth'), map_location='cuda:'+str(args.gpu)))
    model = model.to(args.device)

    # Build the subsampled dataset
    sampled_dataset = torch.utils.data.Subset(whole_dataset, sampled_indices)
    # Wrap it in a DataLoader
    sample_loader = DataLoader(sampled_dataset, batch_size=32, num_workers=args.num_workers, pin_memory=False)
    # Collect the text and image features produced by the model
    text_features = []
    image_features = []
    labels_list = []
    with tqdm(sample_loader) as td:
        for batch_image, text_input_ids, text_token_type_ids, text_attention_mask, batch_label in td:
            text = text_input_ids.to(args.device), text_token_type_ids.to(args.device), text_attention_mask.to(args.device)
            image = batch_image.to(args.device)
            labels = batch_label.to(args.device).view(-1)
            # optimizer.zero_grad()
            text, image = model(text, image, labels, infer=False, as_encoder=True)
            # Split each batch output into per-sample feature vectors
            text_features.extend(text.cpu().detach().numpy())  # store as NumPy arrays
            image_features.extend(image.cpu().detach().numpy())
            labels_list.extend(batch_label.cpu().detach().numpy())

    # Extract the q/k/v projection weights from the model's vt_attn module
    vt_attn = model.vt_attn
    q_weight, k_weight, v_weight = torch.split(vt_attn.in_proj_weight, args.text_out, dim=0)

    q_weight_split = torch.split(q_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    q_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in q_weight_split]  # store as NumPy arrays
    # q rows join the image features; k/v rows join the text features
    image_features = np.concatenate((np.array(image_features), np.array(q_weight_tensors)), axis=0)

    k_weight_split = torch.split(k_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    k_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in k_weight_split]  # store as NumPy arrays
    v_weight_split = torch.split(v_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    v_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in v_weight_split]  # store as NumPy arrays
    # Append the k/v rows to the text features
    text_features = np.concatenate((np.array(text_features), np.array(k_weight_tensors)), axis=0)
    text_features = np.concatenate((np.array(text_features), np.array(v_weight_tensors)), axis=0)

    # Same treatment for tv_attn
    tv_attn = model.tv_attn
    q_weight, k_weight, v_weight = torch.split(tv_attn.in_proj_weight, args.img_out, dim=0)
    # q rows join the text features; k/v rows join the image features
    q_weight_split = torch.split(q_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    q_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in q_weight_split]  # store as NumPy arrays
    # q rows go to the text features
    text_features = np.concatenate((np.array(text_features), np.array(q_weight_tensors)), axis=0)
    # k/v rows
    k_weight_split = torch.split(k_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    k_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in k_weight_split]  # store as NumPy arrays
    v_weight_split = torch.split(v_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    v_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in v_weight_split]  # store as NumPy arrays
    # Append the k/v rows to the image features
    image_features = np.concatenate((np.array(image_features), np.array(k_weight_tensors)), axis=0)
    image_features = np.concatenate((np.array(image_features), np.array(v_weight_tensors)), axis=0)

    # Also include the q/k/v rows of a freshly initialized (untrained)
    # attention layer as a reference point in both feature spaces
    attn = MultiheadAttention(args.text_out, num_heads=4, dropout=0.1)
    q_weight, k_weight, v_weight = torch.split(attn.in_proj_weight, args.text_out, dim=0)
    q_weight_split = torch.split(q_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    q_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in q_weight_split]  # store as NumPy arrays
    # q rows go to both the image and the text features
    image_features = np.concatenate((np.array(image_features), np.array(q_weight_tensors)), axis=0)
    text_features = np.concatenate((np.array(text_features), np.array(q_weight_tensors)), axis=0)
    # k/v rows
    k_weight_split = torch.split(k_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    k_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in k_weight_split]  # store as NumPy arrays
    v_weight_split = torch.split(v_weight, 1, dim=0)  # split by row; each piece is [1, 768]
    v_weight_tensors = [row.squeeze(0).cpu().detach().numpy() for row in v_weight_split]  # store as NumPy arrays
    # Append the k/v rows to both feature sets
    image_features = np.concatenate((np.array(image_features), np.array(k_weight_tensors)), axis=0)
    image_features = np.concatenate((np.array(image_features), np.array(v_weight_tensors)), axis=0)
    text_features = np.concatenate((np.array(text_features), np.array(k_weight_tensors)), axis=0)
    text_features = np.concatenate((np.array(text_features), np.array(v_weight_tensors)), axis=0)


    
    labels = np.array(labels_list)
    # Each of the six appended weight groups gets its own pseudo-class label
    # above the largest real class label, so they plot in distinct colors
    max_label = np.max(labels)
    labels = np.concatenate((labels, (max_label+1) * np.ones((q_weight.shape[0],))), axis=0)
    labels = np.concatenate((labels, (max_label+2) * np.ones((q_weight.shape[0],))), axis=0)
    labels = np.concatenate((labels, (max_label+3) * np.ones((q_weight.shape[0],))), axis=0)
    labels = np.concatenate((labels, (max_label+4) * np.ones((q_weight.shape[0],))), axis=0)
    labels = np.concatenate((labels, (max_label+5) * np.ones((q_weight.shape[0],))), axis=0)
    labels = np.concatenate((labels, (max_label+6) * np.ones((q_weight.shape[0],))), axis=0)


    visualize_pca_tsne(text_features, labels, pca_components=50, perplexity=30, random_state=42, type = 'text')
    visualize_pca_tsne(image_features, labels, pca_components=50, perplexity=30, random_state=42, type = 'image')



    # model = None
    # optimizer = None
    # scheduler = None
    # data_type = 'text'
    # client_trainer = ClientTrainer(args, model, optimizer, scheduler, 0, data_type, logger)
    # client_trainer.set_model()
    # client_trainer.set_optimizer()
    # client_trainer.set_scheduler()
    # client_trainer.set_dataloader(train_sets[0], valid_sets[0], test_set)
    # client_trainer.train()
    # client_trainer.test_epoch()
    
    if torch.cuda.device_count() > 1:
        cleanup()
    return

    # NOTE(review): everything below is unreachable (after `return`) — the
    # original federated uni-modal pretraining + multimodal fusion pipeline.
    # Clients are split into image-only, text-only and multimodal groups.
    img_client_id_list = list(range(0, args.num_clients//3))
    text_client_id_list = list(range(args.num_clients//3, 2*args.num_clients//3))
    mm_client_id_list = list(range(2*args.num_clients//3, args.num_clients))
    img_client_id_list += mm_client_id_list
    text_client_id_list += mm_client_id_list
    
    global_model = MMModel(args)
    for global_epoch in range(args.um_global_epochs):
        # Local training on image-modality clients.
        for client_id in img_client_id_list:
            model = None
            optimizer = None
            scheduler = None
            data_type = 'image'
            client_trainer = ClientTrainer(args, model, optimizer, scheduler, client_id, data_type, logger)
            if global_epoch == 0:
                client_trainer.set_model()
            else:
                client_trainer.load_model(global_model)
            client_trainer.set_optimizer()
            client_trainer.set_scheduler()
            client_trainer.set_dataloader(train_sets[client_id], valid_sets[client_id], test_set)
            client_trainer.train()
            client_trainer.test_epoch()
        
        # Local training on text-modality clients.
        for client_id in text_client_id_list:
            model = None
            optimizer = None
            scheduler = None
            data_type = 'text'
            client_trainer = ClientTrainer(args, model, optimizer, scheduler, client_id, data_type, logger)
            if global_epoch == 0:
                client_trainer.set_model()
            else:
                client_trainer.load_model(global_model)
            client_trainer.set_optimizer()
            client_trainer.set_scheduler()
            client_trainer.set_dataloader(train_sets[client_id], valid_sets[client_id], test_set)
            client_trainer.train()
            client_trainer.test_epoch()

        # server update: weighted (FedAvg-style) aggregation of saved client
        # checkpoints using per-client data ratios
        
        img_data_ratio = get_data_ratio(args, img_client_id_list)
        img_global_state_dict = {}
        img_global_state_dict.update({name: torch.zeros_like(param).to(args.device) for name, param in global_model.image_encoder.named_parameters()})
        img_global_state_dict.update({name: torch.zeros_like(param).to(args.device) for name, param in global_model.image_classfier.named_parameters()})

        for client_id in img_client_id_list:
            client_params = torch.load(os.path.join(args.model_save_dir, f'{args.dataset}-{client_id}-image-{args.seed}-best.pth'))
            for name, param in client_params.items():
                assert name in img_global_state_dict.keys()
                if name == 'image_encoder.model.embeddings.cls_token':
                    print(name)
                img_global_state_dict[name] += param * img_data_ratio[client_id]
        

        text_data_ratio = get_data_ratio(args, text_client_id_list)
        text_global_state_dict = {}
        text_global_state_dict.update({name: torch.zeros_like(param).to(args.device) for name, param in global_model.text_encoder.named_parameters()})
        text_global_state_dict.update({name: torch.zeros_like(param).to(args.device) for name, param in global_model.text_classfier.named_parameters()})

        for client_id in text_client_id_list:
            client_params = torch.load(os.path.join(args.model_save_dir, f'{args.dataset}-{client_id}-text-{args.seed}-best.pth'))
            for name, param in client_params.items():
                # assert name in text_global_state_dict
                text_global_state_dict[name] += param * text_data_ratio[client_id]
        
        global_model.load_image_params(img_global_state_dict)

        global_model.load_text_params(text_global_state_dict)
    
    global_model_save_dir = os.path.join(args.model_save_dir, f'{args.dataset}-global-uni-{args.seed}-best.pth')
    torch.save(global_model.state_dict(), global_model_save_dir)
    
    # -----------------multimodal fusion-----------------
    for global_epoch in range(args.mm_global_epochs):
        for client_id in mm_client_id_list:
            model = None
            optimizer = None
            scheduler = None
            data_type = 'it'
            client_trainer = ClientTrainer(args, model, optimizer, scheduler, client_id, data_type, logger)
            client_trainer.load_model(global_model)
            client_trainer.set_optimizer()
            client_trainer.set_scheduler()
            client_trainer.set_dataloader(train_sets[client_id], valid_sets[client_id], test_set)
            client_trainer.train()
            client_trainer.test_epoch()
        
        # server update
        mm_data_ratio = get_data_ratio(args, mm_client_id_list)
        mm_global_state_dict = {name: torch.zeros_like(param).to(args.device) for name, param in global_model.named_parameters()}
        for client_id in mm_client_id_list:
            client_params = torch.load(os.path.join(args.model_save_dir, f'{args.dataset}-{client_id}-it-{args.seed}-best.pth'))
            for name, param in client_params.items():
                mm_global_state_dict[name] += param * mm_data_ratio[client_id]

        global_model.load_state_dict(mm_global_state_dict)
    
    global_model_save_dir = os.path.join(args.model_save_dir, f'{args.dataset}-global-mm-{args.seed}-best.pth')
    torch.save(global_model.state_dict(), global_model_save_dir)
    
    cleanup()

# Script entry point: run the visualization pipeline when executed directly.
if __name__ == '__main__':
    main()