import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader

from data_p import make_basic_dataset
from data_p import samplers
import torch
import collections
from tqdm import tqdm
from modeling import build_model 
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def pca_visa(local1, local2, local3, local4,
             save_path='/home/ubuntu/yuyu/heat_map_picture/448_all_without_distill_pca_visualization.png'):
    """Project four part-feature tensors to 2-D with PCA and plot them together.

    Each input gets its OWN PCA fit, so the four point clouds do not share a
    common basis and inter-cloud distances are not directly comparable.
    # NOTE(review): confirm the per-part (rather than joint) PCA is intended.

    Args:
        local1, local2, local3, local4: 2-D torch tensors
            ``(n_samples, feat_dim)`` of part features; they may live on GPU,
            they are detached and moved to CPU here.
        save_path: file the scatter figure is written to. New optional
            parameter; the default preserves the original hard-coded path.
    """
    features = [local1, local2, local3, local4]
    styles = [('b', 'x'), ('r', 'o'), ('g', '^'), ('y', '*')]

    plt.figure()
    for part_idx, (feat, (color, marker)) in enumerate(zip(features, styles), start=1):
        # .detach() guards against tensors that still require grad
        # (.numpy() on a grad-tracking tensor raises RuntimeError).
        matrix = feat.detach().cpu().numpy()
        points_2d = PCA(2).fit_transform(matrix)
        plt.scatter(points_2d[:, 0], points_2d[:, 1], c=color, marker=marker,
                    label='local{} body'.format(part_idx))

    plt.xlabel('PC1')
    plt.ylabel('PC2')
    plt.legend()
    plt.title('PCA Visualization of Four Body Parts Features')
    # Save before show(): show() may clear the current figure.
    plt.savefig(save_path)
    plt.show()

def t_sne(features, labels,
          save_path='/home/ubuntu/yuyu/gitee_UFDN/UFDN-Reid/T_SNE/11111.png'):
    """Embed ReID features in 2-D with t-SNE and draw one scatter per class.

    Args:
        features: array-like ``(n_samples, feat_dim)`` accepted by
            ``TSNE.fit_transform`` (a CPU torch tensor works).
        labels: 1-D torch tensor of integer identity labels, one per sample.
        save_path: output image file. New optional parameter; the default
            preserves the original hard-coded path.
    """
    # fixed random_state keeps the embedding reproducible across runs
    tsne = TSNE(n_components=2, random_state=42)
    features_tsne = tsne.fit_transform(features)

    colors = ['#FF8C00', '#1E90FF', '#FF69B4', '#00FA9A', '#FF1493', '#8B4513', '#DC143C', '#00CED1', ]
    markers = ['o', 's', 'd', '^', 'v', 'p', '*', '+', ]

    # distinct labels present in this batch (order of first appearance)
    unique_elements, indices = torch.unique(labels, sorted=False, return_inverse=True)
    print("Unique elements:", unique_elements.tolist())

    # Single figure (the original built one extra figure with a cmap scatter
    # and then abandoned it when plt.subplots() opened a second one).
    fig, ax = plt.subplots(figsize=(8, 6))
    class_ids = unique_elements.tolist()
    for ii, cls in enumerate(class_ids):
        idx = np.where(labels == cls)[0]
        # modulo wrap: more than len(colors) classes no longer raises IndexError
        ax.scatter(features_tsne[idx, 0], features_tsne[idx, 1],
                   marker=markers[ii % len(markers)], color=colors[ii % len(colors)])

    # legend now shows the real class ids (the original passed range(8))
    legend = ax.legend(class_ids, title="Classes")
    legend.get_title().set_fontsize(fontsize=10)
    ax.add_artist(legend)

    plt.xlabel('T-SNE Dimension 1', fontsize=12)
    plt.ylabel('T-SNE Dimension 2', fontsize=12)
    plt.title('T-SNE Visualization for Re-identification', fontsize=14)
    # Save before show(): show() may clear the current figure.
    plt.savefig(save_path)
    plt.show()


class MY_PVASFF(torch.nn.Module):
    """Thin wrapper around the project model produced by ``build_model``.

    The wrapped model is moved to CUDA at construction time.  ``self.batch``
    is a slot callers may fill with the current input batch before forward
    (``run_t_sne`` assigns it; the forward pass here does not read it).
    """

    def __init__(self, cfg, classes):
        """Build the wrapped model.

        Args:
            cfg: project config forwarded to ``build_model``.
            classes: ignored — the class count 576 is hard-coded below.
                # NOTE(review): confirm 576 matches the dataset in use.
        """
        super(MY_PVASFF, self).__init__()
        self.PPVASFF = build_model(cfg, 576).to('cuda')
        self.batch = None

    def forward(self, image):
        """Run the wrapped model and return its raw output unchanged.

        Args:
            image: input image batch tensor.

        Returns:
            Whatever ``self.PPVASFF(image)`` returns — presumably a tuple
            whose first element is a classification score (see the
            multi-value unpacking in ``run_t_sne``); confirm against
            ``modeling.build_model``.
        """
        # the original also computed output[0] into an unused local; dropped
        return self.PPVASFF(image)

def run_t_sne(cfg, model_path, use_attention, preview_model):
    """Load a checkpoint into MY_PVASFF and t-SNE-plot features of one batch.

    Control flow is unusual (debug-style): feature extraction, the t_sne()
    call and a deliberate ``raise "sss"`` all sit INSIDE the dataloader
    loop, so only the first validation batch is ever processed.  ``raise``
    with a plain string is illegal in Python 3 and actually raises
    TypeError — it serves as a hard stop; everything after it is
    unreachable (including the pca_visa call).

    Args:
        cfg: project config; ``SOLVER.IMS_PER_BATCH`` and
            ``DATALOADER.NUM_INSTANCE`` are read here.
        model_path: checkpoint file; its ``'state_dict'`` entry is loaded.
        use_attention: unused in this function.
        preview_model: unused in this function.
    """
    device = 'cuda:0'
    # Dataset construction; the inline comments show the cfg fields these
    # hard-coded values replace.
    train_dataset, valid_dataset, meta_dataset = make_basic_dataset(cfg, "/home/ubuntu/yuyu/pven/pven6005/examples/outputs/veri776.pkl", #cfg.data.pkl_path,vehicleid.pkl
                                                                    (256, 256), #cfg.data.train_size,
                                                                    (256, 256), #cfg.data.valid_size,
                                                                    10, #cfg.data.pad,
                                                                    test_ext='',  #test_ext=cfg.data.test_ext,_800
                                                                    re_prob=0.5, #re_prob=cfg.data.re_prob,
                                                                    with_mask=False, #with_mask=cfg.data.with_mask,
                                                                    )
    
    # Identity-balanced sampler over the validation meta dataset.
    sampler = getattr(samplers, 'RandomIdentitySampler')(valid_dataset.meta_dataset, batch_size=cfg.SOLVER.IMS_PER_BATCH, num_instances=cfg.DATALOADER.NUM_INSTANCE)
    val_loader = DataLoader(valid_dataset, sampler=sampler, batch_size=400, num_workers=8,  # cfg.data.test_num_workers
                              pin_memory=True, shuffle=False)
       
    # train_dataset = DataLoader(train_dataset, batch_size=320, num_workers=8,  # cfg.data.test_num_workers
    #                           pin_memory=True, shuffle=False)
    # NOTE(review): train_loader reuses the *validation* sampler and is never
    # consumed below — confirm whether it is needed at all.
    train_loader = DataLoader(train_dataset, sampler=sampler, batch_size=cfg.SOLVER.IMS_PER_BATCH,   #  cfg.data.train_num_workers# 
                              num_workers=8, pin_memory=True)                              
     
    model = MY_PVASFF(cfg, 576)


    # Load the checkpoint on CPU and pick out its state dict.
    state_metas = torch.load(model_path, 'cpu')
    predict_dict = state_metas['state_dict']

    model_dict = model.PPVASFF.state_dict()
    new_dict = collections.OrderedDict()
        # print(predict_dict.keys())
        # print(model_dict.keys())
        # raise "ss"
        # exit()
    # for k, v in predict_dict.items():
    #     if k[7:] in model.state_dict().keys() and v.size() == model.state_dict()[k[7:]].size():
    #         new_dict[k[7:]] = v

    # Keep only checkpoint entries whose key AND shape match the model.
    for k, v in predict_dict.items():
        if k in model.PPVASFF.state_dict().keys() and v.size() == model.PPVASFF.state_dict()[k].size():
            new_dict[k] = v

            
    print('loading params {}'.format(new_dict.keys()))
        # # raise "sss"
    # Fill any parameter missing from the checkpoint with the model's own
    # (randomly initialized) value so load_state_dict sees a full dict.
    for k, v in model_dict.items():
        if k not in new_dict.keys():
            new_dict[k] = v
        
    model_dict.update(new_dict)
    model.PPVASFF.load_state_dict(model_dict)
    # Accumulators; only `feats` is actually filled below.
    Local1 = []
    Local2 = []
    Local3 = []
    Local4 = []
    feats = []
    label_la = []
    stop = 0
    # NOTE: everything below (including t_sne and the hard stop) runs inside
    # this loop, so only the first batch is processed.
    for idx, batch in tqdm(enumerate(val_loader)):
            for name, item in batch.items():
                if isinstance(item, torch.Tensor):
                    batch[name] = item.to(device)
            # batch        
            input_tensor = batch['image']
            # print(batch["id"].shape)
            # raise "ss"
            label = batch["id"].cpu().detach()
            model.batch = batch
            model.eval()
            # local1, local2, local3, local4 = model(input_tensor)    _, _, 
            # print(input_tensor.shape)
            # print(label)
            # # Use torch.unique to get the distinct labels and each element's index
            # unique_elements, indices = torch.unique(label, sorted=False, return_inverse=True)

            # # Print the result
            # print("Unique elements:", unique_elements)
            
            # print(input_tensor[:320//8, :, :, :].shape)
            # raise "ss"
            # Split the 400-image batch into 10 chunks of 40 — presumably to
            # fit GPU memory; confirm. The 3rd of the model's 5 outputs is kept.
            for i in range(10):
                # print(input_tensor[(320//8)*(i):(320//8)*(i+1),:,:,:].shape)
                # raise "Ssss"
                feat0, feat2, feat3, _, _ = model(input_tensor[(400//10)*(i):(400//10)*(i+1),:,:,:])
                feats.append(feat3.cpu().detach())
                # label_la.append(label)
            # feat, _, _ = model(input_tensor)
            # print(feat[0][0].shape)
            # raise "sss"
            # Local1.append(local1.cpu().detach())
            # Local2.append(local2.cpu().detach())
            # Local3.append(local3.cpu().detach())
            # Local4.append(local4.cpu().detach())
            # feats.append(feat.cpu().detach())
            # label_la.append(label)
            # stop += 1
            # if stop == 1:
            #     break
    # all_feats_local1 = torch.cat(Local1, dim=0)
    # all_feats_local2 = torch.cat(Local2, dim=0)
    # all_feats_local3 = torch.cat(Local3, dim=0)
    # all_feats_local4 = torch.cat(Local4, dim=0)  
    # print(feats[0].shape)
            # Concatenate the 10 chunks back into one feature matrix; labels
            # come from the current (first and only) batch.
            all_feats = torch.cat(feats, dim=0)
            all_labels = label    # torch.cat(label_la, dim=0)  
            # print(all_feats.shape)
            # print(all_labels.shape)
            # raise "Sss"
            t_sne(features=all_feats, labels=all_labels)
            # Hard stop (TypeError in Python 3); the lines below never run.
            raise "sss"
            print(all_feats[1].shape)
            pca_visa(all_feats[1], all_feats[2], all_feats[3], all_feats[4])
            raise "sss"