import copy
import torch
import torchvision.transforms as transforms
import numpy as np


from torchvision.models import vision_transformer
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
import numpy as np
import os
import torch
import warnings
from mmcls.apis import multi_gpu_test, single_gpu_test
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.models import build_classifier
from mmcls.utils import (auto_select_device, get_root_logger,
                         setup_multi_processes, wrap_distributed_model,
                         wrap_non_distributed_model)
from mmcv import DictAction
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)
from numbers import Number

import random

def parse_args():
    """Parse command-line arguments for the embedding-extraction script.

    Mirrors the standard mmcls ``tools/test.py`` CLI, plus three extra
    options used only by this script: ``--arch`` and ``--ft`` (used to name
    the output .npy files) and ``--mode`` (selects which annotation list to
    read labels from).

    Returns:
        argparse.Namespace: Parsed arguments. Also sets the ``LOCAL_RANK``
        environment variable when it is absent (needed by distributed init).
    """
    parser = argparse.ArgumentParser(description='mmcls test model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    out_options = ['class_scores', 'pred_score', 'pred_label', 'pred_class']
    parser.add_argument(
        '--out-items',
        nargs='+',
        default=['all'],
        choices=out_options + ['none', 'all'],
        help='Besides metrics, what items will be included in the output '
        f'result file. You can choose some of ({", ".join(out_options)}), '
        'or use "all" to include all above, or use "none" to disable all of '
        'above. Defaults to output all.',
        metavar='')
    parser.add_argument(
        '--metrics',
        type=str,
        default='accuracy',
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., '
        '"accuracy", "precision", "recall", "f1_score", "support" for single '
        'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for '
        'multi-label dataset')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--show-options',
        nargs='+',
        action=DictAction,
        help='custom options for show_result. key-value pair in xxx=yyy.'
        'Check available options in `model.show_result`.')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--device', help='device used for testing')
    parser.add_argument(
        '--output-prediction',
        help='where to save prediction in csv file',
        default=False)
    # Extra, script-specific options (used to build output .npy paths and to
    # pick the annotation list at module import time).
    parser.add_argument('--arch', type=str, default='vit')
    parser.add_argument('--ft', type=str, default='full')
    parser.add_argument('--mode', type=str, default='test')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    # NOTE: --metrics defaults to 'accuracy', so this assert can only fire
    # if the default is removed; kept for parity with mmcls tools/test.py.
    assert args.metrics or args.out, \
        'Please specify at least one of output path and evaluation metrics.'

    return args



# CLI args are parsed at import time because several helpers below read the
# module-level `args` / `input_text` globals.
args = parse_args()
if args.mode == 'test':
    input_text = 'plots/tsne/test.txt'
elif args.mode == 'train':
    input_text = 'data/MedFMC/ISIC/train.txt'
else:
    # Previously an unknown --mode left `input_text` undefined and the
    # script only crashed later with a NameError; fail fast instead.
    raise ValueError(
        f"Unsupported --mode '{args.mode}'; expected 'test' or 'train'.")

def get_labels(ann_file=None):
    """Read per-sample integer labels from an annotation text file.

    Each line is whitespace-separated and the label is assumed to be the
    LAST token on the line (e.g. ``"img_001.png 3"``); everything before it
    is ignored.

    Args:
        ann_file (str | None): Path to the annotation file. Defaults to the
            module-level ``input_text`` selected by ``--mode``.

    Returns:
        torch.Tensor: Integer labels of shape ``(num_samples, 1)``.
    """
    if ann_file is None:
        # Fall back to the module-level annotation list (original behavior).
        ann_file = input_text

    labels = []
    with open(ann_file, 'r') as f:
        for line in f:
            tokens = line.strip().split()
            # Only the final token is the label; keep list-of-list shape so
            # torch.tensor produces (N, 1) as downstream code expects.
            labels.append([int(tok) for tok in tokens[-1:]])

    return torch.tensor(labels)


# Extract CLS-token feature vectors for every batch in the dataloader.
def get_cls_embeddings(model, dataloader):
    """Run ``model`` over ``dataloader`` and collect its outputs.

    Args:
        model: Wrapped classifier, invoked as ``model(imgs, device='cpu')``.
            NOTE(review): assumes the wrapper forwards ``device`` and returns
            the CLS-token embedding directly — confirm against the model.
        dataloader: Iterable yielding dicts with an ``'img'`` batch tensor.

    Returns:
        tuple: ``(embeddings, labels)`` where ``embeddings`` is a stacked
        numpy array and ``labels`` is the tensor from :func:`get_labels`.
    """
    cls_embeddings = []
    for batch in dataloader:
        with torch.no_grad():
            # The model's output is treated as the CLS-token embedding as-is.
            cls_embeddings.append(model(batch['img'], device='cpu'))
    cls_embeddings = torch.cat(cls_embeddings, dim=0).cpu().numpy()
    # Labels are read from the annotation file rather than the dataloader,
    # so their order must match the (unshuffled) test loader.
    labels = get_labels()
    return cls_embeddings, labels

# Keep 5 decimal places without rounding.
def truncate_to_five_decimals(array):
    """Truncate ``array`` to 5 decimal places without rounding.

    Bug fix: the original used ``10**3`` (3 decimals, contradicting the
    function name and comment) and ``np.floor``, which pushes negative
    values AWAY from zero; ``np.trunc`` drops the fractional part toward
    zero for both signs, which is true truncation.

    Args:
        array (np.ndarray): Values to truncate.

    Returns:
        np.ndarray: ``array`` with at most 5 decimal places kept.
    """
    factor = 10**5  # scale so the kept digits move left of the decimal point
    return np.trunc(array * factor) / factor

def get_multi_cls_embeddings(model1, model2, dataloader):
    """Extract embeddings from two models over the same dataloader.

    Both models are switched to eval mode and the RNG is re-seeded before
    iterating, so any stochastic transforms in the pipeline affect both
    models identically (they are called back-to-back on each batch).

    Args:
        model1: First wrapped classifier, called as ``model(img, device='cpu')``.
        model2: Second wrapped classifier, same calling convention.
        dataloader: Iterable yielding dicts with an ``'img'`` batch tensor.

    Returns:
        tuple: ``(embeddings1, embeddings2, labels)`` — two stacked numpy
        arrays and the label tensor from :func:`get_labels`.
    """
    cls_embeddings = []
    cls_embeddings2 = []

    # Disable dropout / batch-norm updates for deterministic forward passes.
    model1.eval()
    model2.eval()

    # Fix randomness so both models see identical inputs.
    set_seed(42)

    for images in dataloader:
        with torch.no_grad():
            cls_embeddings.append(model1(images['img'], device='cpu'))
            cls_embeddings2.append(model2(images['img'], device='cpu'))

    cls_embeddings = torch.cat(cls_embeddings, dim=0).cpu().numpy()
    cls_embeddings2 = torch.cat(cls_embeddings2, dim=0).cpu().numpy()

    # Labels come from the annotation file; order matches the unshuffled
    # test loader.
    labels = get_labels()
    return cls_embeddings, cls_embeddings2, labels

def set_seed(seed=42):
    """Seed every RNG source (Python, NumPy, PyTorch CPU and CUDA) and put
    cuDNN in deterministic mode so repeated runs are reproducible.

    Args:
        seed (int): Seed value applied to all generators. Defaults to 42.
    """
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
    # Deterministic cuDNN kernels; disable autotuning, which is stochastic.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def main():
    """Build the mmcls test pipeline, run the checkpointed model on CPU over
    the annotation list selected by ``--mode``, and save the resulting
    embeddings and labels as ``.npy`` files for later t-SNE plotting.
    """
    set_seed(42)  # Set random seed for reproducibility
    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    cfg.output_prediction = args.output_prediction
    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None

    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed testing. Use the first GPU '
                      'in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]
    # cfg.device = args.device or auto_select_device()
    # Embedding extraction is forced onto CPU here, overriding --device.
    cfg.device = 'cpu'

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)


    # Point the test split at the --mode-selected annotation list so that
    # get_labels() (which reads the same file) stays aligned with the data.
    cfg.data.test['ann_file'] = input_text
    dataset = build_dataset(cfg.data.test, default_args=dict(test_mode=True))

    # build the dataloader
    # The default loader config
    loader_cfg = dict(
        # cfg.gpus will be ignored if distributed
        num_gpus=1 if cfg.device == 'ipu' else len(cfg.gpu_ids),
        dist=distributed,
        round_up=True,
    )
    # The overall dataloader settings
    loader_cfg.update({
        k: v
        for k, v in cfg.data.items() if k not in [
            'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
            'test_dataloader'
        ]
    })
    test_loader_cfg = {
        **loader_cfg,
        'shuffle': False,  # Not shuffle by default
        'sampler_cfg': None,  # Not use sampler by default
        **cfg.data.get('test_dataloader', {}),
    }
    # the extra round_up data will be removed during gpu/cpu collect
    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    model = build_classifier(cfg.model)




    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)


    load_checkpoint(model, args.checkpoint, map_location='cpu')



    model = wrap_non_distributed_model(model, device=cfg.device)


    
    # Extract the CLS-token feature vectors.
    cls_embeddings, labels = get_cls_embeddings(model, data_loader)

    # cls_embeddings_o, _ = get_cls_embeddings(model, data_loader)
    
    # cls_embeddings, cls_embeddings_o, labels = get_multi_cls_embeddings(model,model, data_loader)


    # np.save('plots/tsne/ISIC/'+'ISIC_cls_embeddings_vit'+'.npy', cls_embeddings)
    # np.save('plots/tsne/ISIC/'+'ISIC_labels_vit'+'.npy', labels)

    # Output paths encode the fine-tuning strategy (--ft) and architecture
    # (--arch); the target directory must already exist.
    np.save(f'plots/tsne/ISIC/{args.ft}/'+f'ISIC_cls_embeddings_{args.arch}'+'.npy', cls_embeddings)
    np.save(f'plots/tsne/ISIC/{args.ft}/'+f'ISIC_labels_{args.arch}'+'.npy', labels)
    
    # np.save(f'plots/tsne/ISIC/{args.ft}/'+f'ISIC_cls_embeddings_o{args.arch}'+'.npy', cls_embeddings_o)
    # np.save(f'plots/tsne/ISIC/{args.ft}/'+f'ISIC_labels_o{args.arch}'+'.npy', labels)

# Script entry point.
if __name__ == '__main__':
    main()