
import os

import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn

from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
from torchvision.datasets.folder import default_loader
from torchvision import datasets

from medfmc.utils import utils
# import vision_transformer as vits


from mmcls.models import build_classifier
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)
from mmcls.utils import (auto_select_device, get_root_logger,
                         setup_multi_processes, wrap_distributed_model,
                         wrap_non_distributed_model)
from mmcls.datasets import ImageNet
from mmcv import Config, DictAction
import warnings


from torch.utils.data import Subset
import numpy as np



from torchvision.datasets.folder import default_loader
from torchvision.datasets.folder import IMG_EXTENSIONS
from torchvision.datasets.folder import DatasetFolder
from torch.utils.data import Dataset, DataLoader




class CustomImageFolder(Dataset):
    """Dataset that reads (image_name, class_id) pairs from a text file.

    Each line of *txt_file* must hold an image filename and an integer
    class id separated by whitespace; image paths are resolved relative
    to *root*.
    """

    def __init__(self, root, txt_file, transform=None, loader=default_loader):
        self.root = root
        self.transform = transform
        self.loader = loader
        self.imgs = self._make_dataset(txt_file)

    def _make_dataset(self, txt_file):
        # Parse the annotation file into a list of (absolute_path, label).
        with open(txt_file, 'r') as f:
            entries = [line.strip().split() for line in f]
        return [(os.path.join(self.root, name), int(cls)) for name, cls in entries]

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        path, target = self.imgs[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, target

class ReturnIndexDataset(CustomImageFolder):
    """``CustomImageFolder`` variant yielding (image, index) pairs.

    The dataset index replaces the class label so distributed feature
    extraction can write each feature row back to the right position.
    """

    def __init__(self, root, txt_file, transform=None, loader=default_loader):
        super().__init__(root, txt_file, transform=transform, loader=loader)

    def __getitem__(self, idx):
        img, _ = super().__getitem__(idx)
        return img, idx


def extract_feature_pipeline(args, cfg):
    """Build dataloaders, run the classifier as a feature extractor.

    Builds train/val dataloaders for ``args.dataset``, loads the classifier
    described by *cfg* with weights from ``args.checkpoint``, extracts
    features for both splits and (on rank 0) L2-normalizes them.  Features
    and labels are optionally dumped to ``args.dump_features``.

    Args:
        args: parsed CLI arguments (uses dataset, data_path,
            batch_size_per_gpu, num_workers, checkpoint, use_cuda,
            dump_features).
        cfg: mmcv ``Config`` with the model definition, device, gpu_ids and
            optional fp16 settings.

    Returns:
        tuple: (train_features, test_features, train_labels, test_labels).
        Feature tensors are only populated/normalized on rank 0 (see
        ``extract_features``).
    """
    # ============ preparing data ... ============
    transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=3),  # 3 == PIL bicubic
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    # Prefer the user-supplied --data_path; fall back to the historical
    # hard-coded dataset location for backward compatibility.
    base_dir = args.data_path or "/home/zuwenqiang/data/MedFMC/MedFMC_train"
    root_dir = os.path.join(base_dir, args.dataset, "images")
    train_file = os.path.join(base_dir, args.dataset, "trainval.txt")
    val_file = os.path.join(base_dir, args.dataset, "test.txt")
    dataset_train = ReturnIndexDataset(root_dir, train_file, transform=transform)
    dataset_val = ReturnIndexDataset(root_dir, val_file, transform=transform)

    # shuffle=False keeps the sample order deterministic so the gathered
    # features can be written back by dataset index.
    sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")

    # ============ building network ... ============
    model = build_classifier(cfg.model)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = wrap_non_distributed_model(
        model, device=cfg.device, device_ids=cfg.gpu_ids)

    # ============ extract features ... ============
    print("Extracting features for train set...")
    train_features = extract_features(model, data_loader_train, args.use_cuda)
    print("Extracting features for val set...")
    test_features = extract_features(model, data_loader_val, args.use_cuda)

    # Only rank 0 holds the full feature matrices.
    if utils.get_rank() == 0:
        train_features = nn.functional.normalize(train_features, dim=1, p=2)
        test_features = nn.functional.normalize(test_features, dim=1, p=2)

    # ``imgs`` entries are (path, class_id) tuples built by CustomImageFolder.
    train_labels = torch.tensor([label for _, label in dataset_train.imgs]).long()
    test_labels = torch.tensor([label for _, label in dataset_val.imgs]).long()
    # save features and labels (rank 0 only)
    if args.dump_features and dist.get_rank() == 0:
        # Create the dump folder if it does not exist yet.
        os.makedirs(args.dump_features, exist_ok=True)
        torch.save(train_features.cpu(), os.path.join(args.dump_features, "trainfeat.pth"))
        torch.save(test_features.cpu(), os.path.join(args.dump_features, "testfeat.pth"))
        torch.save(train_labels.cpu(), os.path.join(args.dump_features, "trainlabels.pth"))
        torch.save(test_labels.cpu(), os.path.join(args.dump_features, "testlabels.pth"))
    return train_features, test_features, train_labels, test_labels









@torch.no_grad()
def extract_features(model, data_loader, use_cuda=True, multiscale=False):
    """Run *model* over *data_loader* and gather features from all ranks.

    Expects the loader to yield (samples, dataset_index) pairs (see
    ``ReturnIndexDataset``) so each rank's features can be written into the
    global matrix at the right rows.  Requires torch.distributed to be
    initialized.

    Returns:
        On rank 0: a (num_samples, feat_dim) tensor with all features
        (on GPU if *use_cuda*).  On every other rank: ``None``.
    """
    metric_logger = utils.MetricLogger(delimiter="  ")
    features = None
    for samples, index in metric_logger.log_every(data_loader, 100):
        samples = samples.cuda(non_blocking=True)
        index = index.cuda(non_blocking=True)
        if multiscale:
            feats = utils.multi_scale(samples, model)
        else:
            feats = model(samples).clone()

        # init storage feature matrix
        # Lazily allocated on the first batch, once the feature dim is known.
        if dist.get_rank() == 0 and features is None:
            features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
            if use_cuda:
                features = features.cuda(non_blocking=True)
            print(f"Storing features into tensor of shape {features.shape}")

        # get indexes from all processes
        y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
        y_l = list(y_all.unbind(0))
        y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
        y_all_reduce.wait()
        index_all = torch.cat(y_l)

        # share features between processes
        feats_all = torch.empty(
            dist.get_world_size(),
            feats.size(0),
            feats.size(1),
            dtype=feats.dtype,
            device=feats.device,
        )
        output_l = list(feats_all.unbind(0))
        output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
        output_all_reduce.wait()

        # update storage feature matrix
        # Rank 0 scatters the gathered features to their dataset positions;
        # index_copy_ needs source and index on the same device as features.
        if dist.get_rank() == 0:
            if use_cuda:
                features.index_copy_(0, index_all, torch.cat(output_l))
            else:
                features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
    return features


@torch.no_grad()
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, dataset):
    """Weighted k-NN classification over pre-extracted features.

    For each test sample the k most similar training samples (by dot
    product, i.e. cosine similarity for L2-normalized features) vote for
    their label, weighted by ``exp(similarity / T)``.

    Args:
        train_features: (n_train, dim) feature matrix.
        train_labels: (n_train,) integer labels.
        test_features: (n_test, dim) feature matrix.
        test_labels: (n_test,) integer labels.
        k: number of neighbors.
        T: softmax temperature for the vote weights.
        dataset: dataset name; 'ISIC' and 'APTOS' have known class counts,
            otherwise the count is inferred from the training labels.

    Returns:
        float: top-1 accuracy in percent.
    """
    CLASSES = {'ISIC': 7, 'APTOS': 5}
    # Fall back to inferring the class count from the labels for datasets
    # not listed above (backward compatible for ISIC/APTOS).
    num_classes = CLASSES.get(dataset, int(train_labels.max().item()) + 1)
    top1, total = 0.0, 0
    train_features = train_features.t()
    num_test_images, num_chunks = test_labels.shape[0], 100
    # Guard against fewer than num_chunks test images: plain integer
    # division would yield a step of 0 and make range() raise ValueError.
    imgs_per_chunk = max(1, num_test_images // num_chunks)
    retrieval_one_hot = torch.zeros(k, num_classes).to(train_features.device)
    for idx in range(0, num_test_images, imgs_per_chunk):
        # get the features for this chunk of test images
        features = test_features[idx : min((idx + imgs_per_chunk), num_test_images), :]
        targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
        batch_size = targets.shape[0]

        # calculate the dot product and compute top-k neighbors
        similarity = torch.mm(features, train_features)
        distances, indices = similarity.topk(k, largest=True, sorted=True)
        candidates = train_labels.view(1, -1).expand(batch_size, -1)
        retrieved_neighbors = torch.gather(candidates, 1, indices)

        # one-hot encode the neighbor labels, then weight each vote by
        # exp(similarity / T) and sum the votes per class
        retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
        retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
        distances_transform = distances.clone().div_(T).exp_()
        probs = torch.sum(
            torch.mul(
                retrieval_one_hot.view(batch_size, -1, num_classes),
                distances_transform.view(batch_size, -1, 1),
            ),
            1,
        )
        _, predictions = probs.sort(1, True)

        # find the predictions that match the target
        correct = predictions.eq(targets.data.view(-1, 1))
        top1 = top1 + correct.narrow(1, 0, 1).sum().item()
        total += targets.size(0)
    top1 = top1 * 100.0 / total

    return top1





if __name__ == '__main__':
    parser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--device', help='device used for testing')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')

    parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
    parser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int,
        help='Number of NN to use. 20 is usually working the best.')
    parser.add_argument('--temperature', default=0.07, type=float,
        help='Temperature used in the voting coefficient')

    parser.add_argument('--data_path', default='', type=str)
    parser.add_argument('--dataset', default='', type=str)

    def _str2bool(value):
        """Parse a boolean CLI value ('true'/'false', 'yes'/'no', '1'/'0').

        BUGFIX: ``type=bool`` treats any non-empty string (including
        "False") as True, so ``--use_cuda False`` silently kept CUDA on.
        """
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')

    parser.add_argument('--use_cuda', default=True, type=_str2bool,
        help="Should we store the features on GPU? We recommend setting this to False if you encounter OOM")

    parser.add_argument('--dump_features', default=None,
        help='Path where to save computed features, empty for no saving')
    parser.add_argument('--load_features', default=None, help="""If the features have
        already been computed, where to find them.""")
    parser.add_argument('--num_workers', default=1, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")

    parser.add_argument('--world_size', default=1, type=int)
    parser.add_argument('--num_classes', default=1000, type=int)
    parser.add_argument('--port', default=12341, type=int)
    parser.add_argument('--split', default=4, type=int)
    args = parser.parse_args()

    # Provide a minimal single-process distributed environment when the
    # script is launched without torch.distributed (plain `python ...`).
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
        os.environ['RANK'] = str(args.local_rank)
        os.environ['WORLD_SIZE'] = '1'
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(args.port)

    cfg = Config.fromfile(args.config)
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed testing. Use the first GPU '
                      'in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]
    cfg.device = args.device or auto_select_device()

    utils.init_distributed_mode(args)

    cudnn.benchmark = True

    # Reuse previously dumped features when available; otherwise extract.
    if args.load_features:
        train_features = torch.load(os.path.join(args.load_features, "trainfeat.pth"))
        test_features = torch.load(os.path.join(args.load_features, "testfeat.pth"))
        train_labels = torch.load(os.path.join(args.load_features, "trainlabels.pth"))
        test_labels = torch.load(os.path.join(args.load_features, "testlabels.pth"))
    else:
        train_features, test_features, train_labels, test_labels = extract_feature_pipeline(args, cfg)

    if args.use_cuda:
        train_features = train_features.cuda()
        test_features = test_features.cuda()
        train_labels = train_labels.cuda()
        test_labels = test_labels.cuda()

    print("Features are ready!\nStart the k-NN classification.")

    # NOTE(review): --nb_knn is accepted but unused; k is fixed to 20 here.
    k = 20
    top1 = knn_classifier(train_features, train_labels, test_features, test_labels, k, args.temperature, args.dataset)
    print(f"{k}-NN classifier result: Top1: {top1}")
    dist.barrier()