import argparse
import os.path as osp

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

from cpea import CPEA
from models.backbones import BackBone
from dataloader.samplers import CategoriesSampler
from utils import pprint, ensure_path, Averager, count_acc, compute_confidence_interval, \
    calculate_keyPatches_index, calculate_keyPatches_num

if __name__ == '__main__':
    # Key-patch inspection script: loads a pre-trained ViT backbone, runs a
    # couple of few-shot validation episodes, and prints the number of key
    # patches selected for the support and query sets.
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_epoch', type=int, default=100)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--test_way', type=int, default=5)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--lr', type=float, default=0.00001)
    parser.add_argument('--lr_mul', type=float, default=100)
    parser.add_argument('--step_size', type=int, default=5)
    parser.add_argument('--gamma', type=float, default=0.5)
    parser.add_argument('--model_type', type=str, default='small')
    parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['ocean', 'miniImageNet','cifar','fc100','tieredImageNet'])
    parser.add_argument('--init_weights', type=str, default='./initialization/')
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--exp', type=str, default='CPEA')
    args = parser.parse_args()
    pprint(vars(args))

    # Honor --gpu: restrict visible devices before any CUDA call is made.
    # (Previously the argument was parsed but never used.)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # Build the checkpoint path from the configurable --init_weights root
    # instead of the old hard-coded, machine-specific "F:/..." path, so the
    # script runs on any machine and respects the CLI argument.
    args.init_weights = osp.join(args.init_weights, args.dataset, 'checkpoint1600.pth')
    if args.dataset == 'miniImageNet':
        from dataloader.mini_imagenet import MiniImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')

    # 500 validation episodes of test_way classes, shot support + query images each.
    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label, 500, args.test_way, args.shot + args.query)
    val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler, pin_memory=True)

    model = BackBone(args)

    print('Using {}'.format(args.model_type))

    # Load pre-trained backbone weights (no FC/head weights). The checkpoint
    # stores the backbone under the 'teacher' key with a 'backbone.' prefix,
    # which is remapped to this model's 'encoder.' prefix; keys absent from
    # the model are dropped.
    model_dict = model.state_dict()
    print(model_dict.keys())
    if args.init_weights is not None:
        pretrained_dict = torch.load(args.init_weights, map_location='cpu')['teacher']
        print(pretrained_dict.keys())
        pretrained_dict = {k.replace('backbone', 'encoder'): v for k, v in pretrained_dict.items()}
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        print(pretrained_dict.keys())
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)

    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        model = model.cuda()

    # Inference only: disable dropout etc. for deterministic features.
    model.eval()

    with torch.no_grad():
        for i, batch in enumerate(val_loader, 1):
            if torch.cuda.is_available():
                data, _labels = [t.cuda() for t in batch]
            else:
                data = batch[0]
            # k = number of samples in the support set.
            k = args.test_way * args.shot
            data_shot, data_query = data[:k], data[k:]
            feat_shot, feat_query = model(data_shot, data_query)
            # Under the 5-way 1-shot setting the feature shapes are:
            #   feat_shot:  torch.Size([5, 197, 384])
            #   feat_query: torch.Size([75, 197, 384])
            # NOTE(review): feat_shot/feat_query are not passed on below —
            # calculate_keyPatches_index presumably reads scores cached on the
            # model by the forward pass above; confirm in utils.

            # Rank patches per image (descending) and get their indices.
            score_support, support_indices, score_query, query_indices = calculate_keyPatches_index(model, args.shot)

            # From the ranked scores, count how many patches qualify as "key".
            support_keyPatch_nums = calculate_keyPatches_num(score_support, support_indices)
            query_keyPatch_nums = calculate_keyPatches_num(score_query, query_indices)
            print(support_keyPatch_nums)
            print(query_keyPatch_nums)

            # Inspect only the first two episodes.
            if i == 2:
                break

