import argparse
import os.path as osp
import time

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm

from models.backbones import BackBone
from dataloader.samplers import CategoriesSampler
from utils import pprint, ensure_path, Averager, count_acc, compute_confidence_interval, \
    calculate_keyPatches_index, calculate_keyPatches_num, get_patches_imp, get_similarity_map,\
    get_emd_distance, sum_min

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_epoch', type=int, default=100)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--test_way', type=int, default=5)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--lr', type=float, default=0.00001)
    parser.add_argument('--lr_mul', type=float, default=100)
    parser.add_argument('--step_size', type=int, default=5)
    parser.add_argument('--alpha', type=float, default=2.)
    parser.add_argument('--model_type', type=str, default='small')
    parser.add_argument('--dataset', type=str, default='miniImageNet',
                        choices=['ocean', 'miniImageNet', 'cifar', 'fc100', 'tieredImageNet'])
    parser.add_argument('--init_weights', type=str, default='./initialization/')
    args = parser.parse_args()
    pprint(vars(args))

    # BUG FIX: the original overwrote args.init_weights with a hard-coded
    # absolute path ("F:/checkpoint/..."), silently ignoring the CLI flag and
    # breaking portability.  Restore the intended join (see the previously
    # commented-out line): <weights dir>/<dataset>/checkpoint1600.pth
    args.init_weights = osp.join(args.init_weights, args.dataset, 'checkpoint1600.pth')
    if args.dataset == 'miniImageNet':
        from dataloader.mini_imagenet import MiniImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')

    # 500 few-shot episodes; each episode samples `way` classes with
    # (shot + query) images per class.
    # NOTE(review): this uses args.way rather than args.test_way even though
    # this is an evaluation loader — confirm that is intentional.
    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label, 500, args.way, args.shot + args.query)
    val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler,
                            pin_memory=True, num_workers=4)

    model = BackBone(args)

    print('Using {}'.format(args.model_type))

    # Load pre-trained weights (no FC weights).  The checkpoint stores the
    # encoder under the 'teacher' key with a 'backbone.' prefix, which is
    # remapped to this model's 'encoder.' naming; keys absent from the
    # current model are dropped.
    model_dict = model.state_dict()
    if args.init_weights is not None:
        pretrained_dict = torch.load(args.init_weights, map_location='cpu')['teacher']
        pretrained_dict = {k.replace('backbone', 'encoder'): v for k, v in pretrained_dict.items()}
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    print('Model loaded successfully!')
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        model = model.cuda()
    print('Start')
    start_time = time.time()
    Acc_list = []
    with torch.no_grad():
        for i, batch in enumerate(val_loader, 1):
            if torch.cuda.is_available():
                data = batch[0].cuda()
            else:
                data = batch[0]
            # Number of support samples in the episode: way * shot.
            k = args.way * args.shot
            data_shot, data_query = data[:k], data[k:]
            feat_shot, feat_query = model(data_shot, data_query)
            print(f'第{i}次抽样完成！shape{feat_shot.shape}')

            # For the 5-way 1-shot setting the feature shapes are:
            #   feat_shot:  torch.Size([5, 197, 384])
            #   feat_query: torch.Size([75, 197, 384])
            # i.e. a [CLS] token at index 0 followed by the patch tokens.
            # NOTE(review): weight_s / weight_q are never used below — the
            # call is kept only in case get_patches_imp has side effects on
            # `model`; confirm and remove if it does not.
            weight_s, weight_q = get_patches_imp(model)
            # Blend every patch token with the alpha-scaled [CLS] token.
            proto = feat_shot[:, 1:] + args.alpha * (feat_shot[:, 0:1].repeat(1, feat_shot.shape[1] - 1, 1))
            feat_query = feat_query[:, 1:] + args.alpha * (feat_query[:, 0:1].repeat(1, feat_query.shape[1] - 1, 1))
            similarity_matrix = get_similarity_map(proto, feat_query, way=args.way)
            acc = sum_min(similarity_matrix)
            Acc_list.append(acc)
            print('batch {} acc:{}'.format(i, acc))
    end_time = time.time()
    print('----------------------------')
    # Guard against an empty loader so the summary never divides by zero.
    if Acc_list:
        print('The Average acc is {}'.format(sum(Acc_list) / len(Acc_list)))
    else:
        print('No batches were evaluated.')
    # BUG FIX: time.time() already returns seconds; the original divided the
    # elapsed time by 1000, under-reporting the runtime by a factor of 1000.
    print('Total time {}s'.format(end_time - start_time))
    print('Done!')
