import argparse

import torch.nn as nn
import tqdm
from torch.utils.data import DataLoader

from Models.dataloader.samplers import CategoriesSampler
from Models.models.Network import DeepEMD
from Models.utils import *
from Models.io_utils import *
from Models.dataloader.data_utils import *
from config_utils import eval_config

# Default locations for the datasets and the pretrained checkpoint; both can
# be overridden on the command line via -data_dir / -model_dir.
DATA_DIR = './datasets'
# DATA_DIR='/home/zhangchi/dataset'
MODEL_DIR = 'deepemd_trained_model/miniimagenet/fcn/max_acc.pth'


# Build the shared evaluation argument parser (defined in config_utils) and
# extend it with the two path options local to this script.
parser = eval_config()

parser.add_argument('-data_dir', type=str, default=DATA_DIR)
parser.add_argument('-model_dir', type=str, default=MODEL_DIR)
args = parser.parse_args()
# Comma-separated CLI strings -> lists of ints (e.g. "2,3" -> [2, 3]).
if args.feature_pyramid is not None:
    args.feature_pyramid = [int(x) for x in args.feature_pyramid.split(',')]
args.patch_list = [int(x) for x in args.patch_list.split(',')]
# Project helper that post-processes tuning/pretrain-related options in place.
parse_tune_pretrain(args)

if args.origin:
    # Evaluate with the paper's released checkpoint: args.model_dir keeps its
    # CLI value. (The original `args.model_dir = osp.join(args.model_dir)` was
    # a no-op — os.path.join with a single argument returns it unchanged — so
    # it has been removed.)
    print("使用论文参数")
    print("model dir:", args.model_dir)

else:
    # Derive the checkpoint path from the model configuration.
    format_model_name(args)
    # Deformable-conv variants were meta-trained under a suffixed model name.
    if args.use_deformconv:
        args.model_name += "_modulated_deform" if args.modulated_deformconv else "_deform"

    # Regardless of the test-time shot count, the 1-shot / 5-way meta-trained
    # checkpoint matching the LR scheduler is used. The two branches of the
    # original differed only in the scheduler suffix, so they are unified here;
    # any other scheduler value keeps the default args.model_dir, as before.
    if args.sche in ("StepLR", "CosineLR"):
        args.model_dir = (
            'checkpoint/meta_train/miniimagenet/{model_name}/'
            '{shot}shot-{way}way_opencv_{sche}/max_acc.pth'.format(
                model_name=args.model_name, shot=1, way=5, sche=args.sche))

if os.path.exists(args.model_dir):
    print("测试阶段使用此处的模型:{}".format(args.model_dir))
else:
    raise ValueError("未找到预训练模型:{}".format(args.model_dir))

# Directory where test results are written; the layout encodes the evaluation
# configuration so runs with different settings do not collide. The original
# four near-duplicate templates (and their unused `sfclr=` format kwargs) are
# collapsed into one template with optional suffixes.
if args.origin:
    args.res_save_path = "result/{dataset}/resnet_origin/{shot}shot-{way}way/".format(
        dataset=args.dataset, shot=args.shot, way=args.way)
else:
    # SFC step 100 is the default and is omitted from the directory name.
    sfc_tag = "" if args.sfc_update_step == 100 else "_SFC{}".format(args.sfc_update_step)
    status_tag = "_use_specific_status" if args.use_specific_status else ""
    args.res_save_path = "result/{dataset}/{model_name}/{shot}shot-{way}way_{sche}{sfc}{status}/".format(
        dataset=args.dataset, model_name=args.model_name, shot=args.shot,
        way=args.way, sche=args.sche, sfc=sfc_tag, status=status_tag)


# exist_ok=True replaces the original exists()/pass/makedirs dance.
os.makedirs(args.res_save_path, exist_ok=True)

pprint(vars(args))

# Fail fast if the checkpoint is missing (kept after pprint so the full
# configuration is still dumped first).
if os.path.exists(args.model_dir):
    print("使用模型路径:{}".format(args.model_dir))
else:
    # BUG FIX: the original passed two positional arguments to ValueError,
    # which renders as a tuple instead of a message; format the path in.
    raise ValueError("找不到模型参数文件:{}".format(args.model_dir))

set_seed(args.seed)              # reproducibility
num_gpu = set_gpu(args)          # configures visible GPUs, returns their count
Dataset = set_up_datasets(args)  # dataset class selected by args.dataset


# Build the DeepEMD network, restore the meta-trained weights, then wrap it
# for multi-GPU inference and switch to evaluation mode (freezes BN/dropout).
model = DeepEMD(args)
model = load_model(model, args.model_dir)
model = nn.DataParallel(model, device_ids=list(range(num_gpu))).cuda()
model.eval()

# Episodic test loader: each batch is one few-shot episode holding
# way * (shot + query) images drawn by CategoriesSampler.
test_set = Dataset(args.set, args)
sampler = CategoriesSampler(test_set.label, args.test_episode,
                            args.way, args.shot + args.query)
loader = DataLoader(
    test_set,
    batch_sampler=sampler,
    num_workers=0,
    pin_memory=True,
)
tqdm_gen = tqdm.tqdm(loader)

# Accuracy bookkeeping. The query labels are identical in every episode
# (way classes repeated query times), so they are built once up front.
ave_acc = Averager()
test_acc_record = np.zeros((args.test_episode,))
label = torch.arange(args.way).repeat(args.query).type(torch.cuda.LongTensor)

with torch.no_grad():
    for episode, batch in enumerate(tqdm_gen, 1):
        # batch = (images, labels). The loader's labels are unused because the
        # query labels are fixed per episode (built above), so only the images
        # are moved to the GPU. (The original `data, _ = [_.cuda() for _ in
        # batch]` shadowed `_` and needlessly transferred the labels too.)
        data = batch[0].cuda()
        k = args.way * args.shot  # number of support images in the episode

        # Stage 1: encode all images into feature maps, e.g. [100, 640, 5, 5].
        model.module.mode = 'encoder'
        data = model(data)
        # First k feature maps form the support set, the rest are queries
        # (e.g. shot: [5, 640, 5, 5], query: [75, 640, 5, 5]).
        data_shot, data_query = data[:k], data[k:]

        # Stage 2: EMD matching between support prototypes and queries.
        model.module.mode = 'meta'
        if args.shot > 1:
            # Multi-shot: fine-tune a structured FC prototype from the shots.
            data_shot = model.module.get_sfc(data_shot)
        # Replicate the support tensor so each DataParallel replica gets it.
        logits = model((data_shot.unsqueeze(0).repeat(num_gpu, 1, 1, 1, 1),
                        data_query))

        acc = count_acc(logits, label) * 100
        ave_acc.add(acc)
        test_acc_record[episode - 1] = acc
        # Running mean and 95% confidence interval over episodes so far.
        m, pm = compute_confidence_interval(test_acc_record[:episode])
        tqdm_gen.set_description(
            'batch {}: This episode:{:.2f}  average: {:.4f}+{:.4f}'.format(episode, acc, m, pm))

    # Final statistics over all episodes, printed and saved to disk.
    m, pm = compute_confidence_interval(test_acc_record)
    result_list = ['test Acc {:.4f}'.format(ave_acc.item())]
    result_list.append('Test Acc {:.4f} + {:.4f}'.format(m, pm))
    print(result_list[0])
    print(result_list[1])
    save_list_to_txt(os.path.join(args.res_save_path, 'results.txt'), result_list)
