# 将模型应用于半监督场景

import argparse
import math

import torch.nn as nn
import tqdm
from torch.utils.data import DataLoader
from tqdm.utils import _environ_cols_wrapper

from Models.dataloader.samplers import CategoriesSampler
from Models.models.Network import DeepEMD
from Models.utils import *
from Models.io_utils import *
from Models.dataloader.data_utils import *
from Models.models.ici import ICI
from config_utils import eval_config

# Root directory that holds the evaluation datasets.
DATA_DIR = './datasets'
# DATA_DIR='/home/zhangchi/dataset'
# Default checkpoint for the pretrained DeepEMD model (FCN variant, miniImageNet).
MODEL_DIR = 'deepemd_trained_model/miniimagenet/fcn/max_acc.pth'


# Build the shared evaluation argument parser and extend it with data/model
# paths plus the semi-supervised (ICI) options.
parser = eval_config()
parser.add_argument('-data_dir', type=str, default=DATA_DIR)
parser.add_argument('-model_dir', type=str, default=MODEL_DIR)
# ==================== semi-supervised learning parameters ====================
# --unlabel: number of unlabeled samples per class; 0 means transductive setting.
parser.add_argument("--unlabel", type=int, default=0, help="每个类别使用的无标记样本的数目, 0代表transduction setting")
parser.add_argument('--ici_classifier', type=str, default='lr',help='lr/svm.')
parser.add_argument('--ici_step', type=int, default=5,
                        help='Select how many unlabeled data for each class in one iteration.')
parser.add_argument('--ici_dim', type=int, default=10, help='Reduced dimension.')
parser.add_argument('--ici_embed', type=str, default='pca', help='Dimensionality reduction algorithm.')
# --ici_use_glmnet: use glmnet to compute the regularization path instead of scikit-learn.
parser.add_argument("--ici_use_glmnet", action="store_true", help="是否使用glmnet计算 path, 否则使用 scikit-learn")
# ==================== semi-supervised learning parameters ====================

args = parser.parse_args()
# feature_pyramid / patch_list arrive as comma-separated strings; parse to int lists.
if args.feature_pyramid is not None:
    args.feature_pyramid = [int(x) for x in args.feature_pyramid.split(',')]
args.patch_list = [int(x) for x in args.patch_list.split(',')]
parse_tune_pretrain(args)

# Run on the first GPU when available, otherwise fall back to CPU.
args.device = torch.device(
    "cuda:0" if torch.cuda.is_available() else "cpu")

if args.origin:
    # Evaluate with the paper's released checkpoint at the user-supplied path.
    # (A previous no-op `args.model_dir = osp.join(args.model_dir)` was removed.)
    print("使用论文参数")
    print("model dir:", args.model_dir)
else:
    # Derive the checkpoint path from the model configuration.
    format_model_name(args)
    if args.use_deformconv and args.modulated_deformconv:
        args.model_name += "_modulated_deform"
    elif args.use_deformconv and not args.modulated_deformconv:
        args.model_name += "_deform"

    # The checkpoint is always the 1-shot 5-way meta-trained model, regardless
    # of the test-time shot/way setting.
    if args.sche == "StepLR":
        args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv_StepLR/max_acc.pth'.format(
            model_name=args.model_name, shot=1, way=5)
    elif args.sche == "CosineLR":
        args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv_CosineLR/max_acc.pth'.format(
            model_name=args.model_name, shot=1, way=5)

# Fail fast if the resolved checkpoint does not exist.
if os.path.exists(args.model_dir):
    print("测试阶段使用此处的模型:{}".format(args.model_dir))
else:
    raise ValueError("未找到预训练模型:{}".format(args.model_dir))

# Destination directory for the evaluation results; the path encodes the full
# run configuration (dataset, model, shot/way, scheduler, unlabel count, and
# optional SFC / specific-status flags), plus the ICI hyper-parameters.
if args.origin:
    args.res_save_path = "semi_result/{dataset}/resnet_origin/{shot}shot-{way}way/".format(
        dataset=args.dataset, shot=args.shot, way=args.way)
else:
    # Build the optional suffix pieces instead of enumerating every combination.
    extra = ""
    if args.sfc_update_step != 100:
        extra += "_SFC{}".format(args.sfc_update_step)
    if args.use_specific_status:
        extra += "_use_specific_status"
    base = "semi_ici_result/{dataset}/{model_name}/{shot}shot-{way}way_{sche}_unlabel{unlabel}".format(
        dataset=args.dataset, model_name=args.model_name, shot=args.shot,
        way=args.way, sche=args.sche, unlabel=args.unlabel)
    args.res_save_path = base + extra + "/"

args.res_save_path = os.path.join(
    args.res_save_path,
    "ici_step{}_dim{}_embed-{}".format(args.ici_step, args.ici_dim, args.ici_embed))

# Create the results directory if missing; exist_ok avoids the race between
# the exists() check and makedirs() in the original if/else form.
os.makedirs(args.res_save_path, exist_ok=True)

# Dump the full configuration for reproducibility.
pprint(vars(args))

# Re-verify the checkpoint path right before loading.
if os.path.exists(args.model_dir):
    print("使用模型路径:{}".format(args.model_dir))
else:
    # FIX: the message and the path were previously passed as two ValueError
    # arguments, producing a tuple-style message; format them into one string.
    raise ValueError("找不到模型参数文件:{}".format(args.model_dir))

set_seed(args.seed)
num_gpu = set_gpu(args)
Dataset = set_up_datasets(args)
Dataset = set_up_datasets(args)


# model
model = DeepEMD(args)
model = load_model(model, args.model_dir)
model = nn.DataParallel(model, list(range(num_gpu)))
model = model.cuda()
model.eval()

ici = ICI(classifier=args.ici_classifier, num_class=args.way,
              step=args.ici_step, reduce=args.ici_embed, d=args.ici_dim, use_glmnet=args.ici_use_glmnet)
# test dataset
# 注意 Dataset 与 CategoriesSampler 在 semi setting 下接口需对应
test_set = Dataset(args.set, args) 
sampler = CategoriesSampler(
    test_set.label, args.test_episode, args.way, args.shot + args.query + args.unlabel)
loader = DataLoader(test_set, batch_sampler=sampler,
                    num_workers=0, pin_memory=True)
tqdm_gen = tqdm.tqdm(loader)

# Accumulators for per-episode accuracy and its running record.
ave_acc = Averager()
test_acc_record = np.zeros((args.test_episode,))

# Number of ICI iterations tracked per episode: enough steps to consume all
# unlabeled samples (or 15 in the transductive case), plus two extra slots.
if args.unlabel != 0:
    iterations = math.ceil(args.unlabel / args.ici_step) + 2
else:
    iterations = math.ceil(15 / args.ici_step) + 2
acc_list = [[] for _ in range(iterations)]
with torch.no_grad():
    for i, batch in enumerate(tqdm_gen, 1):
        # The label tensor from the batch is unused; labels are regenerated
        # per episode from the sampler's fixed class layout.
        data, indicator = [_.cuda() for _ in batch]
        # Class ids repeat every `way` positions; `indicator != 0` masks out
        # padding entries in the episode.
        targets = torch.arange(args.way).repeat(args.shot+args.query+args.unlabel).long()[
            indicator[:args.way*(args.shot+args.query+args.unlabel)] != 0]
        k = args.way * args.shot  # number of labeled support images
        data = data[indicator != 0].to(args.device)
        # Episode layout: [support | query | unlabeled]
        train_inputs = data[:k]
        train_targets = targets[:k].cpu().numpy()
        test_inputs = data[k:k+args.query*args.way]
        test_targets = targets[k:k+args.query*args.way].cpu().numpy()

        # Fit ICI on the support embeddings, then predict on the query set,
        # optionally expanding with unlabeled embeddings.
        train_embeddings = get_embedding(model, train_inputs, args.device)
        ici.fit(train_embeddings, train_targets)
        test_embeddings = get_embedding(model, test_inputs, args.device)
        if args.unlabel != 0:
            unlabel_inputs = data[k+args.query*args.way:]
            unlabel_embeddings = get_embedding(
                model, unlabel_inputs, args.device)
        else:
            unlabel_embeddings = None
        # `acc` appears to hold one accuracy per ICI iteration, with the final
        # accuracy last — TODO confirm against ICI.predict.
        acc = ici.predict(test_embeddings, unlabel_embeddings,
                          True, test_targets, plot=True)
        # Record per-iteration accuracies (as percentages); the last slot of
        # acc_list always receives the final accuracy.
        for j in range(min(iterations-1,len(acc))):
            acc_list[j].append(acc[j] * 100)
        acc_list[-1].append(acc[-1] * 100)
        # model.module.mode = 'encoder'
        # data = model(data)
        # # shot: 5,3,84,84  query:75,3,84,84
        # data_shot, data_query = data[:k], data[k:]
        # model.module.mode = 'meta'
        # if args.shot > 1:
        #     data_shot = model.module.get_sfc(data_shot)
        # logits = model((data_shot.unsqueeze(0).repeat(
        #     num_gpu, 1, 1, 1, 1), data_query))
        # acc = count_acc(logits, label) * 100
        ave_acc.add(acc[-1] * 100)
        test_acc_record[i - 1] = acc[-1] * 100
        # Running confidence interval over the episodes processed so far.
        m, pm = compute_confidence_interval(test_acc_record[:i])
        tqdm_gen.set_description(
            'batch {}: This episode:{:.2f}  average: {:.4f}+{:.4f}'.format(i, acc[-1], m, pm))

    # Per-ICI-iteration mean accuracy and confidence interval over all episodes.
    mean_list = []
    ci_list = []
    for item in acc_list:
        mean, ci = compute_confidence_interval(item)
        mean_list.append(mean)
        ci_list.append(ci)

    m, pm = compute_confidence_interval(test_acc_record)
    result_list = ['test Acc {:.4f}'.format(ave_acc.item())]
    result_list.append('Test Acc {:.4f} + {:.4f}'.format(m, pm))
    # NOTE(review): acc_list entries were already scaled by 100 above, so the
    # extra `i*100` here looks like double scaling — confirm intended output.
    result_list.append("Test Acc Mean{}".format(' '.join([str(i*100)[:5] for i in mean_list])))
    result_list.append("Test Acc ci{}".format(' '.join([str(i)[:5] for i in ci_list])))
    print(result_list[0])
    print(result_list[1])
    print("Test Acc Mean{}".format(
        ' '.join([str(i*100)[:5] for i in mean_list])))
    print("Test Acc ci{}".format(' '.join([str(i)[:5] for i in ci_list])))
    # TODO
    save_list_to_txt(os.path.join(args.res_save_path, 'results.txt'), result_list)
