# Apply the model to the semi-supervised few-shot scenario.

import argparse
import math
import scipy.stats

import torch.nn as nn
from torch.nn import modules
from torchvision import transforms
import tqdm
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm.utils import _environ_cols_wrapper

from Models.dataloader.samplers import CategoriesSampler
from Models.models.Network import DeepEMD
from Models.utils import *
from Models.io_utils import *
from Models.dataloader.data_utils import *
# from Models.models.ici import ICI
from config_utils import eval_config
from Models.models.mixmatch_utils import SimpleDataset
from Models.models.fixmatch_utils import TransformFixMatch
DATA_DIR = './datasets'
# DATA_DIR='/home/zhangchi/dataset'
MODEL_DIR = 'deepemd_trained_model/miniimagenet/fcn/max_acc.pth'


# Build the shared evaluation parser and extend it with data/model paths plus
# the semi-supervised-specific options below.
parser = eval_config()
parser.add_argument('-data_dir', type=str, default=DATA_DIR)
parser.add_argument('-model_dir', type=str, default=MODEL_DIR)
# ============== semi-supervised learning parameters ==============
parser.add_argument("--unlabel", type=int, default=0, help="每个类别使用的无标记样本的数目, 0代表transduction setting")
# --inductive and --selfsup are mutually exclusive experiment settings.
setting = parser.add_mutually_exclusive_group()
setting.add_argument("--inductive", action='store_true', help="当unlabel为0时起效，将任务设置为inductive setting， semi training使用的无标记样本为 support 样本")
setting.add_argument("--selfsup", action='store_true', help="将使用无监督样本与有标记样本的并集")
# parser.add_argument('--ici_classifier', type=str, default='lr',help='lr/svm.')
parser.add_argument('--semi_step', type=int, default=1,
                        help='Select how many unlabeled data for each class in one iteration.')
parser.add_argument('--show_detail', action="store_true", help="采用半监督模式时, 是否显示每次选取样本对于query精度的影响")
parser.add_argument('--semi_version', type=str, default='v1', choices=['v1', 'v2'], help="v1 代表按类别选取固定数目的样本填充支撑集， v2 代表按logits排序选取固定顺序")
# parser.add_argument('--ici_dim', type=int, default=10, help='Reduced dimension.')
# parser.add_argument('--ici_embed', type=str, default='pca', help='Dimensionality reduction algorithm.')
# parser.add_argument("--ici_use_glmnet", action="store_true", help="是否使用glmnet计算 path, 否则使用 scikit-learn")
parser.add_argument("--fixmatch", action="store_true", help="使用fixmath finetune")
# parser.add_argument("--fixmatch_lambda_u", type=float, default=1, help="fixmatch 中未标记样本的最终权重") # default 5, following transmatch
parser.add_argument("--msi", type=int, default=5, help="设置max semi iter, 最好为 shot 的整数倍")
parser.add_argument("--finetune_bs", type=int, default=4)
# NOTE: dest="fts" — the value is read later as args.fts, not args.finetune_iters.
parser.add_argument("--finetune_iters", type=int, default=100, dest="fts")
parser.add_argument("--entro_threshold", type=float, default=1.3, help="entropy 阈值")
# ============== semi-supervised learning parameters ==============

args = parser.parse_args()
# feature_pyramid / patch_list arrive as comma-separated strings; split to ints.
if args.feature_pyramid is not None:
    args.feature_pyramid = [int(x) for x in args.feature_pyramid.split(',')]
args.patch_list = [int(x) for x in args.patch_list.split(',')]
parse_tune_pretrain(args)

# Run on the first GPU when available, otherwise fall back to CPU.
args.device = torch.device(
    "cuda:0" if torch.cuda.is_available() else "cpu")

if args.origin:
    # "Use the paper's parameters": keep the externally supplied checkpoint path.
    print("使用论文参数")
    # osp.join with a single argument is effectively a no-op normalization.
    args.model_dir = osp.join(args.model_dir)
    print("model dir:", args.model_dir)

else:
    format_model_name(args)
    # No longer meta-training 5way-5shot (a waste of time).
    # Regardless of whether testing uses 5shot or 1shot, the 1shot-trained
    # model was used (disabled; the matching model is used now).
        # if args.sfc_update_step == 100:
    if args.use_deformconv and args.modulated_deformconv:
        args.model_name += "_modulated_deform"
    elif args.use_deformconv and not args.modulated_deformconv:
        args.model_name += "_deform"

    # Select the checkpoint matching the LR schedule used at meta-train time.
    # NOTE(review): if args.sche is neither StepLR nor CosineLR, model_dir stays
    # at its default — confirm that is intended (the else branch is commented out).
    if args.sche == "StepLR":
        args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv_StepLR/max_acc.pth'.format(
            model_name=args.model_name, shot=1, way=5)
    elif args.sche == "CosineLR":
        args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv_CosineLR/max_acc.pth'.format(
            model_name=args.model_name, shot=1, way=5)
    # else:
    #     args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv/max_acc.pth'.format(
    #         model_name=args.model_name, shot=1, way=5, sfc_update_step=args.sfc_update_step) 

# Fail fast when the resolved checkpoint does not exist.
if os.path.exists(args.model_dir):
    print("测试阶段使用此处的模型:{}".format(args.model_dir))
else:
    raise ValueError("未找到预训练模型:{}".format(args.model_dir))

# Result directory encodes the key hyper-parameters so runs do not collide.
if args.origin:
    args.res_save_path = "semi_logits_result_sfclr{sfclr}_no_reinit_two_phase_entro/{dataset}/resnet_origin/{shot}shot-{way}way/".format(
        sfclr=args.sfc_lr, dataset=args.dataset, shot=args.shot, way=args.way)

else :
    if args.use_specific_status:
        args.res_save_path = "semi_logits_result_sfclr{sfclr}_no_reinit_two_phase_entro/{dataset}/{model_name}/{shot}shot-{way}way_{sche}_unlabel{unlabel}_fts{fts}_msi{msi}_use_specific_status/".format(
            sfclr=args.sfc_lr, dataset=args.dataset, model_name=args.model_name, shot=args.shot, way=args.way, fts=args.fts, msi=args.msi, sche=args.sche, unlabel=args.unlabel)
    else:
        args.res_save_path = "semi_logits_result_sfclr{sfclr}_no_reinit_two_phase_entro/{dataset}/{model_name}/{shot}shot-{way}way_{sche}_unlabel{unlabel}-fts{fts}_msi{msi}/".format(
            sfclr=args.sfc_lr, dataset=args.dataset, model_name=args.model_name, shot=args.shot, way=args.way, fts=args.fts, msi=args.msi, sche=args.sche, unlabel=args.unlabel)

args.res_save_path = os.path.join(args.res_save_path, "step{}_{}".format(args.semi_step, args.semi_version))

# Suffix the directory with the active setting flags.
if args.inductive and args.unlabel==0:
    args.res_save_path += '_inductive'

if args.selfsup:
    args.res_save_path += "_selfsup"

if args.fixmatch:
    args.res_save_path += "_fixmatch"

# Create the result directory if needed. exist_ok=True replaces the original
# exists()/makedirs() pair, avoiding its check-then-create race condition.
os.makedirs(args.res_save_path, exist_ok=True)

pprint(vars(args))

# Redundant with the checkpoint-existence check earlier in the file, but kept
# for parity with the original flow: fail fast when the model file is missing.
if os.path.exists(args.model_dir):
    print("使用模型路径:{}".format(args.model_dir))
else:
    # Fix: format the path into the message; passing it as a second positional
    # argument made the exception carry a tuple instead of a readable string.
    raise ValueError("找不到模型参数文件:{}".format(args.model_dir))
# Global step counter and TensorBoard writer used by the finetune helpers.
global_writer.global_count = 0
global_writer.writer = SummaryWriter(osp.join(args.res_save_path, 'tf'))

# Fix RNG seeds for reproducibility, then configure GPUs and the dataset class.
set_seed(args.seed)
num_gpu = set_gpu(args)
Dataset = set_up_datasets(args)
# ---------------------------------- data ----------------------------------
test_set = Dataset(args.set, args) 
# Each episode samples way * (shot + query + unlabel) items per category.
sampler = CategoriesSampler(
    test_set.label, args.test_episode, args.way, args.shot + args.query + args.unlabel)
loader = DataLoader(test_set, batch_sampler=sampler,
                    num_workers=0, pin_memory=True)
# Augmentation applied while finetuning on the labeled support samples.
transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=args.image_size,
                              padding=int(args.image_size*0.125),
                              padding_mode='reflect'),
    ])
# No extra transform is needed at validation time (identity Compose).
transform_val = transforms.Compose([
    ])
if args.fixmatch:
    # FixMatch pairs weak/strong augmentations of each unlabeled image.
    unlabel_transform = TransformFixMatch(image_size=args.image_size)
# ---------------------------------- data ----------------------------------

# model
# Build DeepEMD, load the pretrained checkpoint, and wrap it for multi-GPU eval.
model = DeepEMD(args)
model = load_model(model, args.model_dir)
model = nn.DataParallel(model, list(range(num_gpu)))
model = model.cuda()
model.eval()

# test dataset
# NOTE: the Dataset and CategoriesSampler interfaces must match each other
# under the semi-supervised setting.
tqdm_gen = tqdm.tqdm(loader)
# label of query images
ave_acc = Averager()
test_acc_record = np.zeros((args.test_episode,))

# iterations = math.ceil(args.unlabel/args.ici_step) + \
#     2 if args.unlabel != 0 else math.ceil(15/args.ici_step) + 2
# max_semi_iter = 5
# Cap the semi-supervised iterations by --msi and by the number of steps needed
# to exhaust the unlabeled pool (or the query pool in the transduction setting).
max_semi_iter = min(args.msi, math.ceil(args.unlabel/args.semi_step)) if args.unlabel != 0 else min(args.msi,math.ceil(args.query/args.semi_step))
# One accuracy column per phase: init, post-finetune, then each semi iteration.
acc_list = [[] for _ in range(max_semi_iter + 2)]
# with torch.no_grad():
for batch_index, batch in enumerate(tqdm_gen, 1):
    # The dataset's labels are never used directly; targets are rebuilt per batch.
    # Re-seed numpy each episode so the sample-selection process is identical
    # across repeated runs of the experiment.
    args.seed += 1
    np.random.seed(args.seed)
    data_origin, indicator = [_.cuda() for _ in batch]
    # Episode layout: way*shot support, then way*query query, then the unlabeled
    # pool; `indicator` masks out invalid entries.
    targets = torch.arange(args.way).repeat(args.shot+args.query+args.unlabel).long()[
        indicator[:args.way*(args.shot+args.query+args.unlabel)] != 0]
    k = args.way * args.shot
    data_origin = data_origin[indicator != 0].to(args.device)
    targets = targets.to(args.device)
    # train_inputs = data[:k]
    # train_targets = targets[:k].cpu().numpy()
    # test_inputs = data[k:k+args.query*args.way]
    # test_targets = targets[k:k+args.query*args.way].cpu().numpy()

    model.module.mode = 'encoder'
    # data = model(data)
    # e.g. shot: 5,3,84,84  query: 75,3,84,84 — TODO confirm for other configs
    data_support_origin, data_query_origin = data_origin[:k], data_origin[k:k+args.query*args.way]

    support_targets = targets[:k]
    query_targets = targets[k:k+args.query*args.way]
    # Choose the unlabeled pool by setting:
    #  - unlabel > 0: a dedicated unlabeled split follows the query samples;
    #  - unlabel == 0, transduction: the query set itself is the pool;
    #  - unlabel == 0, inductive: the support set is reused as the pool.
    if args.unlabel != 0:
        unlabel_inputs_origin = data_origin[k+args.query*args.way:]
    elif args.unlabel == 0 and not args.inductive:
        unlabel_inputs_origin = data_query_origin
    elif args.unlabel ==0 and args.inductive:
        unlabel_inputs_origin = data_support_origin

    if args.selfsup:
        # Self-supervised variant: union of labeled support and unlabeled pool.
        unlabel_inputs_origin = torch.cat((data_support_origin, unlabel_inputs_origin))

    # Combining DataLoader with a Dataset invokes torch.randperm, which may make
    # the results non-reproducible.
    few_shot_labeled_dataset = SimpleDataset(data=data_support_origin, target=support_targets, transform=transform_train)
    labeled_dataloader = DataLoader(few_shot_labeled_dataset, shuffle=True, batch_size=args.finetune_bs, num_workers=0, drop_last=True)
    if args.fixmatch:
        unlabel_dataset = SimpleDataset(data=unlabel_inputs_origin, target=None, transform=unlabel_transform)
        unlabel_dataloader = DataLoader(unlabel_dataset, shuffle=True, batch_size=args.finetune_bs, num_workers=0, drop_last=True)
    else:
        unlabel_dataset = None
        unlabel_dataloader = None
    # No augmentation should be used when initializing the SFC; transform_val
    # is an empty Compose (identity).
    data_support_origin = transform_val(data_support_origin)
    data_query_origin = transform_val(data_query_origin)

    with torch.no_grad():
        # model.eval() was called earlier, so the BN layers are already in
        # inference mode and need no extra handling here.
        # model.apply(freeze_bn)
        data_support = model(data_support_origin)
        data_query = model(data_query_origin)
        # model.apply(activate_bn)

    # Initialize the SFC (support-set class prototypes).
    label_one_hot = label2onehot(support_targets, args.way).cuda()
    label_one_hot = label_one_hot.transpose(0, 1)
    support_shape = data_support.shape
    # init the proto: class-wise mean of the flattened support features
    SFC = torch.mm(label_one_hot, data_support.view(support_shape[0], -1))
    # NOTE(review): expand_as(SFC) looks like a no-op here, since the division
    # already broadcasts back to SFC's own shape — confirm intent.
    SFC = SFC.div(label_one_hot.sum(dim=1, keepdim=True)).expand_as(SFC)
    SFC = SFC.view(-1, *support_shape[1:])
    SFC = nn.Parameter(SFC.detach(), requires_grad=True)
    model.module.register_parameter("sfc", SFC)
    model.module.mode = 'meta'
    # Finetune both the SFC prototypes and the encoder (smaller lr for encoder).
    optimizer = torch.optim.SGD(
        [
        {'params': [SFC], "lr":args.sfc_lr, "dampening":0.9, "weight_decay": 0},
        {'params': model.module.encoder.parameters(), "lr": 0.001},
        ], momentum=0.9, nesterov=True, weight_decay=0.0005
    )
    support_set = []  # indices of unlabeled samples already pseudo-labeled
    if args.show_detail:
        acc_list_temp = []
    # Two preliminary evaluations first: one right after SFC initialization
    # (no finetune), one after the initial support-set finetune (~100 steps).

    if args.show_detail:
        with torch.no_grad():
            logits = get_logits(model, SFC, data_query, num_gpu)
            pred = torch.argmax(logits, dim=1)
            acc_list_temp.append((pred == query_targets).type(torch.FloatTensor).mean().item())
    # SFC = model.module.get_sfc_for_semi_no_reinit(SFC, data_support, support_targets, optimizer, sfc_update_step=args.sfc_update_step)
    # The first phase: args.fts augmented finetune steps on the labeled data.
    aug_finetune(args.fts, labeled_dataloader, model, optimizer, num_gpu, unlabel_dataloader, args.fixmatch)

    with torch.no_grad():
        # Re-extract features with the finetuned encoder.
        data_support = get_features_for_origin_data(model, data_support_origin, dense=True)
        data_query = get_features_for_origin_data(model, data_query_origin, dense=True)
        unlabel_inputs = get_features_for_origin_data(model, unlabel_inputs_origin, dense=True)
    if args.show_detail:
        with torch.no_grad():
            logits = get_logits(model, SFC, data_query, num_gpu)
            pred = torch.argmax(logits, dim=1)
            acc_list_temp.append((pred == query_targets).type(torch.FloatTensor).mean().item())

    with torch.no_grad():
        logits_unlabel = get_logits(model, SFC, unlabel_inputs, num_gpu, bs=32)
    # the second phase: iteratively pseudo-label confident unlabeled samples
    # and grow the support set with them.
    for iter_num in range(1, max_semi_iter + 1):

        distribute = torch.softmax(logits_unlabel, dim=1)
        # Prediction entropy per unlabeled sample; low entropy = confident.
        entro = scipy.stats.entropy(distribute.cpu().numpy(), axis=1)

        selected = np.zeros(args.way)
        selected_list = [[] for _ in range(args.way)]
        # selected_label = [[] for _ in range(args.way)]
        index = torch.arange(len(unlabel_inputs_origin))
        values, pseudo_y = logits_unlabel.max(1)
        # Rank candidates by top logit; tuple = (value, pseudo_label, index, entropy).
        logits_zip = list(zip(values, pseudo_y, index, entro))
        logits_rank = sorted(logits_zip, key=lambda x:x[0], reverse=True)
        if args.semi_version == 'v1':
            for i in range(len(logits_rank)):
            # `selected` counts, per class, how many samples have been chosen.
            # Some class may have no acceptable candidates left; v1 grants each
            # class at most `semi_step` new samples per iteration, skipping
            # samples already taken or above the entropy threshold.
                if logits_rank[i][2] in support_set:
                    continue
                if logits_rank[i][3] >= args.entro_threshold:
                    continue
                if selected[logits_rank[i][1]] < args.semi_step:
                    selected[logits_rank[i][1]] += 1
                    selected_list[logits_rank[i][1]].append(logits_rank[i][2].item())
                    support_set.append(logits_rank[i][2].item())
                if np.sum(selected >= args.semi_step) == args.way:
                    break

        elif args.semi_version == 'v2':
            # v2: take the top (step * way) samples globally by logit ranking,
            # regardless of class balance.
            count = 0
            for i in range(len(logits_rank)):
                if logits_rank[i][2] in support_set:
                    continue
                if logits_rank[i][3] >= args.entro_threshold:
                    continue
                selected[logits_rank[i][1]] += 1
                selected_list[logits_rank[i][1]].append(logits_rank[i][2].item())
                support_set.append(logits_rank[i][2].item())
                count += 1
                if count >= (args.semi_step * args.way):
                    break
        # selected_list holds, per class, the indices of the chosen samples.
        # for i, n in enumerate(selected_list):
        #     if n:
        #         selected_label[i].append(label2onehot(i))
        # for i, label_temp in enumerate(selected_label):
        #     if label_temp:
        #         selected_label[i] = torch.stack(label_temp)

        # NOTE(review): torch.cat raises on an empty list if no sample at all
        # passed the entropy filter — confirm this cannot happen in practice.
        selected_sample = torch.cat([unlabel_inputs[samples] for samples in selected_list if samples], dim=0)
        presudo_label = torch.cat([pseudo_y[samples] for samples in selected_list if samples], dim=0)
        data_support = torch.cat((data_support, selected_sample), dim=0) # [args.shots*args.way + args.step*i*args.way, 640, 5, 5]
        support_targets = torch.cat((support_targets, presudo_label), dim=0) # [args.shots*args.way + args.step*i*args.way,]

        SFC = model.module.get_sfc_for_semi_no_reinit(SFC, data_support, support_targets, optimizer, sfc_update_step=10)
        if args.show_detail:
            with torch.no_grad():
                logits = get_logits(model, SFC, data_query, num_gpu, bs=32)
                pred = torch.argmax(logits, dim=1)
                acc_list_temp.append((pred == query_targets).type(torch.FloatTensor).mean().item())

    # Restore the pristine checkpoint so the next episode starts fresh.
    if True:
        model = load_model(model, args.model_dir, print_mode=False)

    # NOTE(review): acc_list_temp and logits are only assigned when --show_detail
    # is set; without that flag the lines below raise NameError — confirm the
    # script is always run with --show_detail.
    for j in range(min(max_semi_iter+1,len(acc_list_temp)-1)):
        acc_list[j].append(acc_list_temp[j] * 100)
    acc_list[-1].append(acc_list_temp[-1] * 100)
    acc = count_acc(logits, query_targets) * 100
    ave_acc.add(acc)
    test_acc_record[batch_index - 1] = acc
    m, pm = compute_confidence_interval(test_acc_record[:batch_index])
    tqdm_gen.set_description(
        'batch {}: This episode:{:.2f}  average: {:.4f}+{:.4f}'.format(batch_index, acc, m, pm))

# Summarize accuracy per phase: mean and 95% confidence interval for every
# column of acc_list (init, post-finetune, and each semi iteration).
interval_pairs = [compute_confidence_interval(accs) for accs in acc_list]
mean_list = [pair[0] for pair in interval_pairs]
ci_list = [pair[1] for pair in interval_pairs]

# Overall accuracy over all recorded episodes.
m, pm = compute_confidence_interval(test_acc_record)
# Build the report lines once so stdout and the results file stay in sync.
mean_line = "Test Acc Mean{}".format(' '.join([str(i*100)[:5] for i in mean_list]))
ci_line = "Test Acc ci{}".format(' '.join([str(i)[:5] for i in ci_list]))
result_list = [
    'test Acc {:.4f}'.format(ave_acc.item()),
    'Test Acc {:.4f} + {:.4f}'.format(m, pm),
    mean_line,
    ci_line,
]
# One formatted row of per-episode accuracies for every phase column.
a = "".join("\n" + str(["{:.2f}".format(j) for j in i]) for i in acc_list)
result_list.append('acc_list{}'.format(a))
print(result_list[0])
print(result_list[1])
print(mean_line)
print(ci_line)
# TODO
save_list_to_txt(os.path.join(args.res_save_path, 'results.txt'), result_list)

