# Apply the trained model to the semi-supervised few-shot setting.

import argparse
import math
from random import shuffle
from re import A, L
from struct import iter_unpack
from numpy.lib.function_base import select
from torch import optim
import torch.nn.functional as F

import torch.nn as nn
from torchvision import transforms
import tqdm
from torch.utils.data import DataLoader, dataloader
from tqdm.utils import _environ_cols_wrapper
from torch.utils.tensorboard import SummaryWriter

from Models.dataloader.samplers import CategoriesSampler
from Models.models.Network import DeepEMD
from Models.models.mixmatch_utils import TransformTwice, SimpleDataset
from Models.utils import *
from Models.io_utils import *
from Models.dataloader.data_utils import *
from Models.models import SemiLoss, WeightEMA
# from Models.models.ici import ICI
from config_utils import eval_config, train_config
import Models.models.global_writer as global_writer

# Default locations for the datasets and the pretrained DeepEMD checkpoint.
DATA_DIR = './datasets'
# DATA_DIR='/home/zhangchi/dataset'
MODEL_DIR = 'deepemd_trained_model/miniimagenet/fcn/max_acc.pth'


# CLI: start from the shared evaluation config and add the semi-supervised /
# MixMatch-specific options below.
parser = eval_config()
parser.add_argument('-data_dir', type=str, default=DATA_DIR)
parser.add_argument('-model_dir', type=str, default=MODEL_DIR)
# ==================================== semi-supervised learning arguments ====================================
parser.add_argument("--unlabel", type=int, default=0,
                    help="每个类别使用的无标记样本的数目, 0代表transduction setting")
parser.add_argument("--inductive", action='store_true', help="当unlabel为0时起效，将任务设置为inductive setting， semi training使用的无标记样本为 support 样本")
# parser.add_argument('--ici_classifier', type=str, default='lr',help='lr/svm.')
# parser.add_argument('--semi_step', type=int, default=1,
#                         help='Select how many unlabeled data for each class in one iteration.')
# parser.add_argument('--semi_version', type=str, default='v1', choices=['v1', 'v2'], help="v1 代表按类别选取固定数目的样本填充支撑集， v2 代表按logits排序选取固定顺序")
parser.add_argument('--show_detail', action="store_true",
                    help="采用半监督模式时, 是否显示每一定次迭代对于query精度的影响")
parser.add_argument('-mse', '--max_semi_epoch', type=int,
                    default=30, dest='mse', help="每个 episode 更新 epoch 数")
parser.add_argument('-temp', '--temperature', type=float,
                    default=0.5, dest='temp', help='sharpen 中使用的 temperature')
parser.add_argument("--add_ema", action='store_true', help="是否使用 ema 模型")  # mainly used to smooth predictions
parser.add_argument("--ema_decay", type=float, default=None, help="指定 ema 平滑程度")
parser.add_argument("--mixmatch", action="store_true", help="是否使用 mixmatch 扩充")
parser.add_argument("--mixmatch_bs", type=int, default=4, help="mixmatch 时使用的 batchsize")
parser.add_argument("--mixmatch_lambda_u", type=float, default=5, help="mixmatch 中未标记样本的最终权重")  # default 5, following TransMatch
parser.add_argument("--mixmatch_alpha", type=float, default=0.75, help="beta 分布的参数")
parser.add_argument("--change_BN", action="store_true", dest="cgbn", help="finetune 阶段是否改变BN层的 running mean与 running var")
parser.add_argument("--remove_mixup", action="store_true", help="验证 mixup 损失的影响")
parser.add_argument("--remove_argumentation", action="store_true", help="验证数据增强的影响")
# ==================================== semi-supervised learning arguments ====================================

args = parser.parse_args()

# Post-process comma-separated CLI options ("a,b,c" -> [a, b, c]).
if args.feature_pyramid is not None:
    args.feature_pyramid = [int(x) for x in args.feature_pyramid.split(',')]
args.patch_list = [int(x) for x in args.patch_list.split(',')]
parse_tune_pretrain(args)

# The data-loading path of this script relies on the mixmatch flag being set.
# Use an explicit raise instead of `assert`, which is stripped under `python -O`.
if not args.mixmatch:
    raise ValueError("args.mixmatch 需要为 True")

args.device = torch.device(
    "cuda:0" if torch.cuda.is_available() else "cpu")

if args.origin:
    # Use the checkpoint shipped with the original paper.
    print("使用论文参数")
    # NOTE(review): osp.join with a single argument is a no-op; presumably a
    # leftover from a longer path expression — confirm against history.
    args.model_dir = osp.join(args.model_dir)
    print("model dir:", args.model_dir)

else:
    format_model_name(args)
    # No longer meta-train 5-way 5-shot (a waste of time).
    # The 1-shot-trained model used to be loaded regardless of the test-time
    # shot count (now disabled; the matching model is used instead).
    # if args.sfc_update_step == 100:
    if args.use_deformconv and args.modulated_deformconv:
        args.model_name += "_modulated_deform"
    elif args.use_deformconv and not args.modulated_deformconv:
        args.model_name += "_deform"

    # Resolve the checkpoint path from the LR scheduler it was meta-trained with.
    if args.sche == "StepLR":
        args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv_StepLR/max_acc.pth'.format(
            model_name=args.model_name, shot=1, way=5)
    elif args.sche == "CosineLR":
        args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv_CosineLR/max_acc.pth'.format(
            model_name=args.model_name, shot=1, way=5)
    # else:
    #     args.model_dir = 'checkpoint/meta_train/miniimagenet/{model_name}/{shot}shot-{way}way_opencv/max_acc.pth'.format(
    #         model_name=args.model_name, shot=1, way=5, sfc_update_step=args.sfc_update_step)

# Fail early if the resolved checkpoint does not exist.
if os.path.exists(args.model_dir):
    print("测试阶段使用此处的模型:{}".format(args.model_dir))
else:
    raise ValueError("未找到预训练模型:{}".format(args.model_dir))

# Build the directory where results and tensorboard logs are saved.  The path
# encodes the run's hyper-parameters so different settings never collide.
# The two non-origin variants of the original only differed by an optional
# "_use_specific_status" tag, so they are collapsed into a single template;
# the resulting path strings are byte-identical to the original's.
if args.origin:
    args.res_save_path = "semi_logits_result_mixmatch_sfclr{sfclr}/{dataset}/resnet_origin/{shot}shot-{way}way_lambdau-{lambdau}_alpha-{alpha}_mse-{mse}_bs-{bs}".format(
        sfclr=args.sfc_lr, dataset=args.dataset, shot=args.shot, way=args.way, lambdau=args.mixmatch_lambda_u, mse=args.mse, alpha=args.mixmatch_alpha, bs=args.mixmatch_bs)
else:
    status_tag = "_use_specific_status" if args.use_specific_status else ""
    args.res_save_path = "semi_logits_result_mixmatch_sfclr{sfclr}/{dataset}/{model_name}/{shot}shot-{way}way_{sche}_unlabel{unlabel}{status}_lambdau-{lambdau}_alpha-{alpha}_mse-{mse}_bs-{bs}".format(
        sfclr=args.sfc_lr, dataset=args.dataset, model_name=args.model_name, shot=args.shot, way=args.way, sche=args.sche, unlabel=args.unlabel, status=status_tag, lambdau=args.mixmatch_lambda_u, mse=args.mse, alpha=args.mixmatch_alpha, bs=args.mixmatch_bs)

# Optional suffixes marking ablation / variant settings.
if args.cgbn:
    args.res_save_path += "_cgbn"

if args.add_ema:
    args.res_save_path += "_ema-{}".format(args.ema_decay)

if args.inductive and args.unlabel == 0:
    args.res_save_path += '_inductive'

if args.remove_mixup:
    args.res_save_path += '_remove-mixup'

if args.remove_argumentation:
    args.res_save_path += '_remove-argumentation'

# exist_ok avoids the check-then-create race of the original
# `if os.path.exists(...): pass / else: os.makedirs(...)` pattern.
os.makedirs(args.res_save_path, exist_ok=True)

# Global tensorboard writer shared with the training utilities.
global_writer.global_count = 0
global_writer.writer = SummaryWriter(osp.join(args.res_save_path, 'tf'))

pprint(vars(args))

# Guard clause: bail out immediately if the checkpoint file is missing.
if not os.path.exists(args.model_dir):
    raise ValueError("找不到模型参数文件:", args.model_dir)
print("使用模型路径:{}".format(args.model_dir))

set_seed(args.seed)
num_gpu = set_gpu(args)
Dataset = set_up_datasets(args)
# ---------------------------------- data section ----------------------------------
test_set = Dataset(args.set, args)
sampler = CategoriesSampler(
    test_set.label, args.test_episode, args.way, args.shot + args.query + args.unlabel)
loader = DataLoader(test_set, batch_sampler=sampler,
                    num_workers=0, pin_memory=True)

# Reflect-pad by 1/8 of the image size then random-crop back — standard
# few-shot augmentation.  --remove_argumentation disables it for ablation.
pad = int(args.image_size / 8)
augmentation_ops = [] if args.remove_argumentation else [
    transforms.Pad((pad, pad, pad, pad), padding_mode="reflect"),
    transforms.RandomCrop(args.image_size),
    transforms.RandomHorizontalFlip(),
]
transform_train = transforms.Compose(augmentation_ops)
transform_val = transforms.Compose([])  # identity transform for evaluation
unlabel_transform = TransformTwice(transform_train)  # two augmented views per unlabeled sample
# ---------------------------------- data section ----------------------------------

# ---------------------------------- model section ----------------------------------
# Build DeepEMD, load the pretrained weights, and wrap for multi-GPU inference.
model = DeepEMD(args)
model = load_model(model, args.model_dir)
model = nn.DataParallel(model, list(range(num_gpu))).cuda()
model.eval()

# (disabled) EMA copy of the model for smoothing predictions:
# if args.add_ema:
#     ema_model = DeepEMD(args)
#     for param in ema_model.parameters():
#         param.detach_()
#     ema_optimizer= WeightEMA(model, ema_model, alpha=args.ema_decay)
#     ema_model = nn.DataParallel(ema_model, list(range(num_gpu)))

# NOTE: Dataset and CategoriesSampler interfaces must agree in the semi setting.
# ---------------------------------- model section ----------------------------------
tqdm_gen = tqdm.tqdm(loader)

# Running accuracy statistics over test episodes.
ave_acc = Averager()
test_acc_record = np.zeros((args.test_episode,))

# Guarantee at least 10 labeled mini-batches per finetuning epoch.
iter_num_per_epoch = max(args.shot * args.way // args.mixmatch_bs, 10)
max_semi_epoch = args.mse
val_period = 1  # evaluate query accuracy every `val_period` epochs
# One bucket per validation point, plus one for the final epoch.
acc_list = [[] for _ in range(int(max_semi_epoch / val_period) + 1)]

# Gradient updates are required here, so the loop is NOT wrapped in torch.no_grad().
for batch_index, batch in enumerate(tqdm_gen, 1):
    # batch = (images, indicator); the dataset's own labels are discarded and
    # episode labels are regenerated per batch below.
    data_origin, indicator = [t.cuda() for t in batch]
    targets = torch.arange(args.way).repeat(args.shot+args.query+args.unlabel).long()[
        indicator[:args.way*(args.shot+args.query+args.unlabel)] != 0]
    k = args.way * args.shot
    data_origin = data_origin[indicator != 0].to(args.device)
    targets = targets.to(args.device)

    if args.show_detail:
        acc_list_temp = []

    model.module.mode = 'encoder'
    # Split the episode: support [way*shot, ...], query [way*query, ...],
    # remainder (if any) is the explicit unlabeled pool.
    data_support_origin, data_query_origin = data_origin[:k], data_origin[k:k+args.query*args.way]
    support_targets = targets[:k]
    query_targets = targets[k:k+args.query*args.way]
    # Choose the unlabeled pool for the semi-supervised finetuning.
    if args.unlabel != 0:
        unlabel_inputs_origin = data_origin[k+args.query*args.way:]
    elif not args.inductive:
        # transductive: the query set itself serves as unlabeled data
        unlabel_inputs_origin = data_query_origin
    else:
        # inductive with unlabel == 0: reuse the support set as unlabeled data
        unlabel_inputs_origin = data_support_origin

    # NOTE(review): DataLoader shuffling calls torch.randperm, which may make
    # results non-reproducible across runs.
    few_shot_labeled_dataset = SimpleDataset(data=data_support_origin, target=support_targets, transform=transform_train)
    few_shot_unlabeled_dataset = SimpleDataset(data=unlabel_inputs_origin, target=None, transform=unlabel_transform)
    labeled_dataloader = DataLoader(few_shot_labeled_dataset, shuffle=True, batch_size=args.mixmatch_bs, num_workers=0, drop_last=True)
    unlabeled_dataloader = DataLoader(few_shot_unlabeled_dataset, shuffle=True, batch_size=args.mixmatch_bs, num_workers=0, drop_last=True)

    # No augmentation when initializing the SFC prototypes (transform_val is identity).
    data_support_origin = transform_val(data_support_origin)
    data_query_origin = transform_val(data_query_origin)

    with torch.no_grad():
        # model.training is already False, so BN layers use running statistics here.
        data_support = model(data_support_origin)
        data_query = model(data_query_origin)

    # Initialize SFC: per-class prototypes as the mean of the support features.
    label_one_hot = label2onehot(support_targets, args.way).cuda()
    label_one_hot = label_one_hot.transpose(0, 1)
    support_shape = data_support.shape
    SFC = torch.mm(label_one_hot, data_support.view(support_shape[0], -1))
    # (the original's trailing `.expand_as(SFC)` was a no-op and is removed)
    SFC = SFC.div(label_one_hot.sum(dim=1, keepdim=True))
    SFC = SFC.view(-1, *support_shape[1:])
    SFC = nn.Parameter(SFC.detach(), requires_grad=True)
    model.module.register_parameter("sfc", SFC)

    model.module.mode = 'meta'
    # SFC gets its own LR; the encoder is finetuned with a small fixed LR.
    optimizer = torch.optim.SGD(
        [
            {'params': [SFC], "lr": args.sfc_lr, "dampening": 0.9, "weight_decay": 0},
            {'params': model.module.encoder.parameters(), "lr": 0.001}
        ], momentum=0.9, nesterov=True, weight_decay=0.0005
    )

    criterion = SemiLoss(max_semi_epoch, lambda_u=args.mixmatch_lambda_u)
    # Query accuracy before any finetuning (epoch-0 baseline).
    if args.show_detail:
        with torch.no_grad():
            logits = get_logits(model, SFC, data_query, num_gpu)
            pred = torch.argmax(logits, dim=1)
            acc_list_temp.append((pred == query_targets).type(
                torch.FloatTensor).mean().item())

    for epoch in range(1, max_semi_epoch+1):
        if args.cgbn:
            model.train()
        train_mixmatch_one_epoch(epoch, iter_num_per_epoch, args.way, labeled_dataloader, unlabeled_dataloader,
                                 model, criterion, optimizer, args.mixmatch_alpha, args.temp, num_gpu, args.cgbn, args.remove_mixup)

        if args.show_detail and epoch % val_period == 0:
            with torch.no_grad():
                if args.cgbn:
                    model.eval()
                logits = get_logits_for_origin_data(model, data_query_origin, num_gpu)
                pred = torch.argmax(logits, dim=1)
                acc_list_temp.append((pred == query_targets).type(
                    torch.FloatTensor).mean().item())

    # BUG FIX: compute the final query logits unconditionally.  The original
    # only defined `logits` (and `acc_list_temp`) under --show_detail and
    # raised NameError otherwise.  This must run BEFORE the pretrained weights
    # are restored below, since the finetuned encoder produced them.
    with torch.no_grad():
        if args.cgbn:
            model.eval()
        logits = get_logits_for_origin_data(model, data_query_origin, num_gpu)

    # The encoder was finetuned for this episode; restore the pretrained weights.
    if args.mixmatch:
        model = load_model(model, args.model_dir, print_mode=False)

    # Per-epoch accuracy bookkeeping only exists in --show_detail mode.
    if args.show_detail:
        for j in range(min(max_semi_epoch // val_period, len(acc_list_temp) - 1)):
            acc_list[j].append(acc_list_temp[j] * 100)
        acc_list[-1].append(acc_list_temp[-1] * 100)
    acc = count_acc(logits, query_targets) * 100
    ave_acc.add(acc)
    test_acc_record[batch_index - 1] = acc
    m, pm = compute_confidence_interval(test_acc_record[:batch_index])
    tqdm_gen.set_description(
        'batch {}: This episode:{:.2f}  average: {:.4f}+{:.4f}'.format(batch_index, acc, m, pm))

# Aggregate per-epoch accuracies into (mean, 95%-CI) pairs, one per bucket.
interval_stats = [compute_confidence_interval(accs) for accs in acc_list]
mean_list = [mean for mean, _ in interval_stats]
ci_list = [ci for _, ci in interval_stats]

# Overall accuracy over all test episodes.
m, pm = compute_confidence_interval(test_acc_record)
mean_text = ' '.join(str(v * 100)[:5] for v in mean_list)
ci_text = ' '.join(str(v)[:5] for v in ci_list)
per_epoch_text = ''.join(
    '\n' + str(["{:.2f}".format(v) for v in accs]) for accs in acc_list)
result_list = [
    'test Acc {:.4f}'.format(ave_acc.item()),
    'Test Acc {:.4f} + {:.4f}'.format(m, pm),
    "Test Acc Mean{}".format(mean_text),
    "Test Acc ci{}".format(ci_text),
    'acc_list{}'.format(per_epoch_text),
]
print(result_list[0])
print(result_list[1])
print(result_list[2])
print(result_list[3])
# Persist the summary next to the tensorboard logs.
save_list_to_txt(os.path.join(args.res_save_path, 'results.txt'), result_list)
