import copy
from itertools import count
import os
import pickle

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm

import dnnlib
import legacy
from segmentation.dataset import MR_slice
from segmentation.utils import *
from segmentation.dataset import apply_trans
from scipy.optimize import curve_fit


@click.command()
# train ---------------------------------------------------------------------
@click.option('--n_train',                  help='N train in a row', type=int, default=3, show_default=True)
@click.option('--mode',                     help='do what', type=click.Choice(['shots', 'SegNet', 'U-Net']), required=True)
@click.option('--save_dir',                 help='Network pickle save', required=True, metavar='DIR')
@click.option('--out_dir',                 help='result save', metavar='DIR')
@click.option('--data_dir',                 help='Training data save', metavar='DIR')
@click.option('--supr_dir',                 help='Training data save', metavar='DIR')
@click.option('--shots',                    help='how many shots training', type=int, default=1, show_default=True)
@click.option('--threshold',                help='shots saving threshold', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--test',                     help='do test?', is_flag=True)
@click.option('--aug',                      help='aug?', is_flag=True)
@click.option('--dropout',                      help='aug?', is_flag=True)
@click.option('--cutout',                      help='aug?', is_flag=True)
@click.option('--resize',                      help='aug?', is_flag=True)
@click.option('--self_attn',                      help='self_attn?', is_flag=True)
# getShots ------------------------------------------------------------------
@click.option('--supervised_pkl',           help='Supervised network pickle save', metavar='DIR')
# getSeg --------------------------------------------------------------------
@click.option('--iters',                   help='Number of iters', type=int, default=1000, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--which_net',                help='network', type=click.Choice(['S', 'M', 'L', 'BiFPN']), default='S')
@click.option('--which_repre_layers',       help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
# label-correction hyper-parameters -----------------------------------------
@click.option('--n_iters_iou',                    help='how many shots training', type=int, default=100, show_default=True)
@click.option('--n_iters_fit',                help='how many shots training', type=int, default=1000, show_default=True)
@click.option('--n_iters_correct',                help='how many shots training', type=int, default=50, show_default=True)
@click.option('--correct_loss',                help='how many shots training', type=int, default=50, show_default=True)
@click.option('--r',                              help='threshold for f(t)', type=float, default=1, show_default=True)
@click.option('--ct',                              help='threshold for confidence', type=float, default=1, show_default=True)
@click.option('--lr_seg',                              help='threshold for confidence', type=float, default=0.0001, show_default=True)
# NOTE(review): --correct_class has no type/help; get_UNet parses it as a
# comma-separated string of class indices (see correct_class.split(',')).
@click.option('--correct_class'                )
# ---------------------------------------------------------------------------
@click.option('--seed',                     help='Random seed', type=int, required=True)
# @click.option('--resolution',               help='Resolution of image', type=int, default=256, show_default=True)
# @click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
def main(mode, **kwargs):
        """CLI entry point: `mode` is consumed here and everything else is
        forwarded to get_UNet (the `shots`/`SegNet` modes are not handled
        in this file)."""
        get_UNet(**kwargs)

def visualize_(out):
    """Colorize an integer label map into an RGB image.

    `out` is a [B, H, W] tensor of class labels; each label is painted with
    its RGB triple from the module-level `colors` palette (scaled to [0, 1]).
    Returns the [B, 3, H, W] color image and the number of palette classes
    actually present in `out`.
    """
    batch = out.shape[0]
    side = out.shape[-1]
    n_present = 0
    canvas = torch.zeros((batch, 3, side, side)).to(device=out.device)
    for label in colors.keys():
        tint = torch.tensor(colors[label]).to(device=out.device).view(1, 3, 1, 1) / 255
        layer = torch.where(out == label, 1, 0).unsqueeze(1).repeat(1, 3, 1, 1) * tint
        if layer.sum() > 0:
            n_present += 1
        # later palette entries overwrite earlier ones where they overlap
        canvas = torch.where(layer > 0, layer, canvas)

    return canvas, n_present


def apply_trans_batch(image, mask, chn):
    """Apply the shared augmentation `apply_trans` to each (image, mask)
    pair in a batch, transforming both jointly so they stay aligned.

    `image` is assumed to be in [-1, 1]; it is rescaled to [0, 1] for the
    transform and back afterwards. Masks are cast back to long labels.
    """
    batch = image.shape[0]
    image = (image + 1) / 2  # [-1, 1] -> [0, 1] for the transform
    for idx in range(batch):
        stacked = torch.cat([image[idx], mask[idx].unsqueeze(0)], dim=0)
        img_t, msk_t = apply_trans(stacked).split([chn, 1], dim=0)
        image[idx] = img_t
        mask[idx] = msk_t.squeeze(0).to(torch.long)

    image = (image - 0.5) / 0.5  # back to [-1, 1]

    return image, mask


def update_iou_stat(predict, gt, TP, P, T, num_classes = 17):
    """
    Accumulate per-class true-positive / prediction / ground-truth pixel
    counts (consumed later by `compute_iou`).

    :param predict: the pred of each batch, numpy array after argmax, shape (b, h, w)
    :param gt: the gt label of the batch, numpy array, shape (b, h, w)
    :param TP: True positive counts, list indexed by class (updated in place)
    :param P: positive prediction counts, list indexed by class (updated in place)
    :param T: True seg counts, list indexed by class (updated in place)
    :param num_classes: number of classes (int -> iterates range(num_classes)),
        OR an iterable of explicit class indices. The training loop passes the
        `correct_class` list here; the original `range(num_classes)` raised
        TypeError for that call, so both forms are now accepted.
    :return: TP, P, T
    """
    # only pixels whose ground truth is foreground (label > 0) are counted
    cal = gt > 0

    # pixels predicted correctly within the foreground region
    mask = (predict == gt) * cal

    # BUGFIX: accept either an int (original behavior) or a list of classes.
    classes = range(num_classes) if isinstance(num_classes, int) else num_classes
    for i in classes:
        P[i] += np.sum((predict == i) * cal)
        T[i] += np.sum((gt == i) * cal)
        TP[i] += np.sum((gt == i) * mask)

    return TP, P, T


def compute_iou(TP, P, T, classes, out_chn=17):
    """
    Convert accumulated TP / P / T pixel counts into per-class IoU.

    :param TP: true-positive counts per class
    :param P: positive-prediction counts per class
    :param T: ground-truth counts per class
    :param classes: class indices to evaluate; all others stay 0
    :param out_chn: total number of output channels (length of the result)
    :return: list of IoU values, one slot per channel
    """
    iou_per_class = [0] * out_chn
    for cls in classes:
        # epsilon in the denominator guards against 0/0
        denom = T[cls] + P[cls] - TP[cls] + 1e-10
        iou_per_class[cls] = TP[cls] / denom
    return iou_per_class


# f(t) in paper
# f(t) in paper
def curve_func(x, a, b, c):
    """Saturating learning curve f(t) = a * (1 - exp(-x^b / c))."""
    decay = np.exp(-1 / c * x ** b)
    return a * (1 - decay)


def fit(func, x, y):
    """Least-squares fit of `func(x, a, b, c)` to the samples (x, y).

    Later samples are weighted more heavily via a geometrically decreasing
    sigma ramp; parameters a and b are bounded to [0, 1], c to [0, inf).
    Returns the fitted (a, b, c) tuple.
    """
    weights = np.geomspace(1, .1, len(y))
    popt, _pcov = curve_fit(
        func, x, y,
        p0=(1, 1, 1),
        method='trf',
        sigma=weights,
        absolute_sigma=True,
        bounds=([0, 0, 0], [1, 1, np.inf]),
    )
    return tuple(popt)


def derivation(x, a, b, c):
    """Analytic derivative of `curve_func` w.r.t. x:
    f'(x) = a*b/c * exp(-x^b / c) * x^(b-1)."""
    x = x + 1e-6  # numerical robustness (avoids x == 0 in x ** (b - 1))
    envelope = np.exp(-1 / c * x ** b)
    return a * b * 1 / c * envelope * (x ** (b - 1))


def label_update_epoch(ydata_fit, threshold=0.9, eval_interval=100, num_iter_per_epoch=1000):
    """Fit the recorded IoU history to the curve f(t) and return the epoch at
    which the label correction should be triggered (when the curve's slope has
    flattened relative to its initial slope)."""
    # ydata_fit holds the continuously recorded IoU values; they are assumed
    # to follow the curve f(t)
    # x-axis values; if num_iter_per_epoch//eval_interval==0, the count equals len(ydata_fit)
    xdata_fit = np.linspace(0, len(ydata_fit) * eval_interval / num_iter_per_epoch, len(ydata_fit))
    # fitted parameter values of f(t)
    a, b, c = fit(curve_func, xdata_fit, ydata_fit)

    # early learning: only consider the first 15 epochs
    epoch = np.arange(1, 16)
    # y_hat = curve_func(epoch, a, b, c)

    # relative change of the curve's slope over these epochs w.r.t. the slope
    # at epoch 1
    relative_change = abs(abs(derivation(epoch, a, b, c)) - abs(derivation(1, a, b, c))) / abs(derivation(1, a, b, c))
    # changes larger than 100% are treated as noise and zeroed out
    relative_change[relative_change > 1] = 0
    update_epoch = np.sum(relative_change <= threshold) + 1
    return update_epoch  # , a, b, c


def if_update(iou_value, current_epoch, threshold=0.90, eval_interval=100, num_iter_per_epoch=1000):
    """Return True once `current_epoch` has reached the correction epoch
    estimated by `label_update_epoch` from the IoU history."""
    trigger_epoch = label_update_epoch(
        iou_value,
        threshold=threshold,
        eval_interval=eval_interval,
        num_iter_per_epoch=num_iter_per_epoch,
    )
    return current_epoch >= trigger_epoch


def get_UNet(
    n_train,
    save_dir,
    out_dir,
    data_dir,
    supr_dir,
    which_net,
    which_repre_layers,
    shots,
    threshold,
    iters, 
    batch_size,
    split,
    length,
    w_steps,
    combine,
    seed,
    multi_class,
    aug,
    dropout,
    cutout,
    resize,
    self_attn,
    n_iters_iou,
    n_iters_fit,
    n_iters_correct,
    r,
    ct,
    correct_loss,
    lr_seg,
    correct_class,
    **kwargs
):
    """Train a U-Net on GAN-generated (image, label) pairs while
    progressively correcting the label-producing Seg network.

    For each of `n_train` runs: sample images from the StyleGAN generator
    `G`, label them with `Seg`, train a fresh U-Net on those pairs, track
    per-class IoU between the U-Net predictions and the Seg labels, and —
    once a class's IoU curve f(t) flattens (see `if_update`) — fine-tune
    `Seg` for `n_iters_correct` steps on high-confidence regions where the
    two networks agree for that class. Validates/tests on real MR slices
    and writes checkpoints, TensorBoard logs and visualizations under a
    new experiment folder inside `out_dir`.

    NOTE(review): `length`, `w_steps`, `self_attn` and `kwargs` are
    accepted but never read in this function body.
    """

    import warnings
    warnings.filterwarnings('ignore')

    # make dir =====================================================================
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print(f'make output dir: {out_dir}')
    # experiment id = number of existing entries in out_dir
    id = len(os.listdir(out_dir))
    dataset_name = 'CANDI' if 'CANDI' in data_dir else 'OASIS'
    # `correct_class` arrives as a comma-separated string of class indices
    correct_class = [int(x) for x in correct_class.split(',')]
    n_correct_class = len(correct_class)
    # background (class 0) must never be a correction target
    assert 0 not in correct_class
    folder_dir = os.path.join(out_dir, f'exp{id}-cr-{dataset_name}-{correct_loss}-{n_iters_iou}-{lr_seg}-{n_correct_class}')

    # encode the active augmentation flags in the folder name
    for flag, flag_name in zip([dropout, cutout, aug, resize], ['dropout', 'cutout', 'aug', 'resize']):
        if flag:
            folder_dir += f'-{flag_name}'

    assert not os.path.exists(folder_dir)
    os.mkdir(folder_dir)
    os.mkdir(os.path.join(folder_dir, 'checkpoint'))
    os.mkdir(os.path.join(folder_dir, 'visualization'))
    os.mkdir(os.path.join(folder_dir, 'runs'))
    print('make dir done!')

    # record the correction hyper-parameters for this experiment
    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write(f'n_iters_iou: {n_iters_iou}\n')
        log.write(f'n_iters_fit: {n_iters_fit}\n')
        log.write(f'n_iters_correct: {n_iters_correct}\n')
        log.write(f'r: {r}\n')
        log.write(f'ct: {ct}\n')
        log.write(f'lr_seg: {lr_seg}\n')
        log.write('=================================\n')

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    get_representation = init_get_representation(G, which_repre_layers, 256, 'random')
    print('load generator done!')

    # init Seg ===============================================================
    # reconstruct the pickle filename the Seg network was saved under
    pkl_name = f'Seg_{which_net}'
    if dropout:
        pkl_name += '_drop'
    if cutout:
        pkl_name += '_cut'
    pkl_name += f'_{shots}s_cb{combine}_{which_repre_layers}.pkl'

    # init Seg ===============================================================
    # fully supervised reference network, used only for visualization below
    with open(supr_dir, 'rb') as f:
        Supr_net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('load Supr_net done!')

    # init dataaset ===============================================================
    # split 2:1 first, then choose n shot for training, ensure same test set with U-Net training
    # NOTE(review): assumes 128 slice files per sample under data_dir/images — confirm
    split_length = len(os.listdir(os.path.join(data_dir, 'images')))//128
    split = [int(s) for s in split]
    split = [split_length*s//sum(split) for s in split]
    # give any rounding remainder to the validation split
    split[1] = split_length - split[0] - split[2]
    print(f'dataset split: {split}')

    out_ch = 17
    sum_dice = 0
    sum_dice_class = [0]*(out_ch-1)
    # NOTE(review): `correct_counter` appears unused in this function
    correct_counter = {
        1:2,
        2:1
    }

    import random
    for i_train in range(n_train):
        # reseed per run so each run gets a reproducible but distinct shuffle
        torch.manual_seed(i_train)
        random.seed(28*i_train)
        samples = list(range(split_length))
        random.shuffle(samples)

        # reload the original Seg network at the start of every run
        with open(os.path.join(save_dir, f'handmark_{threshold}', pkl_name), 'rb') as f:
            Seg = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
        print('load Seg done!')

        val_loader = DataLoader(
            MR_slice(
                data_dir=data_dir,
                aug=False,
                sample=samples[split[0]:split[0]+split[1]],
            ),
            batch_size=4, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
        )

        test_loader = DataLoader(
            MR_slice(
                data_dir=data_dir,
                aug=False,
                sample=samples[split[0]+split[1]:split_length],
            ),
            batch_size=4, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
        )

        print(f'val: {samples[split[0]:split[0]+split[1]]}\ntest: {samples[split[0]+split[1]:split_length]}')

        chn = 1
        print(f'image channel: {chn}')

        torch.manual_seed(seed)
        criterion = nn.CrossEntropyLoss() if multi_class else nn.BCEWithLogitsLoss()

        net = get_network('U-Net', chn, out_ch).cuda()
        # net = UNet_attn(chn, out_ch).cuda()
        # BiFPN representations are already at a common scale; others need resizing
        resize_repre = which_net != 'BiFPN'
        print('load U-Net done!')

        optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3)

        # separate optimizer for fine-tuning the Seg label network
        optimizer_seg = optim.Adam(Seg.parameters(), lr=lr_seg, betas=(0.9, 0.999), weight_decay=0.001)

        best_dice = -1
        best_dice_epoch = -1

        writer = SummaryWriter(os.path.join(folder_dir, 'runs', f'{i_train}_train'))

        # classes already corrected (0 seeds the set; background is never corrected)
        Updated_class_list = set([0])
        # record each class's IoU_npl every n iters, forming the curve f(t)
        IoU_npl_dict = {}
        for i in range(out_ch):
            IoU_npl_dict[i] = []

        # accumulators for U-Net-vs-SegLabel IoU statistics
        TP_npl = [0] * out_ch
        P_npl  = [0] * out_ch
        T_npl  = [0] * out_ch

        with tqdm(range(iters), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
            for iter in range(iters):
                net.train()

                optimizer.zero_grad()
                # prepare image mask
                with torch.no_grad():
                    z = torch.randn(batch_size, G.z_dim).cuda()
                    w = G.mapping(z, None)  # [N, L, C]
                    image, representation = get_representation(w, resize_repre)
                    image = torch.clamp(image, -1, 1)
                    mask = Seg(representation)

                    del representation

                # # norm for ds
                # for s in range(image.shape[0]):
                #     image[s,0] = (image[s,0] - image[s,0].min())/(image[s,0].max()-image[s,0].min()+1e-7)*2-1

                if resize:
                    image = F.interpolate(image, (160,160), mode='bilinear')
                    mask = F.interpolate(mask, (160,160), mode='nearest')

                if multi_class:
                    mask = mask.argmax(1)

                if aug:
                    image, mask = apply_trans_batch(image, mask, chn)

                pred = net(image)

                loss = criterion(pred, mask)
                loss.backward()
                optimizer.step()

                # periodic qualitative snapshot of image / label / prediction
                if iter%100==0:
                    if chn==1:
                        save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)
                    elif chn==3:
                        save_image(torch.cat([(image+1)/2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)

                # Accumulate IOU ===================================================
                # accumulate TP / P / T over iters, then compute IoU together
                # NOTE(review): passes the `correct_class` list where the
                # `num_classes` parameter is declared — confirm update_iou_stat
                # accepts an iterable of class indices
                if len(Updated_class_list) < n_correct_class:
                    TP_npl, P_npl, T_npl = update_iou_stat(torch.argmax(pred, dim=1).detach().cpu().numpy(), mask.detach().cpu().numpy(), TP_npl, P_npl, T_npl, correct_class)
                # gt_supr = Supr_net(image)

                # IOU ===================================================
                if iter % n_iters_iou == 0:
                    # IoU for each class from the accumulated counts
                    IoU_npl = compute_iou(TP_npl, P_npl, T_npl, correct_class)
                    # reset the accumulators
                    TP_npl = [0] * out_ch
                    P_npl =  [0] * out_ch
                    T_npl =  [0] * out_ch

                    for i in correct_class:
                        # record the IoU value for curve fitting
                        IoU_npl_dict[i].append(IoU_npl[i])
                        writer.add_scalar(f'class{i}', IoU_npl[i], global_step=iter)

                # Correct ===================================================
                if iter % n_iters_fit == 0 and len(Updated_class_list) < n_correct_class:
                    # classes not yet corrected that are ready for correction
                    IoU_npl_indx = set()
                    for class_idx in correct_class:
                        # each class is corrected only once
                        if not class_idx in Updated_class_list:
                            update_sign = if_update(np.array(IoU_npl_dict[class_idx]), iter//n_iters_fit, r, n_iters_iou, n_iters_fit)
                            if update_sign:
                                IoU_npl_indx.add(class_idx)

                    # if only the background class is selected, do not update or eval
                    if len(IoU_npl_indx) > 0:
                        for i_c, class_idx in enumerate(IoU_npl_indx):
                            print('Correct', class_idx, iter//n_iters_fit)
                            # correction phase:
                            # fine-tune Seg for n_iters_correct iterations
                            Seg = Seg.train().requires_grad_(True)
                            net.eval()
                            # for iter_seg in tqdm(range(n_iters_correct), total=n_iters_correct, desc='Correction', unit='batch', leave=False):
                            pbar_seg = tqdm(range(n_iters_correct), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True, total=n_iters_correct, desc='Correction', unit='batch', leave=False)
                            for iter_seg in pbar_seg:
                                optimizer_seg.zero_grad()

                                # prepare image mask
                                with torch.no_grad():
                                    z = torch.randn(batch_size, G.z_dim).cuda()
                                    w = G.mapping(z, None)  # [N, L, C]
                                    image, representation = get_representation(w, resize_repre)
                                    image = torch.clamp(image, -1, 1)
                                # mask keeps gradients: this is what trains Seg
                                mask = Seg(representation)
                                del representation

                                with torch.no_grad():
                                    pred = net(image)
                                    pred_prob = torch.softmax(pred, dim=1)
                                    # a pixel only counts if its channel-wise confidence exceeds the threshold
                                    confident = (pred_prob[:, 1:].max(dim=1).values > ct) | (pred_prob[:, 0] > ct)

                                    pred_argmax = pred.argmax(1)
                                    mask_argmax = mask.argmax(1)

                                    # only background and the target class may be converted
                                    # (adding boundary classes was tried and abandoned)
                                    # (per-class counter regions were tried and abandoned)
                                    belong_pred = (pred_argmax==class_idx) | (pred_argmax==0)
                                    belong_mask = (mask_argmax==class_idx) | (mask_argmax==0)
                                    # B, H, W of T/F
                                    # only high-confidence background/target regions may be modified
                                    target = confident & belong_pred & belong_mask
                                    # open question: how to update without disturbing the other classes!
                                    # currently updates only one class, which squeezes the other
                                    # classes' space and can reduce their quality;
                                    # the new label = Seg's labels overwritten by `target`, keeping other classes

                                # NOTE(review): if correct_loss is neither 1 nor 2,
                                # `new_mask` below is undefined (NameError) — confirm intended range
                                if correct_loss == 1:
                                    new_mask = mask_argmax
                                    new_mask = torch.where(target, class_idx, new_mask)
                                elif correct_loss == 2:
                                    mask = mask*target.unsqueeze(1)
                                    new_mask = target*class_idx
                                loss_correct = criterion(mask, new_mask)
                                
                                loss_correct.backward()
                                optimizer_seg.step()

                                # log ======================================================================
                                pbar_seg.set_description(' '.join([
                                    f'[class: {class_idx} | {i_c}/{len(IoU_npl_indx)}]',
                                    f'[{iter_seg}/{n_iters_correct}]',
                                    f'loss: {loss_correct.item():.3f}',
                                ]))

                            save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(mask_argmax), visualize(new_mask), (mask_argmax!=pred_argmax).int().unsqueeze(1).repeat(1,3,1,1)]), f'{folder_dir}/show_c{class_idx}.png', nrow=batch_size)
                            save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(belong_mask*class_idx), visualize(belong_pred*class_idx)]), f'{folder_dir}/show_cc{class_idx}.png', nrow=batch_size)
                            Updated_class_list.add(class_idx)
                            # freeze Seg again and resume U-Net training
                            Seg = Seg.eval().requires_grad_(False)
                            net.train()

                # evaluation every iter ===================================================
                if iter%100==0:
                    with torch.no_grad():
                        dice, dice_std, dice_class = eval_dice_3d(net, val_loader, False, '', None, multi_class, True)

                    scheduler.step(dice)

                    if dice > best_dice:
                        best_dice = dice
                        best_dice_epoch = iter
                        with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
                            pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)

                    writer.add_scalar('epoch_loss', loss, global_step=iter)
                    writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=iter)
                    writer.add_scalar('mdice', dice, global_step=iter)

                # log ======================================================================
                pbar.set_description(' '.join([
                    f'[{i_train}/{n_train}]',
                    f'[{iter}/{iters}]',
                    f'loss: {loss.item():.3f}',
                    f'mdice: {dice:.3f}',
                    f'iou: {sum(IoU_npl)/n_correct_class:.3f}',
                    f'best_dice: {best_dice:.3f}',
                    f'b_dice_e: {best_dice_epoch:.3f}'
                ]))
                pbar.update(1)
                # visualization ============================================================
                # save_image(val_visualization, os.path.join(folder_dir, 'visualization', f'{i_train}_{iter}.png'), nrow=2*batch_size)

        # save the corrected Seg network
        with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_seg.pth'), 'wb') as f:
            pickle.dump({'net': copy.deepcopy(Seg).eval().requires_grad_(False).cpu()}, f)

        # test part of i train
        with torch.no_grad():
            with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'rb') as f:
                net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

            dice, dice_std, dice_class = eval_dice_3d(net, test_loader, True, folder_dir, i_train, multi_class, True)

            for d in dice_class:
                print(f'{d:.3f}', end=', ')
            print(f'\ndice: {dice:.3f} std: {dice_std:.3f}\n')

        with open(os.path.join(folder_dir, 'INFO.txt'), 'a') as log:
            log.write(f'train [{i_train}] Best iou of validation: [{best_dice:.5f}] in iter: [{best_dice_epoch}]'+'\n')
            log.write(f'mdice: [{dice:.5f}]\n')

        with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
            # mdice
            for d in dice_class:
                log.write(f'{d:.3f}, ')
            log.write('\n')
            log.write(f'{dice:.3f}\n')

            sum_dice += dice
            sum_dice_class = [x+y for x,y in zip(sum_dice_class, dice_class)]

            log.write('=================================\n')

        # side-by-side comparison: original Seg vs corrected Seg vs supervised net
        with torch.no_grad():
            z = torch.randn(16, G.z_dim).cuda()
            w = G.mapping(z, None)  # [N, L, C]
            image, representation = get_representation(w, resize_repre)
            image = torch.clamp(image, -1, 1)
            # mask from the corrected Seg
            mask = Seg(copy.deepcopy(representation))
            with open(os.path.join(save_dir, f'handmark_{threshold}', pkl_name), 'rb') as f:
                Seg = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
            # mask from the Seg before correction
            mask_og = Seg(representation)
            del representation
            mask_supr = Supr_net(image)
            save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(mask_og.argmax(1)), visualize(mask.argmax(1)), visualize(mask_supr.argmax(1))]), f'{folder_dir}/{i_train}.png', nrow=16)

    # aggregate metrics over all runs
    with open(os.path.join(folder_dir, 'INFO.txt'), 'a') as log:
        log.write(f'Mean test miou: [{sum_dice/n_train}]\n')

    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write('Mean:\n')
        sum_dice_class = [x/n_train for x in sum_dice_class]

        # mdice
        for d in sum_dice_class:
            log.write(f'{d:.3f}, ')
        log.write('\n')
        log.write(f'{sum_dice/n_train:.3f}\n')

if __name__ == "__main__":
    main()  # click parses sys.argv and supplies every parameter
#----------------------------------------------------------------------------
