import copy
from itertools import count
import os
import pickle

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm

import dnnlib
import legacy
from segmentation.dataset import Handmask, Triplet, MR_slice
from segmentation.utils import *
from segmentation.dataset import apply_trans
from segmentation.network_ds import UNet_ds, Unet2D
from torchinterp1d import interp1d
from bezier_curve import bezier_curve
import random


@click.command()
# train
@click.option('--n_train',                  help='N train in a row', type=int, default=3, show_default=True)
@click.option('--mode',                     help='do what', type=click.Choice(['shots', 'SegNet', 'U-Net']), required=True)
@click.option('--save_dir',                 help='Network pickle save', required=True, metavar='DIR')
@click.option('--out_dir',                 help='result save', metavar='DIR')
@click.option('--train_dir',                 help='Training data save', metavar='DIR')
@click.option('--test_dir',                 help='Training data save', metavar='DIR')
@click.option('--shots',                    help='how many shots training', type=int, default=1, show_default=True)
@click.option('--threshold',                help='shots saving threshold', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--test',                     help='do test?', is_flag=True)
@click.option('--aug',                      help='aug?', is_flag=True)
@click.option('--dropout',                      help='aug?', is_flag=True)
@click.option('--cutout',                      help='aug?', is_flag=True)
@click.option('--resize',                      help='aug?', is_flag=True)
@click.option('--myds',                      help='self_attn?', is_flag=True)
# getShots
@click.option('--supervised_pkl',           help='Supervised network pickle save', metavar='DIR')
# getSeg
@click.option('--epochs',                   help='Number of epochs', type=int, default=1000, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--which_net',                help='network', type=click.Choice(['S', 'M', 'L', 'BiFPN']), default='S')
@click.option('--which_repre_layers',       help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
# 
@click.option('--seed',                     help='Random seed', type=int, required=True)
# @click.option('--resolution',               help='Resolution of image', type=int, default=256, show_default=True)
# @click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
def main(mode, **kwargs):
    """CLI entry point. Only the 'U-Net' training mode is implemented here."""
    # Explicit CLI error instead of `assert` (asserts are stripped under `python -O`).
    if mode != 'U-Net':
        raise click.UsageError(f"--mode '{mode}' is not supported by this script; only 'U-Net' is implemented.")
    get_UNet(**kwargs)


def visualize_(out):
    """Render an integer label map as an RGB image using the global ``colors`` palette.

    out: (B, H, H) tensor of class ids.
    Returns (visual, count): visual is a (B, 3, H, H) RGB tensor with values
    in [0, 1]; count is the number of palette classes that actually appear
    (classes whose palette color is pure black are not counted).
    """
    batch, side = out.shape[0], out.shape[-1]
    present = 0
    canvas = torch.zeros((batch, 3, side, side)).to(device=out.device)
    for label in colors:
        # Broadcastable (1, 3, 1, 1) color, normalized from 0-255 to 0-1.
        rgb = torch.tensor(colors[label]).to(device=out.device).view(1, 3, 1, 1) / 255
        layer = torch.where(out == label, 1, 0).unsqueeze(1).repeat(1, 3, 1, 1) * rgb
        if layer.sum() > 0:
            present += 1
        # Paint this class's pixels over whatever was drawn before.
        canvas = torch.where(layer > 0, layer, canvas)

    return canvas, present


def apply_trans_batch(image, mask, chn):
    """Apply ``apply_trans`` augmentation sample-by-sample over a batch.

    image: (B, chn, H, W) tensor in [-1, 1]; mask: (B, H, W) label tensor.
    Image and mask are stacked so both receive the same spatial transform;
    images are mapped to [0, 1] for the transform and back to [-1, 1] after.
    Returns the transformed (image, mask) pair.
    """
    image = (image + 1) / 2  # [-1, 1] -> [0, 1] for the augmentation
    for idx in range(image.shape[0]):
        stacked = torch.cat([image[idx], mask[idx].unsqueeze(0)], dim=0)
        img_aug, msk_aug = apply_trans(stacked).split([chn, 1], dim=0)
        image[idx] = img_aug
        mask[idx] = msk_aug.squeeze(0).to(torch.long)

    image = (image - 0.5) / 0.5  # back to [-1, 1]

    return image, mask


def _bezier_remap(slices, points, sort_y, wrap_max):
    """Remap intensities of `slices` through a Bezier-curve lookup table.

    points: four control points of the curve, in [-1, 1] x [-1, 1].
    sort_y: also sort the curve's y samples, forcing a monotonically
        increasing mapping (produces source-similar images).
    wrap_max: flip output values that saturate at exactly 1 to -1
        (produces source-dissimilar images).
    """
    xvals, yvals = bezier_curve(points, nTimes=100000)
    xvals = np.sort(xvals)
    if sort_y:
        yvals = np.sort(yvals)
    xvals, yvals = torch.tensor([xvals, yvals], device=slices.device)
    out = interp1d(xvals, yvals, slices)
    if wrap_max:
        # eps = 0.0001
        # eps = 0.115
        # out[out > 1-eps] = -1
        out[out == 1] = -1
    return out


def my_nonlinear_transformation(slices, domain):
    """Randomly apply one of three nonlinear intensity remappings to `slices`.

    slices: tensor with values in [-1, 1].
    domain == 0 yields source-similar images (identity or monotone Bezier
    curves); domain == 1 yields source-dissimilar images (non-monotone
    curves whose saturated values are inverted to -1).

    Raises ValueError for any other `domain` (previously returned None
    silently).
    """
    assert slices.max() <= 1
    assert slices.min() >= -1
    # One of three curve variants is picked at random per call.
    n = random.randint(0, 2)

    if domain == 0:
        if n == 0:
            return slices
        # n==1 uses the +-0.5 control points, n==2 the +-0.75 ones.
        mid = 0.5 if n == 1 else 0.75
        points = [[-1, -1], [-mid, mid], [mid, -mid], [1, 1]]
        return _bezier_remap(slices, points, sort_y=True, wrap_max=False)

    elif domain == 1:
        if n == 0:
            # Degenerate control points: near-identity curve before inversion.
            points = [[-1, -1], [-1, -1], [1, 1], [1, 1]]
        else:
            mid = 0.5 if n == 1 else 0.75
            points = [[-1, -1], [-mid, mid], [mid, -mid], [1, 1]]
        return _bezier_remap(slices, points, sort_y=False, wrap_max=True)

    raise ValueError(f'unknown domain: {domain!r} (expected 0 or 1)')


def get_UNet(
    n_train,
    save_dir,
    out_dir,
    train_dir,
    test_dir,
    which_net,
    which_repre_layers,
    shots,
    threshold,
    epochs, 
    batch_size,
    split,
    length,
    w_steps,
    combine,
    seed,
    multi_class,
    aug,
    dropout,
    cutout,
    resize,
    myds,
    **kwargs
):
    """Train a plain U-Net on GAN-generated images with pseudo-masks and evaluate on real data.

    For each of `n_train` independent runs:
      1. load the frozen generator snapshot from `save_dir`,
      2. load the pretrained few-shot `Seg` network that produces training masks,
      3. train a U-Net for `epochs` optimizer steps on freshly sampled synthetic batches,
      4. pickle the final network and log per-class / mean Dice on `test_dir`.

    Parameters mirror the click options of `main`; some (e.g. `train_dir`,
    `length`, `w_steps`, `split`, `myds`, `**kwargs`) are accepted only for CLI
    compatibility and are unused here.  Outputs go under
    `out_dir/exp{id}-{shots}shot-{threshold}thres-{which_net}[-flags]/`.
    Requires CUDA.
    """

    import warnings
    warnings.filterwarnings('ignore')

    # Dataset-family flag passed to the Dice evaluation.
    # NOTE(review): presumably selects label conventions inside eval_dice_3d — confirm.
    condition = 0 if 'OASIS' in test_dir else 1
    # eval = eval_dice_3d_myds if myds else eval_dice_3d_ds
    # NOTE(review): `eval` shadows the Python builtin; eval_dice_3d comes from segmentation.utils.
    eval = eval_dice_3d

    # make dir =====================================================================
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print(f'make output dir: {out_dir}')
    # Experiment index = number of existing entries in out_dir (`id` shadows the builtin).
    id = len(os.listdir(out_dir))
    folder_dir = os.path.join(out_dir, f'exp{id}-{shots}shot-{threshold}thres-{which_net}')

    # Encode the enabled augmentation flags in the experiment folder name.
    for flag, flag_name in zip([dropout, cutout, aug, resize], ['dropout', 'cutout', 'aug', 'resize']):
        if flag:
            folder_dir += f'-{flag_name}'

    assert not os.path.exists(folder_dir)
    os.mkdir(folder_dir)
    os.mkdir(os.path.join(folder_dir, 'checkpoint'))
    os.mkdir(os.path.join(folder_dir, 'visualization'))
    os.mkdir(os.path.join(folder_dir, 'runs'))
    print('make dir done!')

    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write(f'train: {train_dir}\n')
        log.write(f'test: {test_dir}\n')

    # init generator ===============================================================
    # Load the frozen generator (EMA weights) from the best training snapshot.
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    # Closure mapping w-latents to (image, intermediate representations).
    get_representation = init_get_representation(G, which_repre_layers, 256, 'random')
    print('load generator done!')

    # init Seg ===============================================================
    # Reconstruct the pickle filename used when the Seg network was trained.
    pkl_name = f'Seg_{which_net}'
    if dropout:
        pkl_name += '_drop'
    if cutout:
        pkl_name += '_cut'
    pkl_name += f'_{shots}s_cb{combine}_{which_repre_layers}.pkl'

    with open(os.path.join(save_dir, f'handmark_{threshold}', pkl_name), 'rb') as f:
        Seg = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('load Seg done!')

    # init dataaset ===============================================================
    # split 2:1 first, then choose n shot for training, ensure same test set with U-Net training
    # Number of test volumes; assumes each volume contributes 128 slice files — TODO confirm.
    split_length = len(os.listdir(os.path.join(test_dir, 'images')))//128
    # split = [int(s) for s in split]
    # split = [split_length*s//sum(split) for s in split]
    # split[1] = split_length - split[0] - split[2]
    # print(f'dataset split: {split}')

    out_ch = 17  # number of segmentation classes (including background)
    sum_dice = 0
    sum_dice_class = [0]*(out_ch-1)  # per-class Dice accumulated across runs

    # resize_repre = which_net != 'BiFPN'
    # for d in range(10):
    #     # prepare image mask
    #     with torch.no_grad():
    #         z = torch.randn(15, G.z_dim).cuda()
    #         w = G.mapping(z, None)  # [N, L, C]
    #         image, representation = get_representation(w, resize_repre)
    #         image = torch.clamp(image, -0.95, 1)
    #         # mask = Seg(representation)
    #         for s in range(image.shape[0]):
    #             # image[s,0] = image[s,0] - image[s,0].min() - 1
    #             image[s,0] = (image[s,0] - image[s,0].min())/(image[s,0].max()-image[s,0].min())*2-1
    #             image[s,0] = my_nonlinear_transformation(image[s,0], 1)
    #         image = F.interpolate(image, (160,160), mode='bilinear')
    #         save_image((image+1)/2, f'show_{d}.png', nrow=5)
    # exit()

    import random  # local import; shadows the module-level `random`
    for i_train in range(n_train):
        # Per-run seeding so each of the n_train repetitions is reproducible.
        torch.manual_seed(i_train)
        random.seed(28*i_train)
        samples = list(range(split_length))
        # random.shuffle(samples)

        test_loader = DataLoader(
            MR_slice(
                data_dir=test_dir,
                aug=False,
                sample=samples
            ),
            batch_size=batch_size, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
        )
        print(f'test len: {test_loader.__len__()}')
        print(f'test on {test_dir}')

        chn = 1  # single-channel (grayscale) input images
        print(f'image channel: {chn}')

        # Re-seed so network init / sampling is identical across runs except for the loop seed above.
        torch.manual_seed(seed)
        criterion = nn.CrossEntropyLoss()

        net = UNet(chn, out_ch).cuda()
        # net = UNet_ds(chn, out_ch).cuda()
        # net = Unet2D(c=chn, num_classes=out_ch, norm='dsbn', num_domains=3).cuda()
        resize_repre = which_net != 'BiFPN'  # BiFPN variant consumes multi-scale features unresized
        print('load U-Net done!')

        optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
        # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4120, gamma=0.5)

        best_dice = -1
        best_dice_epoch = -1
        losses = [0, 0]

        writer = SummaryWriter(os.path.join(folder_dir, 'runs', f'{i_train}_train'))

        with tqdm(range(epochs), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
            for epoch in range(epochs):
                net.train()

                # for domain in range(2):
                optimizer.zero_grad()
                # prepare image mask: sample a fresh synthetic batch every step.
                with torch.no_grad():
                    z = torch.randn(batch_size, G.z_dim).cuda()
                    w = G.mapping(z, None)  # [N, L, C]
                    image, representation = get_representation(w, resize_repre)
                    # remove backgrounds brighter than 0 (translated; clamps to [-0.95, 1])
                    image = torch.clamp(image, -0.95, 1)
                    mask = Seg(representation)

                    del representation

                # nonlinear transformation
                for s in range(image.shape[0]):
                    # Per-slice min-max normalization to [-1, 1]; constant slices become all -1.
                    if image[s,0].max()==image[s,0].min():
                        image[s,0] = torch.fill_(image[s,0], -1)
                    else:
                        image[s,0] = (image[s,0] - image[s,0].min())/(image[s,0].max()-image[s,0].min())*2-1
                    # image[s,0] = my_nonlinear_transformation(image[s,0], domain)

                if resize:
                    image = F.interpolate(image, (160,160), mode='bilinear')
                    mask = F.interpolate(mask, (160,160), mode='nearest')

                if multi_class:
                    mask = mask.argmax(1)  # logits -> hard class labels

                if aug:
                    image, mask = apply_trans_batch(image, mask, chn)


                pred = net(image)
                # pred = net(image, domain=domain)

                loss = criterion(pred, mask)
                loss.backward()
                optimizer.step()

                losses[0] = loss.item()
                if epoch%3000==0:
                    save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)
                # NOTE(review): passes a (deprecated) epoch argument to StepLR.step — confirm intended.
                scheduler.step(1)

                # # evaluation every epoch ===================================================
                # if epoch%250==0:
                #     if chn==1:
                #         save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)
                #     elif chn==3:
                #         save_image(torch.cat([(image+1)/2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)

                #     with torch.no_grad():
                #         dice, dice_std, dice_class = eval(net, val_loader, False, folder_dir, None, multi_class, True, condition)

                #     scheduler.step(dice)

                #     if dice > best_dice:
                #         best_dice = dice
                #         best_dice_epoch = epoch
                #         with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
                #             pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)

                #     writer.add_scalar('loss0', losses[0], global_step=epoch)
                #     writer.add_scalar('loss1', losses[1], global_step=epoch)
                #     writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)
                #     writer.add_scalar('mdice', dice, global_step=epoch)

                # log ======================================================================
                pbar.set_description(' '.join([
                    f'[{i_train}/{n_train}]',
                    f'[{epoch}/{epochs}]',
                    f'loss0: {losses[0]:.3f}',
                    f'loss1: {losses[1]:.3f}',
                    # f'mdice: {dice:.3f}',
                    # f'best_dice: {best_dice:.3f}',
                    # f'b_dice_e: {best_dice_epoch:.3f}'
                ]))
                pbar.update(1)
                # visualization ============================================================
                # save_image(val_visualization, os.path.join(folder_dir, 'visualization', f'{i_train}_{epoch}.png'), nrow=2*batch_size)

        # Save the final network of this run (no validation-based best-model selection).
        with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
            pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)
        # # test part of i train
        # with torch.no_grad():
        #     with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'rb') as f:
        #         net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

            # NOTE(review): this evaluation still runs inside the checkpoint `with open(...)`
            # above (indentation left over from the commented-out block) — harmless, but confirm.
            dice, dice_std, dice_class = eval(net, test_loader, True, folder_dir, i_train, multi_class, True, condition)

            for d in dice_class:
                print(f'{d:.3f}', end=', ')
            print(f'\ndice: {dice:.3f} std: {dice_std:.3f}\n')

        with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
            # mdice
            for d in dice_class:
                log.write(f'{d:.3f}, ')
            log.write('\n')
            log.write(f'{dice:.3f}\n')

            sum_dice += dice
            sum_dice_class = [x+y for x,y in zip(sum_dice_class, dice_class)]

            log.write('=================================\n')

    # Aggregate per-class and mean Dice over all n_train runs.
    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write('Mean:\n')
        sum_dice_class = [x/n_train for x in sum_dice_class]

        # mdice
        for d in sum_dice_class:
            log.write(f'{d:.3f}, ')
        log.write('\n')
        log.write(f'{sum_dice/n_train:.3f}\n')

if __name__ == "__main__":
    main()  # click parses the CLI arguments
#----------------------------------------------------------------------------
