import copy
from itertools import count
import os
import pickle

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm

import dnnlib
import legacy
from segmentation.dataset import Handmask_2d, US
from segmentation.utils import *
from segmentation.dataset import apply_trans

@click.command()
# train
@click.option('--n_train',                  help='N train in a row', type=int, default=3, show_default=True)
@click.option('--mode',                     help='do what', type=click.Choice(['shots', 'SegNet', 'U-Net']), required=True)
@click.option('--save_dir',                 help='Network pickle save', required=True, metavar='DIR')
@click.option('--out_dir',                  help='result save', metavar='DIR')
@click.option('--data_dir',                 help='Training data save', metavar='DIR')
@click.option('--shots',                    help='how many shots training', type=int, default=1, show_default=True)
@click.option('--threshold',                help='shots saving threshold', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--test',                     help='do test?', is_flag=True)
@click.option('--aug',                      help='aug?', is_flag=True)
@click.option('--dropout',                  help='dropout?', is_flag=True)
@click.option('--cutout',                   help='cutout?', is_flag=True)
@click.option('--resize',                   help='resize?', is_flag=True)
@click.option('--self_attn',                help='self_attn?', is_flag=True)
# getShots
@click.option('--supervised_pkl',           help='Supervised network pickle save', metavar='DIR')
# getSeg
@click.option('--epochs',                   help='Number of epochs', type=int, default=1000, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--which_net',                help='network', type=click.Choice(['S', 'M', 'L', 'BiFPN']), default='S')
@click.option('--which_repre_layers',       help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
#
@click.option('--seed',                     help='Random seed', type=int, required=True)
def main(mode, **kwargs):
    """CLI entry point: dispatch to the sub-task selected by --mode.

    All remaining options are forwarded as keyword arguments; each sub-task
    picks the ones it needs and swallows the rest via **kwargs.
    Fixed: --dropout/--cutout/--resize help texts were copy-pasted as 'aug?'.
    """
    if mode=='shots':
        get_shots(**kwargs)
    elif mode=='SegNet':
        get_SegNet(**kwargs)
    elif mode=='U-Net':
        get_UNet(**kwargs)

def visualize_(out):
    """Color an integer label map with the module-global `colors` table.

    Args:
        out: integer label tensor of shape [B, H, H].

    Returns:
        (visual, count): an RGB tensor [B, 3, H, H] where each label pixel is
        painted with its color (scaled to [0, 1]), and the number of label
        values that actually occur somewhere in the batch.
    """
    batch, side = out.shape[0], out.shape[-1]
    n_present = 0
    canvas = torch.zeros((batch, 3, side, side)).to(device=out.device)
    for label in colors.keys():
        tint = torch.tensor(colors[label]).to(device=out.device).view(1, 3, 1, 1) / 255
        layer = torch.where(out == label, 1, 0).unsqueeze(1).repeat(1, 3, 1, 1) * tint
        if layer.sum() > 0:
            n_present += 1
        # Later labels overwrite earlier ones wherever they are present.
        canvas = torch.where(layer > 0, layer, canvas)
    return canvas, n_present

def get_shots(
    save_dir,
    shots,
    seed,
    supervised_pkl,
    multi_class,
    threshold,
    **kwargs
):
    """Sample `shots` synthetic (w, image, mask) triplets and save them.

    Loads the best GAN snapshot from `save_dir`, draws deterministic latents,
    synthesizes images, and pseudo-labels them with the supervised network
    from `supervised_pkl`.  Outputs are written to
    `save_dir/handmark_{threshold}/{ws,images,masks}/`.

    NOTE(review): the `count`/`threshold` filtering and the multi-class
    visualization path are commented out below, so every sample is kept and
    `threshold` only names the output folder; `multi_class` is unused here.
    """
    # Refuse to overwrite a previously generated shot set.
    assert not os.path.exists(os.path.join(save_dir, f'handmark_{threshold}'))

    for folder in [f'images', f'masks', f'ws']:
        os.makedirs(os.path.join(save_dir, f'handmark_{threshold}', folder))

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    print('G loaded')

    # init unet ===============================================================
    with open(supervised_pkl, 'rb') as f:
        net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('U-Net loaded')

    ws = []
    imgs = []
    ms = []
    visuals = []  # NOTE(review): only filled by the commented-out visual path

    # Log for per-shot class counts (the writes are currently commented out).
    log = open(os.path.join(save_dir, f'handmark_{threshold}', 'count.txt'), 'a')
    t = 0
    while len(ws)<shots:
        # Deterministic latent per attempt, derived from attempt index and seed.
        z_samples = np.random.RandomState(t*(seed+1)).randn(1, G.z_dim)
        w_samples = G.mapping(torch.from_numpy(z_samples).cuda(), None)  # [N, L, C]

        images = G.synthesis(w_samples, noise_mode='const')
        images = torch.clamp(images, -1, 1)

        with torch.no_grad():
            masks = net(images)
            # Binarize the sigmoid probabilities at 0.5.
            masks = (torch.sigmoid(masks)>0.5).to(torch.float16)

            # if multi_class:
            #     masks = masks.argmax(1)
            #     visual, count = visualize_(masks)

        # if count >= threshold:
            # log.write(f'{len(ws)} - {count}\n')
        ws.append(w_samples)
        imgs.append(images)
        ms.append(masks)
            # visuals.append(visual)

        # print(f'\r{len(ws)}/{shots} count: {count} total: {t}', end='')
        t += 1
    print()
    log.close()

    # save ws
    ws = torch.cat(ws)
    for i in range(shots):
        with open(os.path.join(save_dir, f'handmark_{threshold}', f'ws', f'{i}.pkl'), 'wb') as f:
            # NOTE(review): keeps only the first of the L broadcast w layers —
            # assumes all layers carry the same w here; confirm against loader.
            w = ws[i, :1, :].detach().cpu()
            pickle.dump({'w': w}, f)               # [N, 1, C]
    print(f'{shots}ws {ws.shape} are saved!')

    # save image: map from [-1, 1] back to [0, 1] for writing to disk
    imgs = (torch.cat(imgs)+1)/2
    for i in range(shots):
        save_image(imgs[i], os.path.join(save_dir, f'handmark_{threshold}', f'images', f'{i}.png'), padding=0)
    print(f'images {imgs.shape} are saved!')

    # save mask
    ms = torch.cat(ms)
    for i in range(shots):
        save_image(ms[i], os.path.join(save_dir, f'handmark_{threshold}', f'masks', f'{i}.png'), padding=0)

    # # save visual
    # visuals = torch.cat(visuals)
    # save_image(visuals, os.path.join(save_dir, f'handmark_{threshold}', f'visual.png'))



# for w, mask
def get_SegNet(
    save_dir,
    data_dir,
    which_net,
    which_repre_layers,
    shots,
    threshold,
    epochs,
    batch_size,
    seed,
    test,
    w_steps,
    length,
    split,
    multi_class,
    combine,
    self_attn,
    **kwargs
):
    """Train a pixel-classifier head (BiFPN) on generator feature maps.

    Consumes the few-shot (w, image, mask) triplets produced by `get_shots`
    from `save_dir/handmark_{threshold}/`: each stored w is pushed through the
    frozen generator to obtain intermediate representations at the
    `which_repre_layers` resolutions, and the head is trained to predict the
    stored mask from them.  The trained head and a visualization grid are
    written back into the same folder.

    NOTE(review): `which_net` only affects the output filename and whether
    representations are resized — the head is hard-coded to BiFPN below.
    `out_ch` is fixed to 1 even when `multi_class` is set; confirm the
    multi-class branch is actually exercised with a 1-channel prediction.
    `test`, `w_steps`, `length`, `split` and `self_attn` are unused here.
    """
    assert set(which_repre_layers).issubset(set([4, 8, 16, 32, 64, 128, 256]))
    # Feature channel count at each synthesis resolution.
    channels_of_layer = {4:512, 8:512, 16:512, 32:512, 64:256, 128:128, 256:64}
    # make dir =====================================================================
    folder_dir = os.path.join(save_dir, f'handmark_{threshold}')
    if not os.path.exists(os.path.join(folder_dir, 'visualization')):
        os.mkdir(os.path.join(folder_dir, 'visualization'))

    torch.manual_seed(seed)
    # init dataaset ===============================================================
    dataset = Handmask_2d(folder_dir, multi_class, combine)
    # Fixed split seed (216) so the same `shots` subset is selected every run.
    dataset, _ = torch.utils.data.random_split(dataset, [shots, dataset.__len__()-shots], torch.Generator().manual_seed(216))
    train_loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
    print(f'train on {dataset.__len__()}shots; thres: {threshold}')

    # init parameters ===============================================================
    in_ch = sum([channels_of_layer[reso] for reso in which_repre_layers])
    print(f'sum channels of representation is {in_ch}')

    criterion = nn.CrossEntropyLoss() if multi_class else nn.BCEWithLogitsLoss()

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    get_representation = init_get_representation(G, which_repre_layers, 256, 'const')
    print('load generator done!')

    # train for n_train ===============================================================
    out_ch = 1
    # if which_net == 'BiFPN':
    #     net = BiFPN(out_ch=out_ch, n_block=5, self_attention=self_attn).cuda().train()
    # else:
    #     net = SegNet_S(in_ch, out_ch, which_net)
    net = BiFPN(out_ch=out_ch, n_block=7).cuda().train()
    # Non-BiFPN heads expect all feature maps resized to one resolution.
    resize_repre = which_net != 'BiFPN'

    optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 50, 0.9)

    writer = SummaryWriter(os.path.join(folder_dir, 'runs'))

    with tqdm(range(epochs), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
        for epoch in range(epochs):
            epoch_loss = 0

            for iter, (w, _, mask) in enumerate(train_loader):
                optimizer.zero_grad()

                w = w.cuda()
                mask = mask.cuda()

                # Frozen-generator feature maps for this batch of w codes.
                _, representation = get_representation(w, resize_repre)

                pred = net(representation)

                loss = criterion(pred, mask)
                epoch_loss += loss.item()

                loss.backward()
                optimizer.step()

            scheduler.step()

            # log ======================================================================
            epoch_loss = epoch_loss / len(train_loader)
            pbar.set_description(' '.join([
                f'[{epoch}/{epochs}]',
                f'loss: {epoch_loss:.3f}',
            ]))
            pbar.update(1)

            writer.add_scalar('epoch_loss', epoch_loss, global_step=epoch)
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)

    print('train done!')

    # Persist the trained head on CPU for later use by get_UNet.
    with open(os.path.join(folder_dir, f'Seg_{which_net}_{shots}s_cb{combine}_{which_repre_layers}.pkl'), 'wb') as f:
        pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)               # [N, 1, C]
    print(f'net is saved!')

    # Visualization grid: (real image, GT mask) rows then (recon, pred) rows.
    with torch.no_grad():
        to_save = []
        for iter, (w, image, mask) in enumerate(train_loader):
            w = w.cuda()
            mask = mask.cuda()
            image = image.cuda()

            recon, representation = get_representation(w, resize_repre)

            pred = net(representation)

            recon = (recon+1)/2

            if multi_class:
                # B, 5, H, W -> B, 3, H, W
                pred = visualize(pred.argmax(1))
                mask = visualize(mask)

                to_save.append(torch.cat([
                    torch.cat([image.repeat(1,3,1,1).unsqueeze(1), mask.unsqueeze(1)], dim=1).view(-1, 3, 256, 256),
                    torch.cat([recon.repeat(1,3,1,1).unsqueeze(1), pred.unsqueeze(1)], dim=1).view(-1, 3, 256, 256)
                ], dim=0))
            else:
                pred = (torch.sigmoid(pred)>0.5).to(torch.int8)
                to_save.append(torch.cat([
                    torch.cat([image.repeat(1,3,1,1).unsqueeze(1), mask.repeat(1,3,1,1).unsqueeze(1)], dim=1).view(-1, 3, 256, 256),
                    torch.cat([recon.repeat(1,3,1,1).unsqueeze(1), pred.repeat(1,3,1,1).unsqueeze(1)], dim=1).view(-1, 3, 256, 256)
                ], dim=0))
        save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, f'visual.png'), nrow=10)
        print('visualization is saved!')


def apply_trans_batch(image, mask, chn):
    """Apply `apply_trans` jointly to each (image, mask) pair in a batch.

    Images arrive in [-1, 1]; they are shifted to [0, 1] so image and mask
    can be transformed as one stacked tensor, then shifted back before
    returning.  The mask tensor is updated in place, sample by sample.
    """
    image = (image + 1) / 2
    for idx in range(image.shape[0]):
        # Stack image and mask so the same random transform hits both.
        stacked = apply_trans(torch.cat([image[idx], mask[idx]], dim=0))
        img_part, msk_part = stacked.split([chn, 1], dim=0)
        image[idx] = img_part
        mask[idx] = msk_part.squeeze(0).to(torch.long)

    return (image - 0.5) / 0.5, mask


def get_UNet(
    n_train,
    save_dir,
    out_dir,
    data_dir,
    which_net,
    which_repre_layers,
    shots,
    threshold,
    epochs,
    batch_size,
    split,
    length,
    w_steps,
    combine,
    seed,
    multi_class,
    aug,
    dropout,
    cutout,
    resize,
    self_attn,
    **kwargs
):
    """Distill the GAN + Seg head into a plain U-Net, repeated `n_train` times.

    Each repetition: sample a fresh synthetic batch every epoch (generator
    image + Seg-head pseudo-mask), train a U-Net on it, validate on real data
    every 50 epochs (checkpointing on best dice), then evaluate the best
    checkpoint on a held-out real test split.  Per-run and mean metrics are
    appended to INFO files in a new experiment folder under `out_dir`.

    NOTE(review): `dropout`/`cutout` only select the Seg pickle name here and
    `self_attn`/`w_steps`/`length` are unused; `which_repre_layers` is used
    only for the pickle filename and `init_get_representation`.
    """

    import warnings
    warnings.filterwarnings('ignore')

    # make dir =====================================================================
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print(f'make output dir: {out_dir}')
    # Experiment id = number of existing entries in out_dir.
    id = len(os.listdir(out_dir))
    folder_dir = os.path.join(out_dir, f'exp{id}-{shots}shot-{threshold}thres-{which_net}')

    # Encode the active augmentation flags in the experiment folder name.
    for flag, flag_name in zip([dropout, cutout, aug, resize], ['dropout', 'cutout', 'aug', 'resize']):
        if flag:
            folder_dir += f'-{flag_name}'

    assert not os.path.exists(folder_dir)
    os.mkdir(folder_dir)
    os.mkdir(os.path.join(folder_dir, 'checkpoint'))
    os.mkdir(os.path.join(folder_dir, 'visualization'))
    os.mkdir(os.path.join(folder_dir, 'runs'))
    print('make dir done!')

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    get_representation = init_get_representation(G, which_repre_layers, 256, 'random')
    print('load generator done!')

    # init Seg ===============================================================
    pkl_name = f'Seg_{which_net}'
    if dropout:
        pkl_name += '_drop'
    if cutout:
        pkl_name += '_cut'
    pkl_name += f'_{shots}s_cb{combine}_{which_repre_layers}.pkl'

    with open(os.path.join(save_dir, f'handmark_{threshold}', pkl_name), 'rb') as f:
        Seg = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('load Seg done!')

    # init dataaset ===============================================================
    # split 2:1 first, then choose n shot for training, ensure same test set with U-Net training
    split_length = len(os.listdir(os.path.join(data_dir, 'images')))
    split = [int(s) for s in split]
    # Convert the ratio split to absolute counts; val absorbs the remainder.
    split = [split_length*s//sum(split) for s in split]
    split[1] = split_length - split[0] - split[2]
    print(f'dataset split: {split}')

    out_ch = 1
    sum_evals = 0

    import random
    for i_train in range(n_train):
        # Reseed per repetition so each gets its own val/test shuffle.
        torch.manual_seed(i_train)
        random.seed(28*i_train)
        samples = list(range(split_length))
        random.shuffle(samples)

        val_loader = DataLoader(
            US(
                data_dir=data_dir,
                aug=False,
                sample=samples[split[0]:split[0]+split[1]],
            ),
            batch_size=4, shuffle=False, num_workers=4, pin_memory=True, drop_last=False
        )
        test_loader = DataLoader(
            US(
                data_dir=data_dir,
                aug=False,
                sample=samples[split[0]+split[1]:split_length],
            ),
            batch_size=4, shuffle=False, num_workers=4, pin_memory=True, drop_last=False
        )

        print(f'val: {samples[split[0]:split[0]+split[1]]}\ntest: {samples[split[0]+split[1]:split_length]}')

        chn = 1
        print(f'image channel: {chn}')

        torch.manual_seed(seed)
        criterion = nn.CrossEntropyLoss() if multi_class else nn.BCEWithLogitsLoss()

        net = get_network('U-Net', chn, out_ch).cuda()
        # net = UNet_attn(chn, out_ch).cuda()
        resize_repre = which_net != 'BiFPN'
        print('load U-Net done!')

        optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=1)

        best_dice = -1
        best_dice_epoch = -1

        writer = SummaryWriter(os.path.join(folder_dir, 'runs', f'{i_train}_train'))

        with tqdm(range(epochs), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
            for epoch in range(epochs):
                net.train()

                optimizer.zero_grad()
                # prepare image mask: one fresh synthetic batch per epoch,
                # pseudo-labeled by the frozen Seg head.
                with torch.no_grad():
                    z = torch.randn(batch_size, G.z_dim).cuda()
                    w = G.mapping(z, None)  # [N, L, C]
                    image, representation = get_representation(w, resize_repre)
                    image = torch.clamp(image, -1, 1)
                    mask = Seg(representation)
                    mask = (torch.sigmoid(mask)>0.5).to(torch.float16)
                    del representation

                if resize:
                    image = F.interpolate(image, (160,160), mode='bilinear')
                    mask = F.interpolate(mask, (160,160), mode='nearest')

                if multi_class:
                    mask = mask.argmax(1)

                if aug:
                    image, mask = apply_trans_batch(image, mask, chn)

                if epoch%10==0:
                    # Debug snapshot of the current synthetic batch (CWD).
                    save_image(torch.cat([(image+1)/2, mask], dim=0), 'show.png', nrow=batch_size)

                pred = net(image)

                loss = criterion(pred, mask)
                loss.backward()
                optimizer.step()

                # evaluation every epoch ===================================================
                if epoch%50==0:
                    # NOTE(review): `pred` below is raw logits (not sigmoided) —
                    # confirm the saved preview is meant to show logits.
                    if chn==1:
                        save_image(torch.cat([(image+1)/2, mask, pred]), f'{folder_dir}/show.png', nrow=batch_size)
                    elif chn==3:
                        save_image(torch.cat([(image+1)/2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)

                    with torch.no_grad():
                        dice = eval_dice_2d(net, val_loader, False, '', None)[0].item()

                    scheduler.step(dice)

                    # Checkpoint on best validation dice (CPU copy via pickle).
                    if dice > best_dice:
                        best_dice = dice
                        best_dice_epoch = epoch
                        with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
                            pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)

                    writer.add_scalar('epoch_loss', loss, global_step=epoch)
                    writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)
                    writer.add_scalar('mdice', dice, global_step=epoch)

                # log ======================================================================
                # `dice` refreshes only every 50 epochs (first at epoch 0), so
                # the bar shows the most recent validation value in between.
                pbar.set_description(' '.join([
                    f'[{i_train}/{n_train}]',
                    f'[{epoch}/{epochs}]',
                    f'loss: {loss.item():.3f}',
                    f'mdice: {dice:.3f}',
                    f'best_dice: {best_dice:.3f}',
                    f'b_dice_e: {best_dice_epoch:.3f}'
                ]))
                pbar.update(1)
                # visualization ============================================================
                # save_image(val_visualization, os.path.join(folder_dir, 'visualization', f'{i_train}_{epoch}.png'), nrow=2*batch_size)

        # test part of i train: reload the best checkpoint and score it.
        with torch.no_grad():
            with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'rb') as f:
                net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

            evaluations = eval_dice_2d(net, test_loader, True, folder_dir, i_train)

        with open(os.path.join(folder_dir, 'INFO.txt'), 'a') as log:
            log.write(f'train [{i_train}] Best dice of validation: [{best_dice:.5f}] in Epoch: [{best_dice_epoch}]'+'\n')
            for i, e in enumerate(['dice', 'accu', 'iou', 'spec', 'sens', 'ppv', 'npv']):
                log.write(f'{evaluations[i].item():.5f}, ')
            log.write('\n')

        with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
            for i, e in enumerate(['dice', 'accu', 'iou', 'spec', 'sens', 'ppv', 'npv']):
                log.write(f'{evaluations[i].item():.5f}, ')
            log.write('\n')

            sum_evals += evaluations
            log.write('=================================\n')

    # Mean metrics over the n_train repetitions.
    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write('Mean:\n')
        # mdice
        evaluations = sum_evals/n_train
        for i, e in enumerate(['dice', 'accu', 'iou', 'spec', 'sens', 'ppv', 'npv']):
            log.write(f'{evaluations[i].item():.3f}, ')
        log.write('\n')
# Script entry point: invoke the click command only when run directly.
if __name__ == '__main__':
    main()
#----------------------------------------------------------------------------
