import copy
import os
import pickle
import random

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm
from torch.utils.data import Dataset

import dnnlib
import legacy
from segmentation.dataset import Triplet, Handmask
from segmentation.utils import *
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg

@click.command()
# train
@click.option('--mode',                     help='do what', type=click.Choice(['shots', 'SegNet', 'U-Net']), required=True)
@click.option('--save_dir',                 help='Network pickle save', required=True, metavar='DIR')
@click.option('--data_dir',                 help='Training data save', metavar='DIR')
@click.option('--shots',                    help='how many shots training', type=int, default=1, show_default=True)
@click.option('--threshold',                help='shots saving threshold', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--test',                     help='do test?', is_flag=True)
@click.option('--aug',                      help='aug?', is_flag=True)
# FIX: the three flags below all had the copy-pasted help text 'aug?'
@click.option('--dropout',                  help='dropout?', is_flag=True)
@click.option('--cutout',                   help='cutout?', is_flag=True)
@click.option('--self_attn',                help='self_attn?', is_flag=True)
# getShots
@click.option('--supervised_pkl',           help='Supervised network pickle save', metavar='DIR')
# getSeg
@click.option('--epochs',                   help='Number of epochs', type=int, default=1000, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--which_net',                help='network', type=click.Choice(['S', 'M', 'L', 'BiFPN']), default='S')
@click.option('--which_repre_layers',       help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
#
@click.option('--seed',                     help='Random seed', type=int, required=True)
def main(mode, **kwargs):
    """CLI entry point: parse the options above and run SegNet training.

    NOTE(review): ``--mode`` is required and captured here but currently
    ignored — every mode runs get_SegNet; the 'shots' / 'U-Net' dispatch
    branches are not present in this file. Confirm whether they were removed
    intentionally.
    """
    get_SegNet(**kwargs)



def drop_out(repre):
    """Representation-level dropout: zero one randomly chosen level in-place.

    Parameters
    ----------
    repre : list[torch.Tensor]
        Feature maps, one per resolution level. A uniformly chosen entry is
        replaced by a same-shape/dtype zero tensor on the same device.

    Generalized from the hard-coded ``randint(0, 6)`` (7 levels) and the
    hard-coded CUDA device: ``zeros_like`` already matches device/dtype and
    creates a non-grad tensor, so this now works for any pyramid depth and
    for CPU tensors too, with identical behavior on the original 7-level
    CUDA inputs.
    """
    i = random.randint(0, len(repre) - 1)
    repre[i] = torch.zeros_like(repre[i])

# Doubles spatial resolution of the cutout mask between pyramid levels.
f = nn.Upsample(scale_factor=2, mode='nearest')


def cut_out(repre, batch_size):
    """Cutout augmentation applied in-place to a feature pyramid.

    For each sample in the batch, one random cell of a 4x4 grid is zeroed.
    The binary mask starts at 4x4 and is nearest-upsampled level by level
    (4 -> 8 -> ... matching ``repre``'s resolutions) and multiplied into
    every feature map.

    Parameters
    ----------
    repre : list[torch.Tensor]
        Feature maps (N, C, H, W) with H doubling per level, lowest = 4.
    batch_size : int
        Number of samples N in each feature map.

    Returns
    -------
    torch.Tensor
        The final-resolution mask as int8, shape (batch_size, H, W), so the
        caller can mask the ground-truth labels identically.
    """
    # BUG FIX: the cell indices were drawn with a hard-coded batch of 5
    # (torch.randint(0, 4, [5, 2])), which raised IndexError for
    # batch_size > 5. Draw exactly batch_size cells instead.
    cells = torch.randint(0, 4, [batch_size, 2])
    # Follow the features' device instead of hard-coding CUDA.
    mask = torch.ones((batch_size, 1, 4, 4), requires_grad=False,
                      device=repre[0].device)
    for b in range(batch_size):
        mask[b, :, cells[b, 0], cells[b, 1]] = 0

    for level in range(len(repre)):  # generalized from the hard-coded 7
        if level:
            mask = f(mask)
        repre[level] *= mask
    return mask.to(dtype=torch.int8).squeeze(1)


# for w, mask
def get_SegNet(
    save_dir,
    data_dir,
    which_net,
    which_repre_layers,
    shots,
    threshold,
    epochs,
    batch_size,
    seed,
    test,
    w_steps,
    length,
    split,
    multi_class,
    combine,
    dropout,
    cutout,
    self_attn,
    **kwargs
):
    random.seed(28)
    samples = list(range(103))
    random.shuffle(samples)
    best_miou = 0

    random.seed(0)

    assert set(which_repre_layers).issubset(set([4, 8, 16, 32, 64, 128, 256]))
    channels_of_layer = {4:512, 8:512, 16:512, 32:512, 64:256, 128:128, 256:64}
    # make dir =====================================================================
    folder_dir = os.path.join(save_dir, f'handmark_{threshold}')
    if not os.path.exists(os.path.join(folder_dir, 'visualization')):
        os.mkdir(os.path.join(folder_dir, 'visualization'))

    torch.manual_seed(seed)

    pkl_name = f'Seg_{which_net}'
    if dropout:
        pkl_name += '_drop'
    if cutout:
        pkl_name += '_cut'
    pkl_name += f'_{shots}s_cb{combine}_{which_repre_layers}.pkl'

    # init dataaset ===============================================================
    dataset = Handmask(folder_dir, multi_class, combine)
    dataset, _ = torch.utils.data.random_split(dataset, [shots, dataset.__len__()-shots], torch.Generator().manual_seed(216))
    train_loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
    print(f'train on {dataset.__len__()}shots')

    # init parameters ===============================================================
    in_ch = sum([channels_of_layer[reso] for reso in which_repre_layers])
    print(f'sum channels of representation is {in_ch}')

    criterion = nn.CrossEntropyLoss() if multi_class else nn.BCEWithLogitsLoss()
    l1 = nn.L1Loss()

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    get_representation = init_get_representation(G, which_repre_layers, 256, 'const')
    print('load generator done!')

    # train for n_train ===============================================================
    out_ch = 17
    # net = BiFPN(out_ch=out_ch, n_block=5, self_attention=self_attn).cuda().train()
    net = get_network(which_net, in_ch, out_ch).cuda().train()
    resize_repre = which_net != 'BiFPN'

    optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 50, 0.9)

    writer = SummaryWriter(os.path.join(folder_dir, 'runs'))

    with tqdm(range(epochs), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
        for epoch in range(epochs):
            epoch_loss = 0
            
            for iter, (w, _, mask) in enumerate(train_loader):
                optimizer.zero_grad()

                w = w.cuda()
                mask = mask.cuda()

                _, representation = get_representation(w, resize_repre)

                if dropout:
                    drop_out(representation)
                
                if cutout:
                    m = cut_out(representation, batch_size)
                    mask *= m

                # for i in range(6):
                #     representation[i] = nn.Upsample(scale_factor=2**(6-i), mode='nearest')(representation[i].mean(1, keepdim=True))
                #     print(representation[i].shape)
                # representation[6] = representation[6].mean(1, keepdim=True)

                # for i in range(7):
                #     mi = torch.min(representation[i])
                #     ma = torch.max(representation[i])
                #     representation[i] = (representation[i]-mi)/(ma-mi)

                # representation = torch.cat(representation)
                # save_image(representation, 'representation.png', nrow=5)
                # save_image(mask.unsqueeze(1)*1.0, 'mask.png', nrow=5)
                # exit()

                pred = net(representation)

                loss = criterion(pred, mask)
                epoch_loss += loss.item()

                loss.backward()
                optimizer.step()

            scheduler.step()

            # if test and epoch>=499 and (epoch+1)%100==0:
            #     del representation

            #     split = [int(s) for s in split]
            #     split = [103*s//sum(split) for s in split]
            #     split[1] = 103 - split[0] - split[2]
            #     # print(f'dataset split: {split}')

            #     test_loader = DataLoader(
            #         Triplet(
            #             data_dir=data_dir,
            #             save_dir=save_dir,
            #             multi_class=multi_class,
            #             aug=False,
            #             sample=samples[split[0]+split[1]:103],
            #             combine=combine,
            #             return_w=True,
            #             length=64,
            #             w_steps=1000
            #         ),
            #         5, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
            #     )

            #     with torch.no_grad():
            #         dice, accuracy, iou, wdice, wacc, wiou = eval_net_few_shot(net, get_representation, test_loader, True, folder_dir, 0, resize_repre, multi_class)
                
            #     # print(dice)
            #     # print(accuracy)
            #     # print(iou)
            #     # print(wdice)
            #     # print(wacc)
            #     # print(wiou)
            #     miou = iou.mean()
            #     print(f'test miou : {miou.item():.5f}')

            #     with open(os.path.join(folder_dir, 'auto_shot_test.txt'), 'a') as log:
            #         # log.write(f'test dice: [{dice}]'+'\n')
            #         # log.write(f'test accu: [{accuracy}]'+'\n')
            #         # log.write(f'test iou : [{iou}]'+'\n')
            #         # log.write(f'test wdice : [{wdice.item():.5f}]'+'\n')
            #         # log.write(f'test wacc : [{wacc.item():.5f}]'+'\n')
            #         # log.write(f'test wiou : [{wiou.item():.5f}]'+'\n')
            #         log.write(f'test miou : [{miou.item():.5f}]'+'\n')
                
            #     if miou>best_miou:
            #         best_miou = miou
            #         with open(os.path.join(folder_dir, pkl_name), 'wb') as f:
            #             pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)               # [N, 1, C]
            #         print(f'net is saved! test miou : {miou.item():.5f}')

            # log ======================================================================
            epoch_loss = epoch_loss / len(train_loader)
            pbar.set_description(' '.join([
                f'[{epoch}/{epochs}]',
                f'loss: {epoch_loss:.3f}',
            ]))
            pbar.update(1)

            writer.add_scalar('epoch_loss', epoch_loss, global_step=epoch)
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)

    print('train done!')

    with open(os.path.join(folder_dir, pkl_name), 'wb') as f:
        pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)               # [N, 1, C]
    print(f'net is saved!')

    with torch.no_grad():
        with open(os.path.join(save_dir, f'handmark_{threshold}', pkl_name), 'rb') as f:
            net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
        to_save = []
        for iter, (w, image, mask) in enumerate(train_loader):
            w = w.cuda()
            mask = mask.cuda()
            image = image.cuda()

            recon, representation = get_representation(w, resize_repre)

            pred = net(representation)

            recon = (recon+1)/2

            if multi_class:
                # B, 5, H, W -> B, 3, H, W
                pred = visualize(pred.argmax(1))
                mask = visualize(mask)
                
                to_save.append(torch.cat([
                    torch.cat([image.repeat(1,3,1,1).unsqueeze(1), mask.unsqueeze(1)], dim=1).view(-1, 3, 256, 256),
                    torch.cat([recon.repeat(1,3,1,1).unsqueeze(1), pred.unsqueeze(1)], dim=1).view(-1, 3, 256, 256)
                ], dim=0))
            else:
                pred = torch.where(pred>0, 1.0, 0.0)
                to_save.append(get_save_image(image.detach().cpu(), mask.detach().cpu(), recon.detach().cpu(), pred.detach().cpu()))

        save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, f'visual.png'), nrow=10)
        print('visualization is saved!')

    # if test:
    #     del representation
    #     random.seed(28)
    #     samples = list(range(103))
    #     random.shuffle(samples)

    #     split = [int(s) for s in split]
    #     split = [103*s//sum(split) for s in split]
    #     split[1] = 103 - split[0] - split[2]
    #     print(f'dataset split: {split}')

    #     test_loader = DataLoader(
    #         Triplet(
    #             data_dir=data_dir,
    #             save_dir=save_dir,
    #             multi_class=multi_class,
    #             aug=False,
    #             sample=samples[split[0]+split[1]:103],
    #             combine=combine,
    #             return_w=True,
    #             length=64,
    #             w_steps=1000
    #         ),
    #         5, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
    #     )

    #     with torch.no_grad():
    #         dice, accuracy, iou, wdice, wacc, wiou = eval_net_few_shot(net, get_representation, test_loader, True, folder_dir, 0, resize_repre, multi_class)
        
    #     print(dice)
    #     print(accuracy)
    #     print(iou)
    #     print(wdice)
    #     print(wacc)
    #     print(wiou)
    #     print(iou.mean())

    #     with open(os.path.join(folder_dir, 'auto_shot_test.txt'), 'a') as log:
    #         log.write(f'test dice: [{dice}]'+'\n')
    #         log.write(f'test accu: [{accuracy}]'+'\n')
    #         log.write(f'test iou : [{iou}]'+'\n')
    #         log.write(f'test wdice : [{wdice.item():.5f}]'+'\n')
    #         log.write(f'test wacc : [{wacc.item():.5f}]'+'\n')
    #         log.write(f'test wiou : [{wiou.item():.5f}]'+'\n')
    #         log.write(f'test miou : [{iou.mean().item():.5f}]'+'\n')


if __name__ == "__main__":
    # click parses the CLI options and dispatches into get_SegNet.
    main()
#----------------------------------------------------------------------------
