import copy
import os
import pickle

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm
from torch.utils.data import Dataset

import dnnlib
import legacy
from segmentation.dataset import Triplet
from segmentation.utils import *
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg

@click.command()
# train
@click.option('--mode',                     help='do what', type=click.Choice(['shots', 'SegNet', 'U-Net']), required=True)
@click.option('--save_dir',                 help='Network pickle save', required=True, metavar='DIR')
@click.option('--data_dir',                 help='Training data save', metavar='DIR')
@click.option('--shots',                    help='how many shots training', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--test',                     help='do test?', is_flag=True)
@click.option('--aug',                      help='aug?', is_flag=True)
@click.option('--self_attn',                help='self_attn?', is_flag=True)
# getShots
@click.option('--supervised_pkl',           help='Supervised network pickle save', metavar='DIR')
# getSeg
@click.option('--epochs',                   help='Number of epochs', type=int, default=1000, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--which_net',                help='network', type=click.Choice(['S', 'M', 'L', 'BiFPN']), default='S')
@click.option('--which_repre_layers',       help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
@click.option('--lambda_',                  help='weight of the SDF (shape) L1 loss term', type=float, default=0.1, show_default=True)
@click.option('--k_',                       help='sigmoid steepness for SDF normalization', type=float, default=1, show_default=True)
#
@click.option('--seed',                     help='Random seed', type=int, required=True)
def main(mode, **kwargs):
    """CLI entry point: parse options and launch SegNet training/testing.

    NOTE(review): `mode` is parsed but currently unused — every choice runs
    get_SegNet; confirm whether 'shots'/'U-Net' should dispatch differently.
    """
    get_SegNet(**kwargs)


def compute_sdf(img_gt, c=5, k_=1):
    """Compute per-class signed distance maps (SDM) of a label mask.

    input: segmentation label map, compared elementwise against class ids
           0..c-1 (shape (H, W) or (1, H, W) — assumed, confirm at callers)
    output: torch.FloatTensor with one channel per class, values in (0, 1):
            each SDF is normalized to [-1, 1], folded into a boundary-
            closeness score, then squashed with sigmoid(k_ * score) so it
            peaks on the segmentation boundary.

    sdf(x) = 0; x on the segmentation boundary
             negative (towards -1) inside the segmentation
             positive (towards +1) outside the segmentation
    """
    sdfs = []
    for cls in range(c):
        # np.bool was removed in NumPy >= 1.24; plain boolean comparison
        # yields the same np.bool_ mask.
        posmask = np.asarray(img_gt == cls, dtype=bool)
        negmask = ~posmask

        posdis = distance(posmask)  # Euclidean distance to nearest background pixel
        negdis = distance(negmask)  # Euclidean distance to nearest foreground pixel
        boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)

        # Normalize each distance field to [0, 1]. Guard against a flat field
        # (class absent, or covering the whole image), which previously
        # divided by zero and produced NaNs.
        pos_range = np.max(posdis) - np.min(posdis)
        neg_range = np.max(negdis) - np.min(negdis)
        pos_norm = (posdis - np.min(posdis)) / pos_range if pos_range > 0 else np.zeros_like(posdis)
        neg_norm = (negdis - np.min(negdis)) / neg_range if neg_range > 0 else np.zeros_like(negdis)

        sdf = pos_norm - neg_norm
        sdf[boundary == 1] = 0  # exact zero on the boundary by construction

        sdfs.append(torch.tensor(sdf).unsqueeze(0))

    normalized_sdf = torch.cat(sdfs).to(torch.float32)
    # Fold [-1, 1] into "closeness to boundary" (boundary -> 1) and squash.
    normalized_sdf = torch.sigmoid(torch.where(normalized_sdf >= 0, 1 - normalized_sdf, -1 - normalized_sdf) * k_)

    return normalized_sdf


class Handmask(Dataset):
    """Few-shot hand-mask dataset yielding (w latent, image, mask, SDF).

    Expects `<save_dir>/handmark/{images,masks,ws}` where every image
    `<i>.png` has a matching mask of the same name and a latent pickle
    `<i>.pkl` (dict with key 'w') under `ws`.
    """

    def __init__(self, save_dir, multi_class=False, combine=False, k_=1):
        self.multi_class = multi_class
        self.combine = combine

        base = os.path.join(save_dir, 'handmark')
        self.image_dir = os.path.join(base, 'images')
        self.mask_dir = os.path.join(base, 'masks')
        self.ws_dir = os.path.join(base, 'ws')

        # Filenames look like '<int>.png'; sort numerically by the stem.
        self.filenames = sorted(os.listdir(self.image_dir), key=lambda name: int(name[:-4]))

        # Per-index cache of computed SDFs (shape information); note each
        # DataLoader worker process keeps its own copy of this cache.
        self.SI = {}
        self.c = 3 if combine else 5
        self.k_ = k_

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, index):
        filename = self.filenames[index]
        stem = filename[:-4]

        with open(os.path.join(self.ws_dir, f'{stem}.pkl'), 'rb') as f:
            w = pickle.load(f)['w']

        # Grayscale image scaled to [0, 1], shape (1, H, W).
        # (Fixed: previously opened a hard-coded bogus path instead of the
        # sample's own file.)
        image = Image.open(os.path.join(self.image_dir, filename)).convert('L')
        image = torch.tensor(np.array(image)[None, :] / 255, dtype=torch.float32)

        mask = np.array(Image.open(os.path.join(self.mask_dir, filename)).convert('L'))

        H, W = mask.shape
        if self.multi_class:
            # Remap raw label ids {2, 3, 41, 42} -> {1, 2, 3, 4}; 0 = background.
            m = torch.zeros(H, W)
            for raw, new in zip([2, 3, 41, 42], [1, 2, 3, 4]):
                m = torch.tensor(np.where(mask == raw, new, m), dtype=torch.long)
            mask = m

            if self.combine:
                # Merge paired classes: {1,3} -> 1 and {2,4} -> 2.
                mask = torch.tensor(np.where(mask == 3, 1, mask))
                mask = torch.tensor(np.where(mask == 4, 2, mask))
        else:
            # Binary mask in {0, 1}, shape (1, H, W).
            mask = torch.tensor(mask / 255, dtype=torch.float32).unsqueeze(0)

        # SDF computation is expensive; memoize per sample index.
        if index not in self.SI:
            self.SI[index] = compute_sdf(mask, self.c, self.k_)
        si = self.SI[index]

        return w, image, mask, si


# for w, mask
def get_SegNet(
    save_dir,
    data_dir,
    which_net,
    which_repre_layers,
    shots,
    epochs,
    batch_size,
    seed,
    test,
    w_steps,
    length,
    split,
    multi_class,
    combine,
    self_attn,
    lambda_,
    k_,
    **kwargs
):
    """Train a few-shot segmentation head on frozen StyleGAN features.

    Pipeline: load `shots` labeled samples (Handmask), pull intermediate
    generator representations for each latent `w`, train a BiFPN head with
    a CE/BCE segmentation loss plus an L1 loss against precomputed signed
    distance maps (weighted by `lambda_`), save the net and a visualization
    grid under `<save_dir>/handmark`, and (when `test`) evaluate on a
    held-out split of the Triplet dataset.

    NOTE(review): `w_steps` and `length` are accepted but unused here —
    the Triplet below hard-codes length=64 and w_steps=1000; confirm they
    should be threaded through.
    """
    assert set(which_repre_layers).issubset(set([4, 8, 16, 32, 64, 128, 256]))
    # Feature-channel count produced by the generator at each resolution level.
    channels_of_layer = {4:512, 8:512, 16:512, 32:512, 64:256, 128:128, 256:64}
    # make dir =====================================================================
    folder_dir = os.path.join(save_dir, 'handmark')
    if not os.path.exists(os.path.join(folder_dir, 'visualization')):
        os.mkdir(os.path.join(folder_dir, 'visualization'))

    torch.manual_seed(seed)

    # init dataaset ===============================================================
    dataset = Handmask(save_dir, multi_class, combine, k_)
    # Fixed generator seed (216) so the same `shots` samples are picked every run.
    dataset, _ = torch.utils.data.random_split(dataset, [shots, dataset.__len__()-shots], torch.Generator().manual_seed(216))
    # NOTE(review): drop_last=True — if shots < batch_size the loader yields no
    # batches and the per-epoch division by len(train_loader) is 0; confirm.
    train_loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
    print(f'train on {dataset.__len__()}shots')

    # init parameters ===============================================================
    in_ch = sum([channels_of_layer[reso] for reso in which_repre_layers])
    print(f'sum channels of representation is {in_ch}')

    # CE over class logits for multi-class; BCE-with-logits for the binary mask.
    criterion = nn.CrossEntropyLoss() if multi_class else nn.BCEWithLogitsLoss()
    l1 = nn.L1Loss()  # SDF (shape) regression loss

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()  # frozen EMA generator
        del snapshot_data
    get_representation = init_get_representation(G, which_repre_layers, 256, 'const')
    print('load generator done!')

    # train for n_train ===============================================================
    out_ch = (3 if combine else 5) if multi_class else 1
    net = BiFPN(out_ch=out_ch, n_block=5, self_attention=self_attn).cuda().train()
    # net = get_network(which_net, in_ch, out_ch).cuda().train()
    # BiFPN consumes multi-resolution features directly; other nets need them resized.
    resize_repre = which_net != 'BiFPN'

    optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 50, 0.9)  # lr *= 0.9 every 50 epochs

    writer = SummaryWriter(os.path.join(folder_dir, 'runs'))

    with tqdm(range(epochs), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
        for epoch in range(epochs):
            epoch_loss = 0
            epoch_loss_ = 0  # accumulated SDF(L1) portion of the loss

            for iter, (w, _, mask, si) in enumerate(train_loader):  # NOTE: `iter` shadows the builtin
                optimizer.zero_grad()

                w = w.cuda()
                mask = mask.cuda()
                si = si.cuda()

                _, representation = get_representation(w, resize_repre)

                pred = net(representation)

                loss = criterion(pred, mask)

                # NOTE(review): with out_ch == 1 (binary path) softmax over dim 1
                # is identically 1.0, making the SDF term constant — confirm this
                # term is only meaningful for multi_class.
                pred = torch.softmax(pred, 1)
                si_loss = l1(pred, si) * lambda_
                loss += si_loss

                epoch_loss += loss.item()
                epoch_loss_ += si_loss.item()

                loss.backward()
                optimizer.step()

            scheduler.step()

            # log ======================================================================
            epoch_loss = epoch_loss / len(train_loader)
            epoch_loss_ = epoch_loss_ / len(train_loader)
            pbar.set_description(' '.join([
                f'[{epoch}/{epochs}]',
                f'loss: {epoch_loss:.4f}',
                f'si: {epoch_loss_:.4f}',
            ]))
            pbar.update(1)

            writer.add_scalar('epoch_loss', epoch_loss, global_step=epoch)
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)

    print('train done!')

    # Persist the trained head (deep copy: CPU, eval mode, gradients off).
    with open(os.path.join(folder_dir, f'Seg_{which_net}_sa{self_attn}_{shots}s_cb{combine}_{which_repre_layers}.pkl'), 'wb') as f:
        pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)               # [N, 1, C]
    print(f'net is saved!')

    # Visualize predictions on the training shots (image/mask vs recon/pred rows).
    with torch.no_grad():
        to_save = []
        for iter, (w, image, mask, _) in enumerate(train_loader):
            w = w.cuda()
            mask = mask.cuda()
            image = image.cuda()

            recon, representation = get_representation(w, resize_repre)

            pred = net(representation)

            recon = (recon+1)/2  # generator output [-1, 1] -> [0, 1]

            if multi_class:
                # B, 5, H, W -> B, 3, H, W
                pred = visualize(pred.argmax(1))
                mask = visualize(mask)
                
                to_save.append(torch.cat([
                    torch.cat([image.repeat(1,3,1,1).unsqueeze(1), mask.unsqueeze(1)], dim=1).view(-1, 3, 256, 256),
                    torch.cat([recon.repeat(1,3,1,1).unsqueeze(1), pred.unsqueeze(1)], dim=1).view(-1, 3, 256, 256)
                ], dim=0))
            else:
                # Threshold raw logits at 0 (equivalent to sigmoid(pred) > 0.5).
                pred = torch.where(pred>0, 1.0, 0.0)
                to_save.append(get_save_image(image.detach().cpu(), mask.detach().cpu(), recon.detach().cpu(), pred.detach().cpu()))

        save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, f'visual.png'), nrow=10)
        print('visualization is saved!')

    if test:
        del representation  # drop the last GPU feature maps before testing
        import random
        random.seed(28)  # fixed shuffle so the train/val/test split is reproducible
        samples = list(range(103))  # presumably 103 samples in the full dataset — confirm
        random.shuffle(samples)

        # Turn the ratio split (e.g. 7:1:2) into absolute sample counts over 103.
        split = [int(s) for s in split]
        split = [103*s//sum(split) for s in split]
        split[1] = 103 - split[0] - split[2]
        print(f'dataset split: {split}')

        test_loader = DataLoader(
            Triplet(
                data_dir=data_dir,
                save_dir=save_dir,
                multi_class=multi_class,
                aug=False,
                sample=samples[split[0]+split[1]:103],  # final chunk = test set
                combine=combine,
                return_w=True,
                length=64,
                w_steps=1000
            ),
            5, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
        )

        with torch.no_grad():
            dice, accuracy, iou, wdice, wacc, wiou = eval_net_few_shot(net, get_representation, test_loader, True, folder_dir, 0, resize_repre, multi_class)
        
        print(dice)
        print(accuracy)
        print(iou)
        print(wdice)
        print(wacc)
        print(wiou)
        print(iou.mean())

        with open(os.path.join(folder_dir, 'auto_shot_test.txt'), 'a') as log:
            log.write(f'test dice: [{dice}]'+'\n')
            log.write(f'test accu: [{accuracy}]'+'\n')
            log.write(f'test iou : [{iou}]'+'\n')
            log.write(f'test wdice : [{wdice.item():.5f}]'+'\n')
            log.write(f'test wacc : [{wacc.item():.5f}]'+'\n')
            log.write(f'test wiou : [{wiou.item():.5f}]'+'\n')
            log.write(f'test miou : [{iou.mean().item():.5f}]'+'\n')


if __name__ == "__main__":
    main()  # click parses sys.argv and supplies all option arguments
#----------------------------------------------------------------------------
