import copy
import os
import pickle

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm

import dnnlib
import legacy
from segmentation.dataset import ISIC2018Task1_w_mask
from segmentation.utils import *


class IntList(click.ParamType):
    """Click parameter type for a comma-separated list of ints (e.g. '4,8,16').

    An empty string or the literal 'none' (any case) selects the default
    layer list [4, 8, 16, 32, 64, 128, 256].
    """

    name = 'list'

    def convert(self, value, param, ctx):
        # Click may invoke convert() on an already-converted value (e.g. a
        # list default), and it never invokes it on a None default at all —
        # so accept a list as-is and treat None defensively.
        if isinstance(value, list):
            return value
        if value is None or value == '' or value.lower() == 'none':
            return [4, 8, 16, 32, 64, 128, 256]
        try:
            return [int(v) for v in value.split(',')]
        except ValueError:
            # Surface malformed input as a click usage error rather than a
            # raw ValueError traceback.
            self.fail(f'{value!r} is not a comma-separated list of integers', param, ctx)

@click.command()
# train
@click.option('--n_train',                  help='N train in a row', type=int, default=3, show_default=True)
@click.option('--epochs',                   help='Number of epochs', type=int, default=1000, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--resolution',               help='Resolution of image', type=int, default=256, show_default=True)
@click.option('--save_dir',                 help='Network pickle save', required=True, metavar='DIR')
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--which_net',                help='network', type=click.Choice(['S', 'M', 'L']), default='S')
@click.option('--which_repre_layers',       help='Which representation layers for train, default=[4, 8, 16, 32, 64, 128, 256]', type=IntList())
@click.option('--shot',                     help='how many shot training', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
def train(
    n_train,
    epochs,
    batch_size,
    resolution,
    save_dir,
    split,
    which_net,
    which_repre_layers,
    shot,
    multi_class
):
    """Train ``n_train`` independent few-shot segmentation heads on frozen
    StyleGAN feature maps and log per-run and mean test metrics.

    Args:
        n_train: number of independent training runs (different seed/split each).
        epochs: training epochs per run.
        batch_size: mini-batch size for the training loader.
        resolution: generator output resolution passed to the feature extractor.
        save_dir: directory holding the GAN snapshot and dataset; results are
            written under a sub-directory created by ``make_save_dir``.
        split: three integers giving train/val/test proportions.
        which_net: segmentation head architecture ('S' | 'M' | 'L').
        which_repre_layers: generator resolutions whose features are used;
            None/empty falls back to all of [4, 8, 16, 32, 64, 128, 256].
        shot: number of labelled training examples per run.
        multi_class: if set, predict 4 output channels instead of 1.
    """
    # Click never calls IntList.convert on a None default, so the documented
    # fallback has to be applied here as well (otherwise set(None) crashes).
    if not which_repre_layers:
        which_repre_layers = [4, 8, 16, 32, 64, 128, 256]
    assert set(which_repre_layers).issubset({4, 8, 16, 32, 64, 128, 256})
    # Feature-channel count produced by the generator at each resolution.
    channels_of_layer = {4: 512, 8: 512, 16: 512, 32: 512, 64: 256, 128: 128, 256: 64}

    # make dir =====================================================================
    folder_dir = make_save_dir(save_dir, which_net, shot, which_repre_layers)
    print('make dir done!')

    # init dataset =================================================================
    dataset = ISIC2018Task1_w_mask(save_dir)

    # Turn proportions into absolute sizes. Train and test are fixed by integer
    # division; the rounding remainder is folded into the validation split so
    # the test set matches the one used for U-Net baselines.
    total = sum(split)
    split = [len(dataset) * s // total for s in split]
    split[1] = len(dataset) - split[0] - split[2]
    print(f'dataset split: {split}')

    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        # Frozen EMA generator: used only as a feature extractor.
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data

    get_representation = init_get_representation(G, which_repre_layers, resolution, 'const')
    print('load generator done!')

    # init parameters ==============================================================
    in_ch = sum(channels_of_layer[reso] for reso in which_repre_layers)
    print(f'sum channels of representation is {in_ch}')
    total_metric = [0, 0, 0]  # running sums of [dice, accuracy, iou] across runs
    criterion = nn.BCEWithLogitsLoss()

    # train for n_train ============================================================
    for i_train in range(n_train):
        # Distinct but reproducible seed per run.
        torch.manual_seed(i_train * 28)

        out_ch = 4 if multi_class else 1
        net = get_network(which_net, in_ch, out_ch).cuda().train()
        # NOTE(review): 'BiFPN' is not among the click choices, so this is
        # always True for CLI runs; presumably BiFPN consumes the multi-scale
        # pyramid directly while the others need resized features — confirm.
        resize_repre = which_net != 'BiFPN'

        train_loader, val_loader, test_loader = get_dataloader(shot, batch_size, dataset, split, i_train)

        optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001)
        scheduler = optim.lr_scheduler.StepLR(optimizer, 10, 0.9)

        best_iou = -1.0      # best validation IoU so far, kept as a plain float
        best_iou_epoch = -1

        writer = SummaryWriter(os.path.join(folder_dir, 'runs', f'{i_train}_train'))

        with tqdm(range(epochs), initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
            for epoch in range(epochs):
                net.train()
                epoch_loss = 0

                for w, _, mask in train_loader:
                    optimizer.zero_grad()

                    w = w.cuda()
                    mask = mask.cuda()

                    # Features come from the frozen generator; only `net` trains.
                    _, representation = get_representation(w, resize_repre)

                    pred = net(representation)

                    loss = criterion(pred, mask)
                    epoch_loss += loss.item()

                    loss.backward()
                    optimizer.step()

                # evaluation every epoch ===========================================
                with torch.no_grad():
                    dice, accuracy, iou = eval_net_few_shot(net, get_representation, val_loader, split[1], False, '', None)

                scheduler.step()

                if iou > best_iou:
                    # Store as float so the log formatting below works whether
                    # eval returns tensors or plain numbers.
                    best_iou = float(iou)
                    best_iou_epoch = epoch
                    # Checkpoint the best model on CPU, detached from autograd.
                    with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
                        pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)

                # log ==============================================================
                epoch_loss = epoch_loss / len(train_loader)
                pbar.set_description(' '.join([
                    f'[{i_train}/{n_train}]',
                    f'[{epoch}/{epochs}]',
                    f'loss: {epoch_loss:.3f}',
                    f'iou: {float(iou):.3f}',
                    f'best_iou: {best_iou:.3f}',
                    f'b_iou_e: {best_iou_epoch}'
                ]))
                pbar.update(1)

                writer.add_scalar('epoch_loss', epoch_loss, global_step=epoch)
                writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)
                writer.add_scalar('dice', dice, global_step=epoch)
                writer.add_scalar('accu', accuracy, global_step=epoch)
                writer.add_scalar('iou', iou, global_step=epoch)

        # test part of i_train: reload the best checkpoint and evaluate once.
        with torch.no_grad():
            with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'rb') as f:
                net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

            dice, accuracy, iou = eval_net_few_shot(net, get_representation, test_loader, split[2], True, folder_dir, i_train)

        with open(os.path.join(folder_dir, 'INFO.txt'), 'a') as log:
            log.write(f'train [{i_train}] Best iou of validation: [{best_iou:.5f}] in Epoch: [{best_iou_epoch}]\n')
            log.write(f'test iou: [{float(iou):.5f}]\n')

        with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
            for n, metric in enumerate([dice, accuracy, iou]):
                log.write(str(metric) + '\n')
                total_metric[n] += float(metric)
            log.write('=================================\n')

    with open(os.path.join(folder_dir, 'INFO.txt'), 'a') as log:
        log.write(f'Mean test iou: [{total_metric[-1] / n_train:.4f}]\n')

    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write('Mean:\n')
        for metric in total_metric:
            log.write(str(metric / n_train) + '\n')


if __name__ == "__main__":
    # Entry point: click parses the CLI options and invokes train().
    train()
#----------------------------------------------------------------------------