import copy
import os
import pickle
import shutil
import random

import click
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from segmentation.dataset import US
from segmentation.utils import *


@click.command()
# train
@click.option('--n_train',                  help='N train in a row', type=int, default=3, show_default=True)
@click.option('--data_dir',                 help='Training data save', required=True, metavar='DIR')
@click.option('--out_dir',                  help='Training output save', metavar='DIR', default='.')
@click.option('--epochs',                   help='Number of epochs', type=int, default=300, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--resolution',               help='Resolution of image', type=int, default=256, show_default=True)
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--n_class',                  help='', type=int, default=1, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--aug',                      help='use data augmentation?', is_flag=True)
@click.option('--shot',                     help='how many shot training', type=int, default=500, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=64, show_default=True)
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
def train(
    n_train,
    epochs,
    batch_size,
    resolution,
    split,
    shot,
    multi_class,
    n_class,
    combine,
    aug,
    data_dir,
    out_dir,
    length,
    w_steps,
):
    """Train a binary U-Net segmenter ``n_train`` times with different seeds.

    Each run reshuffles the sample index list, trains a fresh network with
    BCE-with-logits loss, keeps the checkpoint with the best validation Dice,
    and evaluates that checkpoint on the held-out test split.  Per-run and
    mean metrics are appended to ``INFO.txt`` / ``test_INFO.txt`` inside a
    new experiment folder under ``out_dir``.

    NOTE(review): ``multi_class``, ``n_class``, ``combine``, ``length`` and
    ``w_steps`` are accepted for CLI compatibility but are unused in this
    body (see the commented-out multi-class code paths).
    """
    # make dir =====================================================================
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print(f'make output dir: {out_dir}')
    # experiment index = number of entries already in out_dir
    # (renamed from `id`, which shadowed the builtin)
    exp_id = len(os.listdir(out_dir))
    folder_dir = os.path.join(out_dir, f'exp{exp_id}-{shot}shot-reso{resolution}-aug{aug}')

    # explicit raise instead of `assert` so the guard survives `python -O`
    if os.path.exists(folder_dir):
        raise FileExistsError(f'experiment folder already exists: {folder_dir}')
    os.mkdir(folder_dir)
    os.mkdir(os.path.join(folder_dir, 'checkpoint'))
    os.mkdir(os.path.join(folder_dir, 'visualization'))
    os.mkdir(os.path.join(folder_dir, 'runs'))
    print('make dir done!')

    # init dataset ================================================================
    # split 2:1 first, then choose n shot for training, ensure same test set with U-Net training
    split_length = len(os.listdir(os.path.join(data_dir, 'images')))
    split = [int(s) for s in split]
    split = [split_length * s // sum(split) for s in split]
    # absorb integer-division rounding into the validation split
    split[1] = split_length - split[0] - split[2]
    print(f'dataset split: {split}')

    chn = 1     # input channels (grayscale ultrasound)
    out_ch = 1  # single-class (binary) output
    print(f'image channel: {chn}')

    # init parameters ===============================================================
    sum_evals = 0  # running sum of per-run metric tensors; averaged at the end
    criterion = nn.BCEWithLogitsLoss()

    # train for n_train ===============================================================
    for i_train in range(n_train):
        # deterministic but different shuffles/weights per run
        torch.manual_seed(i_train)
        random.seed(28 * i_train)

        net = get_network('U-Net', 1, out_ch).cuda()

        samples = list(range(split_length))
        random.shuffle(samples)

        # NOTE(review): the train set takes the first `shot` shuffled indices
        # while val/test slice by `split`; if shot > split[0] the train set
        # overlaps val/test — confirm shot <= split[0] is always intended.
        train_loader = DataLoader(
            US(
                data_dir=data_dir,
                aug=aug,
                sample=samples[:shot],
            ),
            batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True
        )
        print('train set len: ', len(train_loader))
        val_loader = DataLoader(
            US(
                data_dir=data_dir,
                aug=False,
                sample=samples[split[0]:split[0] + split[1]],
            ),
            batch_size=5, shuffle=False, num_workers=4, pin_memory=True, drop_last=False
        )
        print('val set len: ', len(val_loader))
        test_loader = DataLoader(
            US(
                data_dir=data_dir,
                aug=False,
                sample=samples[split[0] + split[1]:split_length],
            ),
            batch_size=5, shuffle=False, num_workers=4, pin_memory=True, drop_last=False
        )
        print('test set len: ', len(test_loader))

        optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
        # stepped with validation Dice (mode='max'): halve LR when it plateaus
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=1)

        best_dice = -1
        best_dice_epoch = -1

        writer = SummaryWriter(os.path.join(folder_dir, 'runs', f'{i_train}_train'))

        # pbar is advanced manually via pbar.update(1); total=epochs replaces the
        # original unused iterable tqdm(range(epochs))
        with tqdm(total=epochs, initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
            for epoch in range(epochs):
                net.train()
                epoch_loss = 0

                # (was `for iter, (image, mask) in enumerate(...)` — shadowed the
                # builtin `iter` and the index was never used)
                for image, mask in train_loader:
                    optimizer.zero_grad()

                    image = image.cuda()
                    mask = mask.cuda()

                    # rescale inputs from [0, 1] to [-1, 1]
                    image = (image - 0.5) / 0.5

                    pred = net(image)

                    loss = criterion(pred, mask)
                    epoch_loss += loss.item()

                    loss.backward()
                    optimizer.step()

                # evaluation every epoch ===================================================
                with torch.no_grad():
                    dice = eval_dice_2d(net, val_loader, False, '', None)[0].item()

                scheduler.step(dice)

                if dice > best_dice:
                    best_dice = dice
                    best_dice_epoch = epoch
                    # snapshot the best model: deep-copied, eval mode, grads off, on CPU
                    with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
                        pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)

                # log ======================================================================
                epoch_loss = epoch_loss / len(train_loader)
                pbar.set_description(' '.join([
                    f'[{i_train}/{n_train}]',
                    f'[{epoch}/{epochs}]',
                    f'loss: {epoch_loss:.3f}',
                    f'mdice: {dice:.3f}',
                    f'best_dice: {best_dice:.3f}',
                    f'b_dice_e: {best_dice_epoch:.3f}'
                ]))
                pbar.update(1)

                writer.add_scalar('epoch_loss', epoch_loss, global_step=epoch)
                writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=epoch)
                writer.add_scalar('mdice', dice, global_step=epoch)

        # test part of i train
        with torch.no_grad():
            # pickle.load of our own checkpoint — trusted input only
            with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'rb') as f:
                net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

            evaluations = eval_dice_2d(net, test_loader, True, folder_dir, i_train)

            # fixed: was print(f'evaluations'), which printed the literal word
            print(evaluations)

        # metric order produced by eval_dice_2d
        metric_names = ['dice', 'accu', 'iou', 'spec', 'sens', 'ppv', 'npv']

        with open(os.path.join(folder_dir, 'INFO.txt'), 'a') as log:
            log.write(f'train [{i_train}] Best dice of validation: [{best_dice:.5f}] in Epoch: [{best_dice_epoch}]' + '\n')
            for i in range(len(metric_names)):
                log.write(f'{evaluations[i].item():.5f}, ')
            log.write('\n')

        with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
            for i in range(len(metric_names)):
                log.write(f'{evaluations[i].item():.5f}, ')
            log.write('\n')

            sum_evals += evaluations
            log.write('=================================\n')

    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write('Mean:\n')
        # mdice
        evaluations = sum_evals / n_train
        for i in range(7):  # dice, accu, iou, spec, sens, ppv, npv
            log.write(f'{evaluations[i].item():.3f}, ')
        log.write('\n')

# CLI entry point: click parses sys.argv and invokes train().
if __name__ == "__main__":
    train()
#----------------------------------------------------------------------------
