import copy
import os
import pickle
import shutil
import random

import click
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from segmentation.dataset import MR_slice
from segmentation.utils import *


@click.command()
# train
@click.option('--n_train',                  help='N train in a row', type=int, default=3, show_default=True)
@click.option('--data_dir',                 help='Training data save', required=True, metavar='DIR')
@click.option('--out_dir',                  help='Training output save', metavar='DIR', default='.')
@click.option('--epochs',                   help='Number of epochs', type=int, default=300, show_default=True)
@click.option('--batch_size',               help='Batch size for training', type=int, default=10, show_default=True)
@click.option('--resolution',               help='Resolution of image', type=int, default=256, show_default=True)
@click.option('--split',                    help='Split of training, validation, test set', nargs=3, default=[7, 1, 2])
@click.option('--n_class',                  help='', type=int, default=16, show_default=True)
@click.option('--multi_class',              help='multi_class?', is_flag=True)
@click.option('--combine',                  help='combine 3,42 2,41?', is_flag=True)
@click.option('--aug',                      help='combine 3,42 2,41?', is_flag=True)
@click.option('--shot',                     help='how many shot training', type=int, default=500, show_default=True)
@click.option('--length',                   help='how many slices each sample', type=int, default=128, show_default=True)
@click.option('--w_steps',                  help='how many w project step', type=int, default=1000, show_default=True)
def train(
    n_train,
    epochs,
    batch_size,
    resolution,
    split,
    shot,
    multi_class,
    n_class,
    combine,
    aug,
    data_dir,
    out_dir,
    length,
    w_steps,
):
    """Train ``n_train`` supervised U-Net segmentation models and report Dice.

    Each run trains from scratch on ``data_dir`` and evaluates cross-dataset:
    training on CANDI tests on OASIS and vice versa. Per-class and mean 3D
    Dice are printed and appended to ``test_INFO.txt`` in a fresh experiment
    folder under ``out_dir``; the mean over all runs is written at the end.

    NOTE(review): ``split``, ``combine`` and ``w_steps`` are accepted for CLI
    compatibility but are not used by the training logic in this function.
    """

    # make dir =====================================================================
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print(f'make output dir: {out_dir}')
    # Sequential experiment index (renamed from `id`, which shadowed the builtin).
    exp_id = len(os.listdir(out_dir))
    folder_dir = os.path.join(out_dir, f'exp{exp_id}-{shot}shot-reso{resolution}-aug{aug}')
    if 'my' in data_dir:
        folder_dir += '-my'

    # Explicit exception instead of `assert` (asserts vanish under `python -O`).
    if os.path.exists(folder_dir):
        raise FileExistsError(f'experiment folder already exists: {folder_dir}')
    os.mkdir(folder_dir)
    os.mkdir(os.path.join(folder_dir, 'visualization'))
    os.mkdir(os.path.join(folder_dir, 'checkpoint'))
    os.mkdir(os.path.join(folder_dir, 'runs'))
    print('make dir done!')

    # Number of volumes: the image folder holds `length` slices per sample
    # (was a hard-coded 128; `--length` defaults to 128, so behavior is unchanged).
    split_length = len(os.listdir(os.path.join(data_dir, 'images'))) // length
    samples = list(range(split_length))

    # Cross-dataset evaluation: test on the corpus we did NOT train on.
    if 'CANDI' in data_dir:
        test_dir = 'data/OASIS-128-160-norm'
    elif 'OASIS' in data_dir:
        test_dir = 'data/CANDI-128-160-norm'
    else:
        # The original left `test_dir` unbound here and crashed later with a
        # confusing NameError; fail fast with a clear message instead.
        raise ValueError(f'cannot infer test set from data_dir: {data_dir!r} '
                         '(expected path containing "CANDI" or "OASIS")')

    split_length_ = len(os.listdir(os.path.join(test_dir, 'images'))) // length
    samples_ = list(range(split_length_))

    chn = 1                  # single-channel (grayscale) MR slices
    out_ch = n_class + 1     # +1 output channel for background
    condition = 0 if 'OASIS' in test_dir else 1
    sum_dice = 0
    sum_dice_class = [0] * (out_ch - 1)
    print(f'image channel: {chn}')

    print(f'train on {data_dir}')
    print(f'test on {test_dir}')

    criterion = nn.CrossEntropyLoss()

    # train for n_train ===============================================================
    for i_train in range(n_train):
        # Deterministic but distinct seeds per run.
        torch.manual_seed(i_train)
        random.seed(28 * i_train)

        net = UNet(chn, out_ch).cuda()

        train_loader = DataLoader(
            MR_slice(
                data_dir=data_dir,
                aug=aug,
                sample=samples
            ),
            batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True
        )
        print('train set len: ', len(train_loader))

        test_loader = DataLoader(
            MR_slice(
                data_dir=test_dir,
                aug=False,
                sample=samples_
            ),
            batch_size=batch_size, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
        )
        print(f'test len: {len(test_loader)}')

        optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

        writer = SummaryWriter(os.path.join(folder_dir, 'runs', f'{i_train}_train'))

        iters_per_epoch = len(train_loader)
        # `total=epochs` with manual update(1): the original passed range(epochs)
        # as an (unused) iterable while also updating manually.
        with tqdm(total=epochs, initial=0, dynamic_ncols=True, smoothing=0.01, ascii=True) as pbar:
            for epoch in range(epochs):
                for it, (_, image, mask) in enumerate(train_loader):

                    image = image.cuda()
                    mask = mask.cuda()
                    image = (image - 0.5) / 0.5  # rescale [0, 1] -> [-1, 1]

                    pred = net(image)

                    loss = criterion(pred, mask)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    # log ======================================================================
                    pbar.set_description(' '.join([
                        f'[{i_train}/{n_train}]',
                        f'[{epoch}/{epochs}]',
                        f'loss: {loss.item():.3f}',

                    ]))

                    # True global step: the original logged every iteration at
                    # step=epoch, overwriting points within the same epoch.
                    step = epoch * iters_per_epoch + it
                    writer.add_scalar('loss', loss.item(), global_step=step)
                    writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=step)

                scheduler.step()
                pbar.update(1)

        writer.close()  # flush pending TensorBoard events (was never closed)

        # NOTE(review): pickle of a whole nn.Module is fragile across code
        # versions — torch.save(net.state_dict(), ...) would be safer, but the
        # original on-disk format is preserved here for compatibility.
        with open(os.path.join(folder_dir, 'checkpoint', f'{i_train}_best.pth'), 'wb') as f:
            pickle.dump({'net': copy.deepcopy(net).eval().requires_grad_(False).cpu()}, f)

        # Switch BatchNorm/Dropout to inference mode before evaluation; the
        # original evaluated the live net while it was still in train mode
        # (the saved checkpoint copy above is explicitly .eval()-ed).
        net.eval()
        dice, dice_std, dice_class = eval_dice_3d(net, test_loader, True, folder_dir, i_train, multi_class, True, condition)

        for d in dice_class:
            print(f'{d:.3f}', end=', ')
        print(f'\ndice: {dice:.3f} std: {dice_std:.3f}\n')

        with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
            # per-class Dice, then mean Dice, then a separator line
            for d in dice_class:
                log.write(f'{d:.3f}, ')
            log.write('\n')
            log.write(f'{dice:.3f}\n')
            log.write('=================================\n')

        # Accumulate for the cross-run mean (no need to hold the log file open).
        sum_dice += dice
        sum_dice_class = [x + y for x, y in zip(sum_dice_class, dice_class)]

        # Qualitative check of the last batch: input | ground truth | prediction.
        save_image(torch.cat([(image.repeat(1, 3, 1, 1) + 1) / 2, visualize(mask), visualize(pred.argmax(1))]), f'{folder_dir}/show.png', nrow=batch_size)

    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        log.write('Mean:\n')
        sum_dice_class = [x / n_train for x in sum_dice_class]

        # mean per-class Dice over all runs, then overall mean Dice
        for d in sum_dice_class:
            log.write(f'{d:.3f}, ')
        log.write('\n')
        log.write(f'{sum_dice / n_train:.3f}\n')
        log.write('=================================\n')



# Entry point: Click parses the CLI options and invokes `train`.
if __name__ == "__main__":
    train()
#----------------------------------------------------------------------------