import os
import pickle
import dnnlib
import legacy
import torch
from torch.utils.data.dataloader import DataLoader, Dataset

from segmentation.dataset import Handmask, Triplet
from segmentation.utils import *


class WsAndImage(Dataset):
    """Dataset yielding (latent w, grayscale image, segmentation mask) triples.

    Expects ``test_dir`` to contain three parallel sub-directories with
    matching filenames: ``ws/`` (pickled dicts holding a 'w' latent under
    key 'w'), ``images/`` and ``masks/``.
    """

    def __init__(self, test_dir, sample=None, length=128):
        """
        Args:
            test_dir: root directory holding ws/, images/ and masks/.
            sample: iterable of volume indices to keep; each index ``s``
                selects the contiguous run of sorted filenames
                ``[s*length, (s+1)*length)``. None/empty selects nothing.
                (Default changed from a mutable ``[]`` to None — same behavior.)
            length: number of slices (files) per volume.
        """
        self.test_dir = test_dir

        self.ws_dir = os.path.join(test_dir, 'ws')
        self.image_dir = os.path.join(test_dir, 'images')
        self.mask_dir = os.path.join(test_dir, 'masks')

        all_names = sorted(os.listdir(self.image_dir))

        # Keep only the slices belonging to the requested volumes.
        selected = []
        for s in (sample or []):
            selected += all_names[s * length:(s + 1) * length]
        self.filenames = selected

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, index):
        filename = self.filenames[index]

        # Latent for this slice: <ws_dir>/<stem>.pkl holding {'w': tensor}.
        stem = filename.split('.')[0]
        with open(f"{os.path.join(self.ws_dir, stem)}.pkl", 'rb') as f:
            w = pickle.load(f)['w']

        # BUG FIX: the path previously interpolated the literal '(unknown)'
        # instead of the current filename, so no image could ever be opened.
        image = Image.open(os.path.join(self.image_dir, filename)).convert('L')
        # [C,H,W] scaled to [0,1]; W & H must match G output resolution.
        image = np.array(image)[None, :] / 255
        image = torch.tensor(image, dtype=torch.float32)

        mask = Image.open(os.path.join(self.mask_dir, filename)).convert('L')
        mask = torch.tensor(np.array(mask), dtype=torch.long)

        return w, image, mask


def eval_dice_3d_ws(gan, get_representation, resize_repre,  net, loader, is_test, folder_dir, i_train, multi_class, ignore_bg=False, condition=2):
    """Compute per-volume Dice scores for ``net`` over ``loader``.

    Batches are accumulated until exactly 128 slices (one 3D volume) have
    been collected, then class-wise accuracy is computed on the whole
    volume at once, so the loader's batch size must divide 128 evenly.

    Args:
        gan: unused in this function; kept for call-signature parity.
        get_representation: callable ``(w, resize) -> (generated image, features)``.
        resize_repre: forwarded to ``get_representation``.
        net: segmentation head applied to the features.
        loader: yields ``(w, image, mask)`` batches; image assumed in [0,1].
        is_test: when True, save a visualization grid per completed volume.
        folder_dir: output root containing a ``visualization`` sub-directory.
        i_train: run index used in the saved visualization filenames.
        multi_class: unused in this function.
        ignore_bg: must be True (asserted below).
        condition: unused here (its label-remapping branch is commented out).

    Returns:
        tuple: (mean Dice over volumes, std over volumes,
        per-class mean Dice array).
    """
    assert ignore_bg
    net.eval()
    count = 0    # number of completed volumes; used in output filenames
    nSample = 0  # slices accumulated toward the current volume
    image_sample = []
    g_image_sample = []
    mask_sample = []
    pred_sample = []
    # nSample * parts
    overall = []
    for w, image, mask in tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False):
        nSample += image.shape[0]

        w, image, mask = w.cuda(), image.cuda(), mask.cuda()
        # Map image from [0,1] to [-1,1] to match the generator's output range.
        image = (image-0.5)/0.5
        g_image, representation = get_representation(w, resize_repre)

        pred = net(representation)
        # Upsample logits to 160x160 before taking the per-pixel argmax class.
        pred = F.interpolate(pred, (160,160), mode='nearest').argmax(1)
        g_image = F.interpolate(g_image, (160,160), mode='bilinear')

        image_sample.append(image)
        g_image_sample.append(g_image)
        mask_sample.append(mask)
        pred_sample.append(pred)

        # A full 3D volume (128 slices) has been gathered: score it and reset.
        if nSample==128:
            image_sample = torch.cat(image_sample)
            g_image_sample = torch.cat(g_image_sample)
            mask_sample = torch.cat(mask_sample)
            pred_sample = torch.cat(pred_sample)

            # # use on ds
            # if condition==1:
            #     pred_sample = changeLabel(pred_sample)
            # elif condition==0:
            #     mask_sample = changeLabel(mask_sample)

            # Per-class scores for this volume; appended as one row of `overall`.
            classwise = compute_accuracy_brain(mask_sample.cpu().numpy(), pred_sample.cpu().numpy())
            overall.append(classwise)

            if is_test:
                # Rescale images from [-1,1] back to [0,1] and expand to 3
                # channels so they can be tiled next to the colorized masks.
                image_sample = (image_sample+1)/2
                image_sample = image_sample.repeat(1,3,1,1)
                g_image_sample = (g_image_sample+1)/2
                g_image_sample = g_image_sample.repeat(1,3,1,1)
                pred_sample = visualize(changeLabel(pred_sample))
                mask_sample = visualize(changeLabel(mask_sample))

                # Interleave (image, generated, mask, prediction) per slice
                # into one grid image for this volume.
                save_image(
                    torch.cat([image_sample.unsqueeze(1), g_image_sample.unsqueeze(1), mask_sample.unsqueeze(1), pred_sample.unsqueeze(1)], dim=1).view(-1, 3, image_sample.shape[2], image_sample.shape[3]),
                    os.path.join(folder_dir, 'visualization', f'test{i_train}_{count}.png'),
                    nrow=4*8,
                    padding=0
                )

            nSample = 0
            count += 1
            image_sample = []
            g_image_sample = []
            mask_sample = []
            pred_sample = []

    overall = np.array(overall)
    # Mean over classes first -> one Dice value per volume.
    overall_sample = overall.mean(1)

    # dice mean, dice std, dice class
    # item       item      array
    return overall_sample.mean(), overall_sample.std(), overall.mean(0)


def test_ws(
    test_dir,
    gan_dir,
    seg_dir,
    out_dir,
    n_train=3,
    split=(72, 8, 20),
    length=128,
    w_steps=1000,
    combine=True,
    multi_class=True,
):
    """Evaluate a pre-trained segmentation head on GAN-inverted (ws) volumes.

    Loads the generator from ``gan_dir`` and the segmentation net from
    ``seg_dir``, derives the test split, runs ``eval_dice_3d_ws``
    ``n_train`` times with different shuffles, and logs per-class and mean
    Dice scores under a freshly created experiment folder inside ``out_dir``.

    Args:
        test_dir: dataset root with images/, masks/ and ws/ sub-directories.
        gan_dir: path/URL of the generator snapshot pickle.
        seg_dir: path of the pickled segmentation net ({'net': module}).
        out_dir: root directory where the experiment folder is created.
        n_train: number of evaluation repetitions (different random splits).
        split: train/val/test ratio; only the test portion is used here.
            (Default changed from a mutable list to an equivalent tuple.)
        length: unused here; WsAndImage applies its own default of 128.
        w_steps: unused; kept for call-site compatibility.
        combine: unused; kept for call-site compatibility.
        multi_class: forwarded to eval_dice_3d_ws.
    """
    import warnings
    warnings.filterwarnings('ignore')

    # Dataset-dependent flags, inferred from the paths.
    condition = 0 if 'OASIS' in test_dir else 1
    condition_name = 'OASIS' if 'OASIS' in test_dir else 'CANDI'
    seg_name = 'L' if 'L' in seg_dir else 'BiFPN'

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        print(f'make output dir: {out_dir}')
    # Renamed from `id` to avoid shadowing the builtin.
    exp_id = len(os.listdir(out_dir))
    out_dir = os.path.join(out_dir, f"exp{exp_id}-{condition_name}-{gan_dir.split('/')[1][:5]}-{seg_name}")

    assert not os.path.exists(out_dir)
    os.mkdir(out_dir)
    os.mkdir(os.path.join(out_dir, 'visualization'))
    print('make dir done!')

    # init generator ===============================================================
    with dnnlib.util.open_url(gan_dir) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data  # release the rest of the checkpoint early
    get_representation = init_get_representation(G, [4, 8, 16, 32, 64, 128, 256], 256, 'const')
    print('load generator done!')

    # NOTE(review): pickle.load executes arbitrary code on load; only use
    # with trusted checkpoint files.
    with open(seg_dir, 'rb') as f:
        Seg = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('load Seg done!')

    # init dataaset ===============================================================
    # Split volumes (each volume = 128 slices) so the test set matches the
    # one used for U-Net training.
    split_length = len(os.listdir(os.path.join(test_dir, 'images'))) // 128
    split = [int(s) for s in split]
    split = [split_length * s // sum(split) for s in split]
    split[1] = split_length - split[0] - split[2]  # absorb rounding into val
    print(f'dataset split: {split}')

    out_ch = 17  # number of segmentation classes (incl. background)
    sum_dice = 0
    sum_dice_class = [0] * (out_ch - 1)

    import random
    for i_train in range(n_train):
        # Seed per repetition so each run gets a reproducible shuffle.
        torch.manual_seed(i_train)
        random.seed(28 * i_train)
        samples = list(range(split_length))
        random.shuffle(samples)

        test_loader = DataLoader(
            WsAndImage(
                test_dir=test_dir,
                sample=samples[split[0] + split[1]:split_length],
            ),
            batch_size=4, drop_last=False, shuffle=False, num_workers=4, pin_memory=True
        )

        print(f'test: {samples[split[0]+split[1]:split_length]}')

        chn = 1
        print(f'image channel: {chn}')

        with torch.no_grad():
            dice, dice_std, dice_class = eval_dice_3d_ws(G, get_representation, seg_name=='L', Seg, test_loader, True, out_dir, i_train, multi_class, True, condition)

            for d in dice_class:
                print(f'{d:.3f}', end=', ')
            print(f'\ndice: {dice:.3f} std: {dice_std:.3f}\n')

        with open(os.path.join(out_dir, 'INFO_ds.txt'), 'a') as log:
            log.write(f'mdice: [{dice:.5f}]\n')

        with open(os.path.join(out_dir, 'test_INFO_ds.txt'), 'a') as log:
            # Per-class Dice for this repetition, then the mean.
            for d in dice_class:
                log.write(f'{d:.3f}, ')
            log.write('\n')
            log.write(f'{dice:.3f}\n')

            sum_dice += dice
            sum_dice_class = [x + y for x, y in zip(sum_dice_class, dice_class)]

            log.write('=================================\n')

    with open(os.path.join(out_dir, 'INFO_ds.txt'), 'a') as log:
        log.write(f'Mean test miou: [{sum_dice/n_train}]\n')

    with open(os.path.join(out_dir, 'test_INFO_ds.txt'), 'a') as log:
        log.write('Mean:\n')
        sum_dice_class = [x / n_train for x in sum_dice_class]

        # Per-class means over all repetitions, then the overall mean Dice.
        for d in sum_dice_class:
            log.write(f'{d:.3f}, ')
        log.write('\n')
        log.write(f'{sum_dice/n_train:.3f}\n')

if __name__ == "__main__":
    # Pin the run to a single GPU; must be set before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"] = '7'
    # Previous CANDI run, kept for reference:
    # test_ws(
    #     test_dir='data/CANDI-128-160',
    #     gan_dir='save/00010-images-mirror-low_shot-kimg25000-batch32-color-translation-cutout/network-snapshot-best.pkl',
    #     seg_dir='save/00010-images-mirror-low_shot-kimg25000-batch32-color-translation-cutout/handmark_0/Seg_BiFPN_128s_cbTrue_[4, 8, 16, 32, 64, 128, 256].pkl',
    #     out_dir='save_seg/ws',
    #     n_train=1,
    # )
    # Evaluate the OASIS segmentation head; split=[355,1,1] puts nearly all
    # volumes in train so only a tiny held-out set is tested here.
    test_ws(
        test_dir='data/OASIS-128-160',
        gan_dir='save/00011-GAN_OASIS-mirror-low_shot-kimg25000-batch32-color-translation-cutout/network-snapshot-best.pkl',
        seg_dir='save/00011-GAN_OASIS-mirror-low_shot-kimg25000-batch32-color-translation-cutout/handmark_0_old/Seg_BiFPN_128s_cbTrue_[4, 8, 16, 32, 64, 128, 256].pkl',
        out_dir='save_seg/ws',
        split=[355,1,1],
        n_train=1,
    )
    # get_UNet(
    #     test_dir='data/OASIS-128-160-norm',
    #     out_dir='save_seg/supervised/exp22-74shot-reso160-augTrue'
    # )
#----------------------------------------------------------------------------
