import os
import click
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch_utils import misc
from torchvision import transforms
from torchvision.utils import save_image
from tqdm import tqdm

from segmentation.network import *
from segmentation.dataset import CANDI_label
from medpy.metric.binary import dc
from torch.nn import PairwiseDistance


def make_save_dir(save_dir, net_name, shot, representation_layers):
    """Create a fresh experiment folder under save_seg/<basename(save_dir)>.

    The folder is named exp{K}-{shot}shot-{net_name}-{representation_layers},
    where K is the number of entries already present in the root, and is
    populated with 'checkpoint', 'visualization' and 'runs' subfolders.

    Returns the path of the created experiment folder.
    Raises FileExistsError if the computed folder already exists.
    """
    root = os.path.join('save_seg', os.path.split(save_dir)[1])
    # exist_ok avoids the exists()/makedirs() race of the original
    os.makedirs(root, exist_ok=True)

    # next experiment id = number of existing entries in the root
    exp_id = len(os.listdir(root))

    folder_dir = os.path.join(root, f'exp{exp_id}-{shot}shot-{net_name}-{representation_layers}')

    # os.makedirs raises FileExistsError on collision — replaces the old
    # `assert`, which disappears under `python -O`
    os.makedirs(folder_dir)
    os.mkdir(os.path.join(folder_dir, 'checkpoint'))
    os.mkdir(os.path.join(folder_dir, 'visualization'))
    os.mkdir(os.path.join(folder_dir, 'runs'))

    return folder_dir


def get_dataloader(shot, batch_size, dataset, split, i_train):
    """Split `dataset` into train/val/test loaders for a few-shot run.

    The three-way split is seeded by `i_train` for reproducibility across
    repeated runs; the training split is then randomly subsampled down to
    `shot` examples (this second split is intentionally unseeded).
    """
    generator = torch.Generator().manual_seed(28 + 10 * i_train)
    train_set, val_set, test_set = torch.utils.data.random_split(dataset, split, generator)
    # keep only `shot` training samples; discard the rest
    train_set, _ = torch.utils.data.random_split(train_set, [shot, split[0] - shot])

    loader_kwargs = dict(num_workers=4, pin_memory=True)
    train_loader = DataLoader(train_set, batch_size, shuffle=True, drop_last=True, **loader_kwargs)
    val_loader = DataLoader(val_set, 5, shuffle=False, **loader_kwargs)
    test_loader = DataLoader(test_set, 5, shuffle=False, **loader_kwargs)

    return train_loader, val_loader, test_loader


def get_network(name, in_ch, out_ch):
    """Factory for segmentation heads.

    name: 'S'/'M'/'L' (SegNet_S size variants), 'U-Net' or 'BiFPN'.
    Raises ValueError for an unknown name (the original silently returned
    None, which surfaced later as an opaque NoneType error).
    """
    if name in ('S', 'M', 'L'):
        return SegNet_S(in_ch, out_ch, name)
    if name == 'U-Net':
        return UNet(in_ch, out_ch=out_ch)
    if name == 'BiFPN':
        return BiFPN(out_ch=out_ch, n_block=7)
    raise ValueError(f'Unknown network name: {name!r}')


def init_get_representation(G, which_repre_layers, resolution=256, noise_mode='const'):
    """Build a closure that runs the generator's synthesis blocks manually and
    harvests intermediate feature maps as pixel-wise representations.

    G: generator whose `synthesis` network exposes per-resolution blocks
       `b4 ... b{resolution}` (StyleGAN2-ADA layout — TODO confirm version).
    which_repre_layers: spatial resolutions of feature maps to collect.
    resolution: output image resolution (assumed a power of two).
    noise_mode: forwarded to every synthesis block.

    Returns get_representation(ws, resize=True) -> (img, representation).
    """
    img_resolution_log2 = int(np.log2(resolution))
    # synthesis block resolutions: 4, 8, ..., resolution
    block_resolutions = [2 ** i for i in range(2, img_resolution_log2 + 1)]
    
    def get_representation(ws, resize=True):
        """Synthesize an image from latents `ws`, returning (img, features).

        ws: (B, num_ws, w_dim), or (B, 1, w_dim) — a single w is broadcast
            to all layers.
        resize: if True, upsample every harvested map to `resolution` and
            concatenate along channels into one tensor; otherwise return the
            raw per-resolution maps as a list cast to float32.
        """
        # broadcast a single w vector across all synthesis layers
        if ws.shape[1]==1:
            ws = ws.repeat([1, G.synthesis.num_ws, 1])
        misc.assert_shape(ws, [None, G.synthesis.num_ws, G.synthesis.w_dim])
        ws = ws.to(torch.float32)

        # Slice ws per block, mirroring SynthesisNetwork.forward: each slice
        # spans num_conv + num_torgb ws, but the index only advances by
        # num_conv — the torgb w overlaps the next block's first w.
        w_idx = 0
        block_ws = []
        for res in block_resolutions:
            block = getattr(G.synthesis, f'b{res}')
            block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
            w_idx += block.num_conv

        representation = []
        x = img = None
        for res, cur_ws in zip(block_resolutions, block_ws):
            block = getattr(G.synthesis, f'b{res}')
            x, img = block(x, img, cur_ws, noise_mode=noise_mode)
            # harvest features whose spatial size is in the requested set;
            # detach so downstream heads do not backprop into G
            if x.shape[-1] in which_repre_layers:
                if resize:
                    # NOTE(review): passes a PIL resampling constant as the
                    # torchvision interpolation — works, but deprecated in
                    # newer torchvision; verify against the pinned version.
                    representation += [transforms.Resize((resolution, resolution), Image.BILINEAR)(x.detach())]
                else:
                    representation += [x.detach()]

        if resize:
            # single tensor: (B, sum of harvested channels, resolution, resolution)
            return img, torch.cat(representation, dim=1)
        else:
            for i in range(len(representation)):
                representation[i] = representation[i].to(torch.float32)

            return img, representation

    return get_representation


def evaluate_bce(seg, gt, eps=1e-6, parts=None, ignore_bg=False, bg_label=0):
    """Batch-summed metrics for binary segmentation.

    seg, gt: (B, C, H, W) binary maps (values 0/1).
    eps: smoothing term guarding empty denominators.
    parts, ignore_bg, bg_label: accepted for signature parity with
        evaluate_ce — callers pass them uniformly to either function — and
        ignored in the binary case.

    Returns a (7, C) tensor whose rows are dice, accuracy, iou, specificity,
    sensitivity, ppv and npv, each summed over the batch (callers divide by
    the sample count).
    """
    # per-sample, per-channel confusion counts: each is (B, C)
    tp = torch.sum((seg == 1) & (gt == 1), [2,3])
    tn = torch.sum((seg == 0) & (gt == 0), [2,3])
    fn = torch.sum((seg == 0) & (gt == 1), [2,3])
    fp = torch.sum((seg == 1) & (gt == 0), [2,3])

    # torch.stack, not torch.tensor: torch.tensor() over a list of
    # multi-element tensors raises ValueError for C > 1
    evaluations = torch.stack([
        torch.sum((2*tp + eps)/(2*tp + fn + fp + eps), 0),  # dice
        torch.sum((tp+tn + eps) / (tp+tn+fn+fp+eps), 0),    # accuracy
        torch.sum((tp + eps)/(tp+fn+fp+eps), 0),            # iou
        torch.sum(tn/(tn+fp+eps), 0),                       # specificity
        torch.sum(tp/(tp+fn+eps), 0),                       # sensitivity
        torch.sum(tp/(tp+fp+eps), 0),                       # ppv
        torch.sum(tn/(tn+fn+eps), 0),                       # npv
    ])

    return evaluations


# def evaluate_ce(seg, gt):
#     C = seg.shape[1]
#     sum_dice, sum_accuracy, sum_iou = 0, 0, 0
#     for i in range(C):
#         dice, accuracy, iou = evaluate_bce(seg[:,i].unsqueeze(1), gt[:, i].unsqueeze(1))

#         sum_dice     += dice
#         sum_accuracy += accuracy
#         sum_iou      += iou

#     return sum_dice/C, sum_accuracy/C, sum_iou/C


def evaluate_ce(seg, gt, eps=1e-6, parts=5, ignore_bg=False, bg_label=0):
    """Per-class and pixel-frequency-weighted metrics for multi-class masks.

    seg: (B, parts, H, W) logits; gt: (B, H, W) integer labels.
    Returns (dices, accus, ious, wdice, wacc, wiou): the first three are
    per-class batch sums (one entry per evaluated class), the last three are
    scalar batch sums of the frequency-weighted versions. Callers divide all
    of them by the number of samples.
    """
    labels = seg.argmax(dim=1)  # (B, H, W) predicted class ids

    norm = eps        # per-sample weight normalizer
    wdice_acc = eps
    waccu_acc = eps
    wiou_acc = eps
    dice_rows, accu_rows, iou_rows = [], [], []

    for cls in range(parts):
        if ignore_bg and cls == bg_label:
            continue

        pred_is_cls = (labels == cls)
        gt_is_cls = (gt == cls)

        # per-sample confusion counts for this class: each is (B,)
        tp = torch.sum(pred_is_cls & gt_is_cls, [1, 2])
        tn = torch.sum(~pred_is_cls & ~gt_is_cls, [1, 2])
        fn = torch.sum(~pred_is_cls & gt_is_cls, [1, 2])
        fp = torch.sum(pred_is_cls & ~gt_is_cls, [1, 2])

        dice = (2*tp + eps) / (2*tp + fn + fp + eps)
        accuracy = (tp + tn + eps) / (tp + tn + fn + fp + eps)
        iou = (tp + eps) / (tp + fn + fp + eps)

        # weight each sample's score by the class's pixel count in gt
        weight = torch.sum(gt_is_cls, [1, 2])
        wdice_acc = wdice_acc + weight * dice
        waccu_acc = waccu_acc + weight * accuracy
        wiou_acc = wiou_acc + weight * iou
        norm = norm + weight

        dice_rows.append(dice.sum(0, keepdim=True))
        accu_rows.append(accuracy.sum(0, keepdim=True))
        iou_rows.append(iou.sum(0, keepdim=True))

    wdice = torch.sum(wdice_acc / norm, 0)
    wacc = torch.sum(waccu_acc / norm, 0)
    wiou = torch.sum(wiou_acc / norm, 0)

    return (torch.cat(dice_rows, dim=0),
            torch.cat(accu_rows, dim=0),
            torch.cat(iou_rows, dim=0),
            wdice, wacc, wiou)


def eval_net_few_shot(net, get_representation, loader, is_test, folder_dir, i_train, resize=True, multi_class=False, ignore_bg=False, bg_label=0):
    """Evaluate a segmentation head trained on GAN features (few-shot setup).

    loader yields (w, image, mask); `get_representation` maps latent w to a
    (reconstruction, feature) pair and `net` segments the features. Metrics
    are accumulated as batch sums and divided by the total sample count at
    the end. When is_test, 10-batch visualization grids are written to
    <folder_dir>/visualization/test{i_train}_{k}.png.

    Returns per-sample (dice, accuracy, iou, wdice, wacc, wiou); the three
    weighted values remain 0 unless multi_class is True.

    NOTE(review): no torch.no_grad() here — only the GAN features are
    detached inside get_representation; net's forward still builds a graph.
    """
    net.eval()
    evaluate = evaluate_ce if multi_class else evaluate_bce
    sum_dice, sum_accuracy, sum_iou, sum_wdice, sum_wacc, sum_wiou = 0,0,0,0,0,0
    to_save = []
    count = 0
    nSample = 0
    for w, image, mask in tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False):
        nSample += image.shape[0]
        w, image, mask = w.cuda(), image.cuda(), mask.cuda()

        # reconstruction (for visualization) + pixel-wise features (net input)
        recon, representation = get_representation(w, resize)

        pred = net(representation)
        
        if multi_class:
            dice, accuracy, iou, wdice, wacc, wiou = evaluate(pred, mask, parts=pred.shape[1], ignore_bg=ignore_bg, bg_label=bg_label)
            sum_wdice    += wdice
            sum_wacc     += wacc
            sum_wiou     += wiou
        else:
            # NOTE(review): evaluate_bce as declared above takes no
            # ignore_bg/bg_label and returns a 7-row tensor, not 3 values —
            # this branch raises as written; confirm it is ever exercised.
            dice, accuracy, iou = evaluate(pred, mask, ignore_bg=ignore_bg, bg_label=bg_label)

        sum_dice     += dice
        sum_accuracy += accuracy
        sum_iou      += iou

        if is_test:
            # generator output is in [-1, 1]; rescale to [0, 1] for saving
            recon = (recon + 1)/2
            if multi_class:
                # B, 1, H, W -> B, 3, H, W
                image = image.repeat(1,3,1,1)
                recon = recon.repeat(1,3,1,1)
                # B, 5, H, W -> B, 3, H, W
                pred = visualize(pred.argmax(1))
                mask = visualize(mask)
                
                # interleave (image, mask) rows with (recon, pred) rows
                to_save.append(torch.cat([
                    torch.cat([image.unsqueeze(1), mask.unsqueeze(1)], dim=1).view(-1, 3, image.shape[2], image.shape[3]),
                    torch.cat([recon.unsqueeze(1), pred.unsqueeze(1)], dim=1).view(-1, 3, image.shape[2], image.shape[3])
                ], dim=0))
            else:
                # binarize logits at 0 (equivalent to sigmoid threshold 0.5)
                pred = torch.where(pred>0, 1.0, 0.0)
                to_save.append(get_save_image(image.detach().cpu(), mask.detach().cpu(), recon.detach().cpu(), pred.detach().cpu()))

            # flush a grid every 10 accumulated batches
            if len(to_save) == 10:
                save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, 'visualization', f'test{i_train}_{count}.png'), nrow=10)
                to_save = []
                count += 1

    # flush any remaining (<10) batches
    if is_test and len(to_save)>0:
        save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, 'visualization', f'test{i_train}_{count}.png'), nrow=10)

    return sum_dice/nSample, sum_accuracy/nSample, sum_iou/nSample, sum_wdice/nSample, sum_wacc/nSample, sum_wiou/nSample



def eval_net_supervised(net, loader, is_test, folder_dir, i_train, multi_class, ignore_bg=False, bg_label=0):
    """Evaluate a fully-supervised segmentation net on raw images.

    Mirrors eval_net_few_shot but feeds the normalized image directly to the
    net (no GAN features; the input image stands in for the reconstruction
    in the saved grids).

    Returns per-sample (dice, accuracy, iou, wdice, wacc, wiou); the three
    weighted values remain 0 unless multi_class is True.
    """
    net.eval()
    evaluate = evaluate_ce if multi_class else evaluate_bce
    sum_dice, sum_accuracy, sum_iou, sum_wdice, sum_wacc, sum_wiou = 0,0,0,0,0,0
    to_save = []
    count = 0
    nSample = 0
    for _, image, mask in tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False):
        nSample += image.shape[0]
        image, mask = image.cuda(), mask.cuda()
        # [0, 1] -> [-1, 1]
        image = (image-0.5)/0.5

        pred = net(image)
        
        if multi_class:
            dice, accuracy, iou, wdice, wacc, wiou = evaluate(pred, mask, parts=pred.shape[1], ignore_bg=ignore_bg, bg_label=bg_label)
            sum_wdice    += wdice
            sum_wacc     += wacc
            sum_wiou     += wiou
        else:
            # NOTE(review): evaluate_bce as declared above takes no
            # ignore_bg/bg_label and returns a 7-row tensor, not 3 values —
            # this branch raises as written; confirm it is ever exercised.
            dice, accuracy, iou = evaluate(pred, mask, ignore_bg=ignore_bg, bg_label=bg_label)

        sum_dice     += dice
        sum_accuracy += accuracy
        sum_iou      += iou

        if is_test:
            # back to [0, 1] for saving
            image = (image+1)/2

            if multi_class:
                # B, 1, H, W -> B, 3, H, W
                image = image.repeat(1,3,1,1)
                # B, 5, H, W -> B, 3, H, W
                pred = visualize(pred.argmax(1))
                mask = visualize(mask)
                
                # interleave (image, mask) rows with (image, pred) rows
                to_save.append(torch.cat([
                    torch.cat([image.unsqueeze(1), mask.unsqueeze(1)], dim=1).view(-1, 3, image.shape[2], image.shape[3]),
                    torch.cat([image.unsqueeze(1), pred.unsqueeze(1)], dim=1).view(-1, 3, image.shape[2], image.shape[3])
                ], dim=0))
            else:
                # binarize logits at 0 (equivalent to sigmoid threshold 0.5)
                pred = torch.where(pred>0, 1.0, 0.0)
                to_save.append(get_save_image(image.detach().cpu(), mask.detach().cpu(), image.detach().cpu(), pred.detach().cpu()))

            # flush a grid every 10 accumulated batches
            if len(to_save) == 10:
                save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, 'visualization', f'test{i_train}_{count}.png'), nrow=10)
                to_save = []
                count += 1

    # flush any remaining (<10) batches
    if is_test and len(to_save)>0:
        save_image(torch.cat(to_save, dim=0), os.path.join(folder_dir, 'visualization', f'test{i_train}_{count}.png'), nrow=10)

    return sum_dice/nSample, sum_accuracy/nSample, sum_iou/nSample, sum_wdice/nSample, sum_wacc/nSample, sum_wiou/nSample


def eval_dice_3d(net, loader, is_test, folder_dir, i_train, multi_class, ignore_bg=False, condition=0):
    """Volume-level class-wise Dice for a multi-class segmentation net.

    2-D slices from the loader are buffered until 128 have accumulated
    (treated as one volume); class-wise Dice is then computed on the whole
    volume via compute_accuracy_brain. When is_test, a colorized
    image/mask/prediction grid is saved per volume.

    Returns (mean, std, classwise): mean and std of the per-volume
    class-average Dice, plus the per-class mean across volumes.

    NOTE(review): `multi_class` is unused; `condition` only remaps labels for
    the saved visualization (the metric-side remapping is commented out
    below). Assumes batch sizes divide 128 and each volume is 128 slices.
    """
    assert ignore_bg
    net.eval()
    count = 0
    nSample = 0
    image_sample = []
    mask_sample = []
    pred_sample = []
    # nSample * parts
    overall = []
    for image, mask in tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False):
        nSample += image.shape[0]

        image, mask = image.cuda(), mask.cuda()
        # [0, 1] -> [-1, 1]
        image = (image-0.5)/0.5
        pred = net(image).argmax(1)

        image_sample.append(image)
        mask_sample.append(mask)
        pred_sample.append(pred)
        
        # a full 128-slice volume has accumulated: score (and maybe save) it
        if nSample==128:
            image_sample = torch.cat(image_sample)
            mask_sample = torch.cat(mask_sample)
            pred_sample = torch.cat(pred_sample)

            # # use on ds
            # if condition==1:
            #     pred_sample = changeLabel(pred_sample)
            # elif condition==0:
            #     mask_sample = changeLabel(mask_sample)

            classwise = compute_accuracy_brain(mask_sample.cpu().numpy(), pred_sample.cpu().numpy())
            overall.append(classwise)

            if is_test:
                image_sample = (image_sample+1)/2
                image_sample = image_sample.repeat(1,3,1,1)
                # remap to a common label space before colorizing
                if condition:
                    pred_sample = changeLabel(pred_sample)
                    mask_sample = changeLabel(mask_sample)
                pred_sample = visualize(pred_sample)
                mask_sample = visualize(mask_sample)

                save_image(
                    torch.cat([image_sample.unsqueeze(1), mask_sample.unsqueeze(1), pred_sample.unsqueeze(1)], dim=1).view(-1, 3, image_sample.shape[2], image_sample.shape[3]),
                    os.path.join(folder_dir, 'visualization', f'test{i_train}_{count}.png'),
                    nrow=3*8,
                    padding=0
                )

            # reset per-volume buffers
            nSample = 0
            count += 1
            image_sample = []
            mask_sample = []
            pred_sample = []

    overall = np.array(overall)
    # per-volume mean over classes
    overall_sample = overall.mean(1)

    # dice mean, dice std, dice class
    # item       item      array
    return overall_sample.mean(), overall_sample.std(), overall.mean(0)


def eval_dice_2d(net, loader, is_test, folder_dir, i_train):
    """Evaluate a binary 2-D segmentation net slice-by-slice.

    Accumulates the 7-row metric tensor from evaluate_bce over the loader
    and returns its per-sample mean. When is_test, an image/mask/prediction
    grid covering the whole loader is written to
    <folder_dir>/visualization/test{i_train}.png.
    """
    net.eval()
    metric_total = 0
    n_samples = 0
    panels = []
    progress = tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False)
    for image, mask in progress:
        n_samples += image.shape[0]

        image = image.cuda()
        mask = mask.cuda()
        # [0, 1] -> [-1, 1]
        image = (image - 0.5) / 0.5
        # sigmoid threshold at 0.5 -> hard binary prediction
        pred = (torch.sigmoid(net(image)) > 0.5).to(torch.int8)

        metric_total = metric_total + evaluate_bce(pred, mask)

        if is_test:
            # back to [0, 1] for saving
            image = (image + 1) / 2
            panels.append(
                torch.cat([image.unsqueeze(1), mask.unsqueeze(1), pred.unsqueeze(1)], dim=1)
                .view(-1, 1, image.shape[2], image.shape[3])
            )

    if is_test:
        save_image(
            torch.cat(panels),
            os.path.join(folder_dir, 'visualization', f'test{i_train}.png'),
            nrow=3*8
        )

    return metric_total / n_samples


def reset_bn_statis(model, domain_id):
    """Reset the running BN statistics of domain `domain_id` in-place.

    Matches state-dict entries containing 'bns.{domain_id}.running_mean' /
    'running_var' and restores them to their initial values (0 and 1).
    """
    mean_key = 'bns.{}.running_mean'.format(domain_id)
    var_key = 'bns.{}.running_var'.format(domain_id)
    for name, tensor in model.state_dict().items():
        if mean_key in name:
            tensor.zero_()
        elif var_key in name:
            tensor.fill_(1)


def get_bn_statis(model, domain_id):
    """Collect cloned running BN statistics for domain `domain_id`.

    Matches state-dict entries containing 'bns.{domain_id}.running_mean' /
    'running_var'. Clones so callers can mutate/compare the returned tensors
    without touching the live model buffers.

    Returns (means, variances), two lists of tensors in state-dict order.
    """
    means = []
    variances = []  # renamed from `vars`, which shadowed the builtin
    mean_key = 'bns.{}.running_mean'.format(domain_id)
    var_key = 'bns.{}.running_var'.format(domain_id)
    for name, param in model.state_dict().items():
        if mean_key in name:
            means.append(param.clone())
        elif var_key in name:
            variances.append(param.clone())
    return means, variances


def cal_distance(means_1, means_2, vars_1, vars_2):
    """Sum of L2 distances between corresponding BN means and variances.

    Each argument is a list of 1-D tensors (as produced by get_bn_statis);
    corresponding entries are compared pairwise and the distances summed.
    Returns a Python float.
    """
    pdist = PairwiseDistance(p=2)
    total = 0
    for m1, m2, v1, v2 in zip(means_1, means_2, vars_1, vars_2):
        total = total + pdist(m1.unsqueeze(0), m2.unsqueeze(0))
        total = total + pdist(v1.unsqueeze(0), v2.unsqueeze(0))
    return total.item()


def changeLabel(x: torch.Tensor) -> torch.Tensor:
    """Remap OASIS label ids to CANDI label ids.

    Labels outside the mapping (0 or > 16) map to 0 (background), matching
    the original behavior. The annotation previously used `torch.tensor`
    (the factory function) instead of the `torch.Tensor` type.
    """
    # index = OASIS id, value = CANDI id
    oasis_to_candi = [0, 1, 2, 3, 4, 5, 6, 13, 14, 7, 8, 9, 10, 11, 12, 15, 16]
    y = torch.zeros_like(x)
    for src, dst in enumerate(oasis_to_candi):
        if src == 0:
            continue  # background stays 0
        y[x == src] = dst
    return y


def eval_dice_3d_ds(net, loader, is_test, folder_dir, i_train, multi_class, ignore_bg=False, condition=0):
    """Volume-level Dice with per-batch BN-domain selection (DSBN-style).

    The net keeps separate BatchNorm statistics per domain (buffers named
    'bns.{id}.running_*'). Each batch is first forwarded under scratch
    domain 2 to estimate its BN statistics; whichever stored domain (0 or 1)
    has the closest statistics (L2 over running means and vars) supplies the
    prediction. Slices accumulate into 128-slice volumes, labels are remapped
    per `condition`, and each volume is scored with compute_accuracy_brain.

    Returns (mean, std, classwise) of per-volume Dice, as in eval_dice_3d.

    NOTE(review): `multi_class` is unused in this body.
    """
    assert ignore_bg
    # NOTE(review): the string below is a stray expression statement, not a
    # docstring (it follows the assert); preserved as-is.
    '''
    condition = 1
        change pred (OASIS->CANDI)
    condition = 0
        change mask (CANDI->OASIS)
    '''

    means_list = []
    vars_list = []
    # Get running means and running vars of DN
    for i in range(2):
        means, vars = get_bn_statis(net, i)
        means_list.append(means)
        vars_list.append(vars)

    net.eval()

    # Set 'train' mode for computing target BN statistics and better results
    for m in net.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()

    count = 0
    nSample = 0
    image_sample = []
    mask_sample = []
    pred_sample = []
    # nSample * parts
    overall = []
    for image, mask in tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False):
        nSample += image.shape[0]

        image, mask = image.cuda(), mask.cuda()
        # [0, 1] -> [-1, 1]
        image = (image-0.5)/0.5

        # Get target BN statistics
        _ = net(image, domain=2)
        means, vars = get_bn_statis(net, 2)
        reset_bn_statis(net, 2)

        # pick the stored domain whose BN statistics best match this batch
        dis = 99999999
        # Select best result
        for domain_id in range(2):
            new_dis = cal_distance(means, means_list[domain_id], vars, vars_list[domain_id])
            if new_dis < dis:
                pred = net(image, domain=domain_id)
                dis = new_dis

        pred = pred.argmax(1)
        image_sample.append(image)
        mask_sample.append(mask)
        pred_sample.append(pred)
        
        # a full 128-slice volume has accumulated: score (and maybe save) it
        if nSample==128:
            image_sample = torch.cat(image_sample)
            mask_sample = torch.cat(mask_sample)
            pred_sample = torch.cat(pred_sample)

            # bring prediction and ground truth into the same label space
            if condition:
                pred_sample = changeLabel(pred_sample)
            else:
                mask_sample = changeLabel(mask_sample)
            classwise = compute_accuracy_brain(mask_sample.cpu().numpy(), pred_sample.cpu().numpy())
            overall.append(classwise)

            if is_test:
                # colorized grid per volume under visualization_ds/
                image_sample = (image_sample+1)/2
                image_sample = image_sample.repeat(1,3,1,1)
                pred_sample = visualize(pred_sample)
                mask_sample = visualize(mask_sample)

                save_image(
                    torch.cat([image_sample.unsqueeze(1), mask_sample.unsqueeze(1), pred_sample.unsqueeze(1)], dim=1).view(-1, 3, image_sample.shape[2], image_sample.shape[3]),
                    os.path.join(folder_dir, 'visualization_ds', f'test{i_train}_{count}.png'),
                    nrow=3*8
                )
            else:
                # during validation, save only the first volume as a preview
                if count==0:
                    image_sample = (image_sample+1)/2
                    image_sample = image_sample.repeat(1,3,1,1)
                    pred_sample = visualize(pred_sample)
                    mask_sample = visualize(mask_sample)

                    save_image(
                        torch.cat([image_sample.unsqueeze(1), mask_sample.unsqueeze(1), pred_sample.unsqueeze(1)], dim=1).view(-1, 3, image_sample.shape[2], image_sample.shape[3]),
                        os.path.join(folder_dir, f'val.png'),
                        nrow=3*8
                    )                    

            # reset per-volume buffers
            nSample = 0
            count += 1
            image_sample = []
            mask_sample = []
            pred_sample = []

    overall = np.array(overall)
    # per-volume mean over classes
    overall_sample = overall.mean(1)

    # dice mean, dice std, dice class
    # item       item      array
    return overall_sample.mean(), overall_sample.std(), overall.mean(0)


def eval_dice_3d_myds(net, loader, is_test, folder_dir, i_train, myds, ignore_bg=False, condition=0):
    """Volume-level Dice using the net's internal domain-selection path.

    Like eval_dice_3d_ds, but the net handles the domain choice itself via
    `net(image, domain=2, myds=True)` instead of the explicit BN-statistics
    comparison done in the DS variant.

    Returns (mean, std, classwise) of per-volume Dice.

    NOTE(review): the `myds` parameter is unused in this body.
    """
    assert ignore_bg
    # NOTE(review): the string below is a stray expression statement, not a
    # docstring (it follows the assert); preserved as-is.
    '''
    condition = 1
        change pred (OASIS->CANDI)
    condition = 0
        change mask (CANDI->OASIS)
    '''

    net.eval()

    # Set 'train' mode for computing target BN statistics and better results
    for m in net.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()

    count = 0
    nSample = 0
    image_sample = []
    mask_sample = []
    pred_sample = []
    # nSample * parts
    overall = []
    for image, mask in tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False):
        nSample += image.shape[0]

        image, mask = image.cuda(), mask.cuda()
        # [0, 1] -> [-1, 1]
        image = (image-0.5)/0.5

        # Get target BN statistics
        pred = net(image, domain=2, myds=True)

        pred = pred.argmax(1)
        image_sample.append(image)
        mask_sample.append(mask)
        pred_sample.append(pred)
        
        # a full 128-slice volume has accumulated: score (and maybe save) it
        if nSample==128:
            image_sample = torch.cat(image_sample)
            mask_sample = torch.cat(mask_sample)
            pred_sample = torch.cat(pred_sample)

            # bring prediction and ground truth into the same label space
            if condition:
                pred_sample = changeLabel(pred_sample)
            else:
                mask_sample = changeLabel(mask_sample)
            classwise = compute_accuracy_brain(mask_sample.cpu().numpy(), pred_sample.cpu().numpy())
            overall.append(classwise)

            if is_test:
                # colorized grid per volume under visualization_myds/
                image_sample = (image_sample+1)/2
                image_sample = image_sample.repeat(1,3,1,1)
                pred_sample = visualize(pred_sample)
                mask_sample = visualize(mask_sample)

                save_image(
                    torch.cat([image_sample.unsqueeze(1), mask_sample.unsqueeze(1), pred_sample.unsqueeze(1)], dim=1).view(-1, 3, image_sample.shape[2], image_sample.shape[3]),
                    os.path.join(folder_dir, 'visualization_myds', f'test{i_train}_{count}.png'),
                    nrow=3*8
                )
            else:
                # during validation, save only the first volume as a preview
                if count==0:
                    image_sample = (image_sample+1)/2
                    image_sample = image_sample.repeat(1,3,1,1)
                    pred_sample = visualize(pred_sample)
                    mask_sample = visualize(mask_sample)

                    save_image(
                        torch.cat([image_sample.unsqueeze(1), mask_sample.unsqueeze(1), pred_sample.unsqueeze(1)], dim=1).view(-1, 3, image_sample.shape[2], image_sample.shape[3]),
                        os.path.join(folder_dir, f'val.png'),
                        nrow=3*8
                    )                    

            # reset per-volume buffers
            nSample = 0
            count += 1
            image_sample = []
            mask_sample = []
            pred_sample = []

    overall = np.array(overall)
    # per-volume mean over classes
    overall_sample = overall.mean(1)

    # dice mean, dice std, dice class
    # item       item      array
    return overall_sample.mean(), overall_sample.std(), overall.mean(0)


def compute_accuracy_brain(prediction, gt):
    """Class-wise Dice between integer label volumes.

    prediction, gt: numpy arrays of CANDI label ids.
    Returns a list of len(CANDI_label) Dice scores; slot k holds the score
    for label id k+1 (background id 0 is excluded by starting the range at
    1, which also makes the old `if label_index == 0: continue` branch and
    the unused `label` lookup dead code — both removed).
    """
    accuracies_classwise = [0] * len(CANDI_label)

    for label_index in range(1, len(CANDI_label) + 1):
        gt_mask = (gt == label_index)
        predict_mask = (prediction == label_index)

        # medpy's dc divides by the mask sizes; guard the empty-gt case
        if np.sum(gt_mask) == 0:
            dice = 0
        else:
            dice = dc(predict_mask, gt_mask)

        accuracies_classwise[label_index - 1] += dice

    return accuracies_classwise


def rerange(image, mask):
    """Interleave an RGB image batch with its per-channel masks.

    image: (B, 3, H, W); mask: (B, C, H, W).
    Each mask channel is replicated to 3 channels, then image and masks are
    grouped so the result is (B, C+1, 3, H, W): one image frame followed by
    its C mask frames per sample.
    """
    n_batch, n_parts, height, width = mask.shape

    # (B, C, H, W) -> (B*C, 1, H, W) -> (B*C, 3, H, W) -> (B, 3*C, H, W)
    rgb_mask = mask.reshape(n_batch * n_parts, 1, height, width)
    rgb_mask = rgb_mask.repeat(1, 3, 1, 1)
    rgb_mask = rgb_mask.reshape(n_batch, 3 * n_parts, height, width)

    combined = torch.cat([image, rgb_mask], dim=1)
    return combined.view(-1, n_parts + 1, 3, height, width)


def get_save_image(image1, mask1, image2, mask2):
    """Build one flat batch of RGB frames from two (image, mask) pairs.

    Each pair is expanded to (B, C+1, 3, H, W) — the image frame followed by
    its C mask frames, with every mask channel replicated to RGB — and the
    two groups are interleaved per sample, then flattened to
    (B*(2C+2), 3, H, W) for save_image.
    """
    _, _, height, width = mask1.shape

    def expand(img, msk):
        # inline of the rerange helper: (B, C, H, W) mask -> grouped frames
        b, c, h, w = msk.shape
        rgb = msk.reshape(b * c, 1, h, w).repeat(1, 3, 1, 1).reshape(b, 3 * c, h, w)
        return torch.cat([img, rgb], dim=1).view(-1, c + 1, 3, h, w)

    grouped = torch.cat([expand(image1, mask1), expand(image2, mask2)], dim=1)
    return grouped.view(-1, 3, height, width)


# Per-label RGB palette for colorizing segmentation maps (label id -> RGB).
colors = {
    1:(95,205,228),# inner: white matter
    2:(203,219,252),# outer: gray matter
    3:(143,86,59),
    4:(223,113,38),
    5:(251,242,54),
    6:(153,229,80),
    7:(106,190,48),
    8:(55,148,110),
    9:(217,87,99),
    10:(217,160,102),
    11:(238,195,154),
    12:(91,110,225),
    13:(215,123,186),
    14:(118,66,138),
    15:(99,155,255),
    16:(155,173,183)
}


def visualize(out):
    """Colorize an integer label map into an RGB image batch.

    out: (B, H, W) tensor of label ids.
    Returns a (B, 3, H, W) float tensor in [0, 1]; unlabeled pixels
    (id 0 or ids without a palette entry) stay black.

    Fix: the original allocated a square (B, 3, H, H) canvas from
    shape[-1], breaking non-square inputs; both spatial dims are now taken
    from the input.
    """
    B, H, W = out.shape[0], out.shape[-2], out.shape[-1]
    visual = torch.zeros((B, 3, H, W), device=out.device)
    for label, rgb in colors.items():
        c = torch.tensor(rgb, device=out.device).view(1, 3, 1, 1)/255
        # mask for this label, broadcast to RGB and tinted with its color
        m = torch.where(out == label, 1, 0).unsqueeze(1).repeat(1, 3, 1, 1)*c
        visual = torch.where(m > 0, m, visual)

    return visual


class IntList(click.ParamType):
    """Click parameter type that parses a comma-separated list of ints.

    None, the empty string, or any casing of 'none' falls back to the
    default StyleGAN feature-map resolutions.
    """

    name = 'list'

    def convert(self, value, param, ctx):
        del param, ctx  # unused, but required by the click interface
        if value is None or value == '' or value.lower() == 'none':
            # default feature-map resolutions
            return [4, 8, 16, 32, 64, 128, 256]
        return [int(item) for item in value.split(',')]