import copy
from itertools import count
import os
import pickle

import click
import torch
from torch import optim
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from tqdm import tqdm

import dnnlib
import legacy
from segmentation.dataset import MR_slice
from segmentation.utils import *
from segmentation.dataset import apply_trans
from medpy.metric.binary import dc
from scipy.optimize import curve_fit


def visualize_(out):
    """Render an integer label map as an RGB image via the global color table.

    out: (B, H, W) tensor of class ids. Returns (visual, count) where visual
    is a (B, 3, H, H) float tensor colored per class and count is the number
    of color-table classes present anywhere in the batch.
    NOTE: relies on ``colors`` from segmentation.utils (star import).
    """
    batch, side = out.shape[0], out.shape[-1]
    present = 0
    canvas = torch.zeros((batch, 3, side, side)).to(device=out.device)
    for label in colors.keys():
        # per-class RGB tint, normalized to [0, 1], broadcastable over (B,3,H,W)
        tint = torch.tensor(colors[label]).to(device=out.device).view(1, 3, 1, 1) / 255
        layer = torch.where(out == label, 1, 0).unsqueeze(1).repeat(1, 3, 1, 1) * tint
        if layer.sum() > 0:
            present += 1
        # paint this class on top of what has been drawn so far
        canvas = torch.where(layer > 0, layer, canvas)

    return canvas, present


def apply_trans_batch(image, mask, chn):
    """Apply the per-sample augmentation ``apply_trans`` jointly to image and mask.

    image: (B, chn, H, W) in [-1, 1]; mask: (B, H, W) integer label map.
    Image and mask are stacked so the same random transform hits both, then
    split back apart. Returns the re-normalized image and the updated mask.
    """
    # map [-1, 1] -> [0, 1] so the transform sees a standard image range
    image = (image + 1) / 2
    for idx in range(image.shape[0]):
        stacked = torch.cat([image[idx], mask[idx].unsqueeze(0)], dim=0)
        img_aug, msk_aug = apply_trans(stacked).split([chn, 1], dim=0)
        image[idx] = img_aug
        mask[idx] = msk_aug.squeeze(0).to(torch.long)

    # back to [-1, 1]
    image = (image - 0.5) / 0.5

    return image, mask


def update_iou_stat(predict, gt, TP, P, T, num_classes = 17):
    """Accumulate per-class intersection/union counters over one batch.

    Pixels where gt == 0 (background) are excluded from every counter.

    :param predict: argmax predictions, numpy array of shape (b, h, w)
    :param gt: ground-truth labels, numpy array of shape (b, h, w)
    :param TP: running true-positive counts, indexed by class
    :param P: running predicted-positive counts, indexed by class
    :param T: running ground-truth-positive counts, indexed by class
    :param num_classes: number of classes in the dataset
    :return: the updated (TP, P, T)
    """
    valid = gt > 0                                  # ignore background pixels
    correct = np.logical_and(predict == gt, valid)  # correctly labeled foreground

    for cls in range(num_classes):
        P[cls] += np.count_nonzero(np.logical_and(predict == cls, valid))
        T[cls] += np.count_nonzero(np.logical_and(gt == cls, valid))
        TP[cls] += np.count_nonzero(np.logical_and(gt == cls, correct))

    return TP, P, T


def compute_iou(TP, P, T, num_classes = 17):
    """Compute per-class IoU = TP / (T + P - TP), epsilon-guarded against /0.

    :param TP: true-positive counts per class
    :param P: predicted-positive counts per class
    :param T: ground-truth-positive counts per class
    :param num_classes: number of classes in the dataset
    :return: list of per-class IoU values
    """
    return [TP[c] / (T[c] + P[c] - TP[c] + 1e-10) for c in range(num_classes)]


# f(t) in the paper: saturating-exponential model of the IoU-vs-epoch trace.
def curve_func(x, a, b, c):
    """Evaluate a * (1 - exp(-(x**b) / c)) element-wise."""
    return a * (1 - np.exp(-(x ** b) / c))


def fit(func, x, y):
    """Fit ``func`` to (x, y) with non-linear least squares.

    The geometric sigma ramp (1 -> 0.1) gives later samples more weight,
    and the bounds constrain a, b to [0, 1] with c unbounded above.
    Returns the optimal parameters as a tuple (a, b, c).
    """
    initial_guess = (1, 1, 1)
    weights = np.geomspace(1, .1, len(y))
    lower, upper = [0, 0, 0], [1, 1, np.inf]
    popt, _pcov = curve_fit(
        func, x, y,
        p0=initial_guess,
        method='trf',
        sigma=weights,
        absolute_sigma=True,
        bounds=(lower, upper),
    )
    return tuple(popt)


def derivation(x, a, b, c):
    """Analytic d/dx of curve_func: (a*b/c) * x**(b-1) * exp(-(x**b)/c)."""
    x = x + 1e-6  # keep x**(b-1) finite at x == 0
    return a * b * (1 / c) * np.exp(-(x ** b) * (1 / c)) * x ** (b - 1)


def label_update_epoch(ydata_fit, threshold=0.9, eval_interval=100, num_iter_per_epoch=1000):
    """Estimate the epoch at which pseudo-label updating should begin.

    Fits the saturating curve f(t) to the recorded IoU trace, then counts how
    many of the first 15 epochs show a slope still close (relative change
    <= threshold) to the slope at epoch 1.

    :param ydata_fit: IoU values recorded every ``eval_interval`` iterations
    :param threshold: relative slope-change cutoff
    :param eval_interval: iterations between IoU measurements
    :param num_iter_per_epoch: iterations per training epoch
    :return: estimated update epoch (>= 1)
    """
    # x axis in epoch units, one point per recorded IoU value
    xdata_fit = np.linspace(
        0, len(ydata_fit) * eval_interval / num_iter_per_epoch, len(ydata_fit)
    )
    # fitted parameters of f(t)
    a, b, c = fit(curve_func, xdata_fit, ydata_fit)

    # early-learning window: epochs 1..15
    epoch = np.arange(1, 16)

    # slope change of each early epoch relative to the initial slope
    base_slope = abs(derivation(1, a, b, c))
    relative_change = abs(abs(derivation(epoch, a, b, c)) - base_slope) / base_slope
    relative_change[relative_change > 1] = 0  # discard implausible jumps
    update_epoch = np.sum(relative_change <= threshold) + 1
    return update_epoch


def if_update(iou_value, current_epoch, threshold=0.90, eval_interval=100, num_iter_per_epoch=1000):
    """Return True once ``current_epoch`` has reached the estimated update epoch."""
    target_epoch = label_update_epoch(
        iou_value,
        threshold=threshold,
        eval_interval=eval_interval,
        num_iter_per_epoch=num_iter_per_epoch,
    )
    return current_epoch >= target_epoch


def get_UNet(
    n_train,
    save_dir,
    data_dir,
    supr_dir,
    which_net,
    which_repre_layers,
    shots,
    threshold,
    split,
    combine,
    multi_class,
    dropout,
    cutout,
    folder_dir
):
    """Evaluate the GAN-feature segmenter (Seg) against a supervised U-Net.

    Draws ``n_train`` random batches from the generator, segments each batch
    with both Seg (on GAN representations) and the supervised U-Net (on the
    generated images), accumulates seven agreement metrics (dice, accuracy,
    IoU, specificity, sensitivity, PPV, NPV) between the two predictions,
    saves a visual comparison every 10 batches, and appends the averaged
    metrics to test_INFO.txt.

    :param n_train: number of generated batches to evaluate
    :param save_dir: directory holding the GAN snapshot and Seg checkpoints
    :param data_dir: dataset root; only used here to derive split sizes
    :param supr_dir: path to the supervised U-Net checkpoint (pickle)
    :param which_net: Seg architecture tag used in the checkpoint filename
    :param which_repre_layers: GAN layer resolutions whose features feed Seg
    :param shots: shot count encoded in the checkpoint filename
    :param threshold: threshold tag in the checkpoint directory name
    :param split: train/val/test ratio, e.g. [72, 8, 20] (only printed here)
    :param combine: flag encoded in the checkpoint filename
    :param multi_class: unused in this routine; kept for interface parity
    :param dropout: if True, the checkpoint filename carries '_drop'
    :param cutout: if True, the checkpoint filename carries '_cut'
    :param folder_dir: output directory for sample images and the log file
    """
    import warnings
    warnings.filterwarnings('ignore')
    # Turn the ratio into absolute split sizes; the middle (val) entry
    # absorbs any rounding remainder.
    split_length = len(os.listdir(os.path.join(data_dir, 'images')))//128
    split = [int(s) for s in split]
    split = [split_length*s//sum(split) for s in split]
    split[1] = split_length - split[0] - split[2]
    print(f'dataset split: {split}')

    out_ch = 17
    # NOTE(review): sum_dice / sum_dice_class are never updated below.
    sum_dice = 0
    sum_dice_class = [0]*(out_ch-1)
    # init generator ===============================================================
    with dnnlib.util.open_url(os.path.join(save_dir, 'network-snapshot-best.pkl')) as f:
        snapshot_data = legacy.load_network_pkl(f)
        G = snapshot_data['G_ema'].eval().requires_grad_(False).cuda()
        del snapshot_data
    get_representation = init_get_representation(G, which_repre_layers, 256, 'random')
    print('load generator done!')

    # init Seg ===============================================================
    # Checkpoint filename encodes architecture and training options.
    pkl_name = f'Seg_{which_net}'
    if dropout:
        pkl_name += '_drop'
    if cutout:
        pkl_name += '_cut'
    pkl_name += f'_{shots}s_cb{combine}_{which_repre_layers}.pkl'

    # init Seg ===============================================================
    with open(supr_dir, 'rb') as f:
        unet = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('load Supr_net done!')
    with open(os.path.join(save_dir, f'handmark_{threshold}', pkl_name), 'rb') as f:
        Seg = pickle.load(f)['net'].eval().requires_grad_(False).cuda()
    print('load seg done!')
    # init dataaset ===============================================================

    eps = 1e-6  # numerical guard against division by zero
    bs = 32     # generated batch size
    sum_king = 0    # running sum of the 7 metrics over all batches/samples

    import random
    for i_train in tqdm(range(n_train)):

        with torch.no_grad():
            z = torch.randn(bs, G.z_dim).cuda()
            w = G.mapping(z, None)  # [N, L, C]
            image, representation = get_representation(w, False)
            image = torch.clamp(image, -1, 1)

            # refined mask predicted by Seg from the GAN representations
            mask = Seg(representation)

            image = F.interpolate(image, (160,160), mode='bilinear')
            mask = F.interpolate(mask, (160,160), mode='nearest')

            mask_supr = unet(image)

            # B, H, W
            mask_eval = mask.argmax(1)
            mask_supr_eval = mask_supr.argmax(1)

            count = eps   # per-sample count of classes present (B); currently unused
            sum_metrics = 0

            # foreground classes only (1..16); background 0 is skipped
            for c in range(1, 17):
                t1 = mask_eval==c
                t2 = mask_supr_eval==c
                # count += t2.sum(dim=(-2, -1))>0

                # per-sample confusion counts, each of shape B
                tp = torch.sum((t1 == 1) & (t2 == 1), [1,2])
                tn = torch.sum((t1 == 0) & (t2 == 0), [1,2])
                fn = torch.sum((t1 == 0) & (t2 == 1), [1,2])
                fp = torch.sum((t1 == 1) & (t2 == 0), [1,2])

                # Metrics, B
                evaluations = torch.cat([x.unsqueeze(0) for x in [
                    (2*tp + eps)/(2*tp + fn + fp + eps),# dice
                    (tp+tn + eps) / (tp+tn+fn+fp+eps),# accu
                    (tp + eps)/(tp+fn+fp+eps),# iou
                    tn/(tn+fp+eps),# spec
                    tp/(tp+fn+eps),# sens
                    tp/(tp+fp+eps),# ppv
                    tn/(tn+fn+eps)# npv                    
                ]])

                # Metrics, B for all classes
                sum_metrics += evaluations
            
            # NOTE(review): the loop above covers 16 foreground classes but
            # divides by 17 -- confirm whether background was meant to count.
            sum_metrics /= 17
            # sum_metrics /= count.unsqueeze(0)
            sum_king += sum_metrics.sum(1)

            # print(f'{sum_king/((i_train+1)*bs)}')

            del representation
            if i_train%10==0:
                save_image(torch.cat([(image.repeat(1,3,1,1)+1)/2, visualize(changeLabel(mask.argmax(1))), visualize(changeLabel(mask_supr.argmax(1)))]), f'{folder_dir}/{i_train}.png', nrow=bs, padding=0)

        # test part of i train

    print(f'{sum_king/((n_train)*bs)}')
    with open(os.path.join(folder_dir, 'test_INFO.txt'), 'a') as log:
        # one line per run: the 7 averaged metrics, comma-separated
        for d in sum_king/((n_train)*bs):
            log.write(f'{d:.3f}, ')
        log.write('\n')

        log.write('=================================\n')

if __name__ == "__main__":
    # Evaluate Seg vs. the supervised U-Net segmentation.
    os.environ["CUDA_VISIBLE_DEVICES"] = '5'
    # Earlier CANDI-dataset configuration, kept for reference:
    # get_UNet(
    #     n_train=100,
    #     save_dir='save/00010-images-mirror-low_shot-kimg25000-batch32-color-translation-cutout',
    #     data_dir='data/CANDI-128-160',
    #     supr_dir='save_seg/supervised/exp0-72shot-reso160-augTrue/checkpoint/2_best.pth',
    #     folder_dir = 'save_seg/ws/eval_seg/CANDI',
    #     which_net='BiFPN',
    #     which_repre_layers=[4,8,16,32,64,128,256],
    #     shots=128,
    #     threshold=0,
    #     split=[72,8,20],
    #     combine=True,
    #     multi_class=True,
    #     dropout=False,
    #     cutout=False
    # )
    get_UNet(
        n_train=100,
        save_dir='save/00011-GAN_OASIS-mirror-low_shot-kimg25000-batch32-color-translation-cutout',
        data_dir='data/OASIS-128-160',
        supr_dir='save_seg/supervised/exp9-257shot-reso256-augTrue/checkpoint/2_best.pth',
        folder_dir = 'save_seg/ws/eval_seg/OASIS',
        which_net='BiFPN',
        which_repre_layers=[4,8,16,32,64,128,256],
        shots=128,
        threshold=0,
        split=[72,8,20],
        combine=True,
        multi_class=True,
        dropout=False,
        cutout=False
    )
#----------------------------------------------------------------------------
