import torch
import pickle
import os
import random
from tqdm import tqdm
# from segmentation.utils import eval_net_supervised
from torch.utils.data.dataloader import DataLoader
from segmentation.dataset import  Triplet


def cal_metric(metric, tp, tn, fn, fp, eps):
    """Compute a smoothed binary segmentation metric from confusion counts.

    Operates elementwise, so ``tp``/``tn``/``fn``/``fp`` may be plain numbers
    or tensors of per-image counts (shape ``(B,)``).

    Args:
        metric: one of ``'dice'``, ``'acc'``, ``'iou'``, ``'spec'``,
            ``'sen'``, ``'ppv'``, ``'npv'``.
        tp, tn, fn, fp: true/false positive/negative pixel counts.
        eps: smoothing constant that avoids division by zero on empty masks.

    Returns:
        The metric value, same shape as the count inputs.

    Raises:
        ValueError: if ``metric`` is not a recognized name (previously an
            unknown name silently returned ``None``).
    """
    if metric == 'dice':
        return (2*tp + eps)/(2*tp + fn + fp + eps)
    if metric == 'acc':
        return (tp+tn + eps) / (tp+tn+fn+fp+eps)
    if metric == 'iou':
        return (tp + eps)/(tp+fn+fp+eps)
    if metric == 'spec':
        return (tn+eps)/(tn+fp+eps)
    if metric == 'sen':
        return (tp+eps)/(tp+fn+eps)
    if metric == 'ppv':
        return (tp+eps)/(tp+fp+eps)
    if metric == 'npv':
        return (tn+eps)/(tn+fn+eps)
    raise ValueError(f"unknown metric: {metric!r}")


def evaluate_ce(seg, gt, eps=1e-6, parts=5, ignore_bg=False, bg_label=0):
    """Per-class and foreground-weighted segmentation metrics for one batch.

    Args:
        seg: prediction logits/scores of shape (B, C, H, W); hard labels are
            taken via ``argmax`` over dim 1.
        gt: ground-truth label map of shape (B, H, W).
        eps: smoothing constant forwarded to ``cal_metric``.
        parts: number of class labels to evaluate (0..parts-1).
        ignore_bg: if True, skip class ``bg_label`` entirely.
        bg_label: label index treated as background.

    Returns:
        metric_list: tensor of shape (len(metrics), parts) — each metric
            summed over the images in the batch, per class.
        w_metric: tensor of shape (len(metrics),) — metrics weighted by each
            class's ground-truth pixel count, summed over the batch.

    NOTE(review): reads the module-level ``metrics`` list, which is only
    defined in the ``__main__`` section — this function works only when the
    file is run as a script. Also requires CUDA.
    """
    # B,H,W
    B = seg.shape[0]
    seg = seg.argmax(dim=1)  # hard predictions, shape (B, H, W)

    total = 0  # per-image foreground pixel count, accumulated over classes i>0
    metric_list = torch.zeros([len(metrics), parts]).cuda()
    w_metric = torch.zeros([B, len(metrics)]).cuda()

    for i in range(parts):
        if ignore_bg and i==bg_label:
            continue
        # Binary masks for class i.
        seg_part = torch.where(seg==i, 1, 0)
        gt_part = torch.where(gt==i, 1, 0)

        # Per-image confusion counts, reduced over H and W -> shape (B,).
        tp = torch.sum((seg_part == 1) & (gt_part == 1), [1,2])
        tn = torch.sum((seg_part == 0) & (gt_part == 0), [1,2])
        fn = torch.sum((seg_part == 0) & (gt_part == 1), [1,2])
        fp = torch.sum((seg_part == 1) & (gt_part == 0), [1,2]) # B

        weight = torch.sum(gt_part, [1,2]) # B — ground-truth pixels of class i

        for m in range(len(metrics)):
            metric_name = metrics[m]
            metric_val = cal_metric(metric_name, tp, tn, fn, fp, eps) # B

            # NOTE(review): ``i>0`` hard-codes class 0 as background here,
            # independent of ``bg_label`` — confirm this is intended when
            # bg_label != 0.
            if i>0:
                w_metric[:, m] += weight*metric_val
            metric_list[m, i] += metric_val.sum(0) # sum over the images in the batch

        if i>0:
            total += weight

    # (1/total) @ w_metric  ==  sum over images of w_metric[b] / total[b],
    # i.e. each image's weighted metric is normalized by that image's own
    # foreground pixel count before summing over the batch.
    w_metric = torch.mm(1/total.unsqueeze(0), w_metric).squeeze(0) # len(metrics)

    return metric_list, w_metric


def eval_net_supervised(net, loader, is_test, folder_dir, i_train, multi_class, ignore_bg=False, bg_label=0):
    """Evaluate ``net`` over ``loader`` and return sample-averaged metrics.

    Returns a pair ``(metric_list, w_metric)``: the per-class metric table and
    the foreground-weighted metric vector from ``evaluate_ce``, each divided
    by the total number of evaluated images.

    NOTE(review): ``is_test``, ``folder_dir``, ``i_train`` and ``multi_class``
    are currently unused; kept for interface compatibility with callers.
    """
    net.eval()

    metric_sum = 0
    weighted_sum = 0
    n_images = 0

    progress = tqdm(loader, total=len(loader), desc='Validation', unit='batch', leave=False)
    for _, image, mask in progress:
        image = image.cuda()
        mask = mask.cuda()
        n_images += image.shape[0]

        # Normalize inputs from [0, 1] to [-1, 1].
        image = (image - 0.5) / 0.5

        pred = net(image)

        part_metrics, weighted = evaluate_ce(
            pred, mask, parts=pred.shape[1], ignore_bg=ignore_bg, bg_label=bg_label
        )
        metric_sum = metric_sum + part_metrics
        weighted_sum = weighted_sum + weighted

    return metric_sum / n_images, weighted_sum / n_images


# if __name__ == '__main__':
def eval_folder(folder_dir: str, data_dir):
    """Evaluate the ``best.pth`` checkpoint under *folder_dir* on the global
    ``test_loader`` and append the per-class / weighted / mean metrics to
    ``to_eval/test.txt``.

    Args:
        folder_dir: experiment directory containing ``checkpoint/best.pth``.
        data_dir: dataset root (currently unused; kept for interface compat).
    """
    # NOTE(security): pickle.load executes arbitrary code — only load
    # checkpoints from trusted sources.
    with open(os.path.join(folder_dir, 'checkpoint', 'best.pth'), 'rb') as f:
        net = pickle.load(f)['net'].eval().requires_grad_(False).cuda()

    metric_list, w_metric = eval_net_supervised(net, test_loader, False, folder_dir, '', True, False)

    # NOTE(review): the explicit columns 0..2 assume exactly 3 classes
    # (background + 2 foreground parts) — confirm against the model head.
    outputs = [
        metric_list[:, 0].cpu().numpy().tolist(),           # bg
        metric_list[:, 1].cpu().numpy().tolist(),           # part 1
        metric_list[:, 2].cpu().numpy().tolist(),           # part 2
        w_metric.cpu().numpy().tolist(),                    # weighted
        metric_list[:, 1:].mean(1).cpu().numpy().tolist(),  # mean over foreground parts
    ]

    for output in outputs:
        print(output)

    with open(os.path.join('to_eval', 'test.txt'), 'a') as log:
        log.write(f"================={folder_dir.split('/')[-2]}-{folder_dir.split('/')[-1]}================\n")
        for output in outputs:
            for item in output:
                log.write(f'{item:.4f},')
            log.write('\n')

        log.write("============================================\n")


if __name__ == "__main__":

    # Global parameters
    torch.manual_seed(0)
    data_dir = r'./data/CANDI-64'
    # Metric names read as a module-level global by evaluate_ce.
    metrics = [
        'iou',
        'dice',
        'acc',
        'spec',
        'sen',
        'ppv',
        'npv'
    ]
    # =============================================

    # 7:1:2 train/val/test split of the 103 subjects; the middle (val) part
    # absorbs integer-division remainders so the parts sum to exactly 103.
    split = [7,1,2]
    split = [103*s//sum(split) for s in split]
    split[1] = 103 - split[0] - split[2]
    print(f'dataset split: {split}')
    # Fixed shuffle seed keeps the subject split reproducible across runs.
    random.seed(28)
    samples = list(range(103))
    random.shuffle(samples)

    # Test set = the last split[2] shuffled subject indices.
    # NOTE(review): Triplet is project-local; the semantics of combine,
    # length and w_steps are assumed — confirm in segmentation/dataset.py.
    test_loader = DataLoader(
        Triplet(
            data_dir=data_dir,
            multi_class=True,
            aug=False,
            sample=samples[split[0]+split[1]:103],
            combine=True,
            length=64,
            w_steps=1000
        ),
        batch_size=5, drop_last=False, shuffle=False, num_workers=2, pin_memory=True
    )

    # for folder in os.listdir('to_eval'):
    #     if folder[-3:] == 'txt': continue
    #     if folder == 'Seg_L_noDA':
    #         eval_folder(os.path.join('to_eval', folder, '0'), data_dir)
    #     else:
    #         for i in range(3):
    #             folder_dir = os.path.join('to_eval', folder, str(i))
    #             eval_folder(folder_dir, data_dir)


    eval_folder('save_seg/supervised/exp46-1shot-cFalse-augTrue', data_dir)