import numpy as np
from config import parameter as para
import SimpleITK as sitk
import scipy.ndimage as ndimage
import os
import copy
import collections
from time import time
import torch
import pandas as pd
from tqdm import tqdm
from utilities.calculate_metrics import Metirc
from dataset.dataset import DatasetNoPretreat#id,ct_array, seg_array,direction,origin,spacing
from utilities.LaterProcess import connect_graph,remove_holes

def predictOneWithNoProcess(net,ct_array,direction,origin,spacing):
    '''
    Run sliding-window inference on a raw (un-preprocessed) CT volume.

    Args:
        net: segmentation network, already on the GPU; called with a
            (1, 1, D, H, W) float tensor and expected to return per-voxel
            probabilities.
        ct_array (np.ndarray): raw CT volume, axes (z, y, x), HU values.
        direction, origin: unused here; kept for interface compatibility
            with the caller.
        spacing: original voxel spacing; spacing[-1] is the z (slice)
            spacing used for resampling.

    Returns:
        np.ndarray (uint8): binary segmentation mask at para.slice_thickness
        z-resolution, after connected-component filtering and hole removal.
    '''
    net.eval()
    # Clip intensities to the configured HU window.
    ct_array[ct_array > para.upper] = para.upper
    ct_array[ct_array < para.lower] = para.lower
    # Scale into a small float range (fixed divisor matching training).
    ct_array = ct_array.astype(np.float32)
    ct_array = ct_array / 200
    # Resample with cubic interpolation to the target slice thickness and
    # in-plane down-scale; result is float32 (not int16).
    ct_array = ndimage.zoom(ct_array, (spacing[-1] / para.slice_thickness, para.down_scale, para.down_scale), order = 3)

    # Pad volumes that have fewer slices than one sliding window.
    too_small = False
    if ct_array.shape[0] < para.size:
        depth = ct_array.shape[0]
        # BUGFIX: the volume was already divided by 200 above, so the pad
        # value must be the *normalized* lower bound, not raw para.lower.
        temp = np.ones((para.size, int(512 * para.down_scale), int(512 * para.down_scale)), dtype = np.float32) * (para.lower / 200)
        temp[0: depth] = ct_array
        ct_array = temp
        too_small = True
    # Sliding-window sampling and prediction.
    start_slice = 0
    end_slice = start_slice + para.size - 1
    # count tracks how many windows covered each slice so overlapping
    # probabilities can be threshold-averaged below.
    count = np.zeros((ct_array.shape[0], 512, 512), dtype = np.int16)
    probability_map = np.zeros((ct_array.shape[0], 512, 512), dtype = np.float32)

    with torch.no_grad():
        while end_slice < ct_array.shape[0]:
            ct_tensor = torch.FloatTensor(ct_array[start_slice: end_slice + 1]).cuda()
            # Add batch and channel dimensions to match the network input.
            ct_tensor = ct_tensor.unsqueeze(dim = 0).unsqueeze(dim = 0)

            outputs = net(ct_tensor)

            count[start_slice: end_slice + 1] += 1
            probability_map[start_slice: end_slice + 1] += np.squeeze(outputs.cpu().detach().numpy())

            # GPU memory is tight: keep only the ndarray and free the
            # output tensor (and its graph) immediately.
            del outputs

            start_slice += para.stride
            end_slice = start_slice + para.size - 1

        # Handle the tail: run one last window aligned to the final slice.
        if end_slice != ct_array.shape[0] - 1:
            end_slice = ct_array.shape[0] - 1
            start_slice = end_slice - para.size + 1

            ct_tensor = torch.FloatTensor(ct_array[start_slice: end_slice + 1]).cuda()
            ct_tensor = ct_tensor.unsqueeze(dim = 0).unsqueeze(dim = 0)
            outputs = net(ct_tensor)

            count[start_slice: end_slice + 1] += 1
            probability_map[start_slice: end_slice + 1] += np.squeeze(outputs.cpu().detach().numpy())

            del outputs

        # A voxel is foreground when its mean probability over all covering
        # windows reaches the threshold (sum >= threshold * count).
        pred_seg = np.zeros_like(probability_map)
        pred_seg[probability_map >= (para.threshold * count)] = 1

        # Crop the padded slices back off for short volumes.
        if too_small:
            temp = np.zeros((depth, 512, 512), dtype = np.float32)
            temp += pred_seg[0: depth]
            pred_seg = temp

    pred_seg = pred_seg.astype(np.uint8)
    liver_seg = pred_seg.copy()
    liver_seg[liver_seg > 0.5] = 1
    liver_seg[liver_seg <= 0.5] = 0
    # Post-process: keep connected components and fill holes.
    liver_seg=connect_graph(liver_seg)
    liver_seg=remove_holes(liver_seg)
    return liver_seg


def SaveOneModelPredictMask(net,id_path,result_save_csv=None,save_nii_dir=None):
    '''
    Run the model over a test set of raw CT volumes; optionally save each
    predicted mask as .nii and the evaluation metrics as .csv.

    The flat argument list makes this callable through
    multiprocessing.dummy.Pool for simple multi-threading.

    Args:
        net: trained segmentation network (GPU-resident).
        id_path: path to the id list consumed by DatasetNoPretreat.
        result_save_csv: if given, ground truth is loaded, dice/mean-IoU are
            computed per case and written (with summary stats) to this csv.
        save_nii_dir: if given, each predicted mask is written there as nii.
    '''
    net.eval()
    if result_save_csv is not None:
        csv_dir = os.path.dirname(result_save_csv)
        if csv_dir != '' and not os.path.exists(csv_dir):
            os.makedirs(csv_dir)
    if save_nii_dir is not None and not os.path.exists(save_nii_dir):
        os.makedirs(save_nii_dir)
    # Ground-truth segmentations are only needed when metrics are requested.
    need_seg = result_save_csv is not None
    # BUGFIX: honor the id_path argument; it was previously ignored in
    # favor of the hard-coded para.test_id_path.
    data_dl = DatasetNoPretreat(id_path = id_path, need_seg = need_seg, need_cut = False)
    # Per-case bookkeeping for the metrics table.
    file_name = []  # case identifiers (csv index)
    if need_seg:
        dice_intersection = 0.0
        dice_union = 0.0

        time_pre_case = []  # per-case wall-clock time
        # Evaluation metrics accumulated per case.
        liver_score = collections.OrderedDict()
        liver_score['dice'] = []
        liver_score['mean_iou'] = []
    for id,ct_array, seg_array,direction,origin,spacing in tqdm(data_dl):
        start = time()
        file_name.append(id.split(' ')[0])
        # Prediction resamples the volume to para.slice_thickness internally.
        liver_seg=predictOneWithNoProcess(net,ct_array,direction,origin,spacing)
        if need_seg:
            # Resample ground truth to the same z-resolution (nearest
            # neighbour to keep labels intact) and binarize it.
            seg_array = ndimage.zoom(seg_array, (spacing[-1] / para.slice_thickness, 1, 1), order = 0)
            seg_array[seg_array > 0] = 1
        # Save the predicted mask as nii, restoring the original geometry
        # (except z-spacing, which is now para.slice_thickness).
        if save_nii_dir is not None:
            pred_seg = sitk.GetImageFromArray(liver_seg)
            pred_seg.SetDirection(direction)
            pred_seg.SetOrigin(origin)
            pred_seg.SetSpacing((spacing[0],spacing[1], para.slice_thickness))
            save_path=os.path.join(save_nii_dir,os.path.basename(id.split(' ')[1]))
            print('save one mask to path:',save_path)
            sitk.WriteImage(pred_seg,save_path )
        if need_seg:
            # Compute segmentation metrics for this case.
            liver_metric = Metirc(seg_array, liver_seg)

            # Hoisted: get_dice_coefficient() was called three times per case.
            dice_coefficient, intersection, union = liver_metric.get_dice_coefficient()
            liver_score['dice'].append(dice_coefficient)
            liver_score['mean_iou'].append(liver_metric.get_mean_iou())
            dice_intersection += intersection
            dice_union += union
            speed = time() - start
            time_pre_case.append(speed)
    if need_seg:
        # Write per-case metrics plus summary statistics to the csv.
        liver_data = pd.DataFrame(liver_score,index = file_name)
        liver_statistics = pd.DataFrame(index=['mean', 'std', 'min', 'max'], columns=list(liver_data.columns))
        liver_statistics.loc['mean'] = liver_data.mean()
        liver_statistics.loc['std'] = liver_data.std()
        liver_statistics.loc['min'] = liver_data.min()
        liver_statistics.loc['max'] = liver_data.max()
        pd_results=pd.concat([liver_data,liver_statistics])
        if result_save_csv is not None:
            pd_results.to_csv(result_save_csv)
        # Print the mean of each metric over all cases.
        print('predict successful!results are: ',liver_data.mean())