from utils.get_args import get_args
import pandas as pd
from utils.map import *
from utils.extract_data import extract_data
from utils.drop_in import drop_in
import numpy as np
from utils.excel_temp import ExcelTemp 
from utils.cal_classifiction_result import cal_result
from utils.name_num_2_data import find_label,find_box,find_totalVolume,find_mean_density,find_norpr_scores,find_layer,name_num_2_box
from utils.cal_IoU import cal_dice,cal_dice_set1_set2
# Logging (configured in utils.log)
from utils.log import logging
import json
# Logging usage examples:
# logging.debug("----debug message----")
# logging.info("----info message----")
# logging.warning("----warning message----")
# logging.error("----error message----")
# logging.critical("----critical----")

# Parse command-line options (see utils.get_args for the full option list).
args = get_args()
# Directory holding the ground-truth (gold standard) annotations.
gt_dir = args.gt_dir
# Directory holding the model (AI) predictions.
pred_dir = args.pred_dir
# Load both datasets; `names` lists the CT series identifiers.
# NOTE(review): exact structure of gt_data/ai_data is defined in
# utils.extract_data — verify the dict keys used below against it.
gt_data,ai_data,names = extract_data(gt_dir,pred_dir)
# Detection outcomes, keyed "<ct>_<gt idx>_<ai idx>" -> 'TP' / 'FP' / 'FN'
# ('xx' stands in for a missing gt/ai side).
statistical_result_dict = {}
# Rows accumulated for the output Excel sheet.
excel_all = []
########################## Nodule detection ##########################
# Match every predicted nodule against every ground-truth nodule, CT by CT,
# and record the outcome in statistical_result_dict under keys of the form
# "<ct>_<gt index>_<ai index>" ('xx' marks a missing side).
for ct_index, ct in enumerate(names):
    # TODO: slice thickness is temporarily derived from the series name.
    if ct[-8:-5] == '361':
        thickness = 5
    elif ct[-8:-5] == '169':
        thickness = 1
    else:
        # Fix: previously an unmatched name silently reused the previous
        # CT's thickness (or raised NameError on the very first CT).
        raise ValueError('cannot infer slice thickness from CT name: %s' % ct)
    # Ground-truth nodule ids of the current CT (one entry per annotated box).
    gt_indexes = gt_data['gt_group'][ct_index]
    # Prediction ids not yet matched to any GT nodule; whatever remains
    # after the GT loop below is counted as FP.
    ai_indexes_all = list(set(ai_data['ai_group'][ct_index]))
    # Match mode is "<mode>[_<threshold>]"; split it once.
    match_mode_parts = args.match_mode.split('_')
    for gt_index in list(set(gt_indexes)):
        # Prediction ids of the current CT (one entry per predicted box).
        ai_indexes = ai_data['ai_group'][ct_index]
        # Deduplicated prediction ids, computed once so that every
        # positional lookup below uses the same ordering.
        ai_unique = list(set(ai_indexes))
        # Per-prediction results against the current GT nodule, aligned
        # with ai_unique: drop_flags[i] == 1.0 iff prediction ai_unique[i]
        # "drops into" the GT volume; center distances; dice overlaps.
        drop_flags = []
        center_point_dises = []
        dice = []
        for ai_index in ai_unique:
            # All predicted boxes (and slice info) of this prediction id.
            ai_box = [ai_data['ai_boxes'][ct_index][i] for i in range(len(ai_indexes)) if ai_indexes[i] == ai_index]
            ai_layer = [ai_data['ai_layer'][ct_index][i] for i in range(len(ai_indexes)) if ai_indexes[i] == ai_index]
            # All ground-truth boxes (and slice info) of this GT nodule id.
            gt_box = [gt_data['gt_boxes'][ct_index][i] for i in range(len(gt_indexes)) if gt_indexes[i] == gt_index]
            gt_layer = [gt_data['gt_layer'][ct_index][i] for i in range(len(gt_indexes)) if gt_indexes[i] == gt_index]
            # Does the prediction's center fall inside the GT nodule's 3D extent?
            drop_flag , center_point_dis = drop_in(set_gt = gt_box,gt_layer=gt_layer,set_ai = ai_box,ai_layer = ai_layer,thickness = thickness)
            drop_flags.append(drop_flag)
            center_point_dises.append(center_point_dis)
            dice.append(cal_dice_set1_set2(set1=name_num_2_box(name=ct,num=ai_index,data=ai_data),set2=name_num_2_box(name=ct,num=gt_index,data=gt_data)))
        # Fix: a GT nodule with zero predictions is a miss.  None of the
        # mode branches below executes when ai_unique is empty, so such
        # nodules were previously dropped from the statistics entirely.
        if not ai_unique:
            statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+'xx'] = 'FN'
            continue
        # Number of predictions that dropped into this GT nodule.
        matched_count = np.array(drop_flags).sum()
        if match_mode_parts[0] == '0':
            # Mode 0: center "drop-in" matching — three cases per the design doc.
            # Position of the prediction closest to the GT nodule (case 3).
            min_dis_index = center_point_dises.index(min(center_point_dises))
            for pos, ai_index in enumerate(ai_unique):
                # Case 1: nothing dropped in -> the GT nodule is FN.
                if matched_count == 0:
                    statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+'xx'] = 'FN'
                # Case 2: exactly one prediction dropped in -> it is TP.
                if matched_count == 1:
                    if drop_flags[pos] == 1.0:
                        statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+str(ai_index)] = 'TP'
                        # Matched, so it no longer counts as unmatched/FP.
                        if ai_index in ai_indexes_all:
                            ai_indexes_all.remove(ai_index)
                # Case 3: several dropped in -> the closest one is TP,
                # every other dropped-in prediction is FP.
                if matched_count > 1:
                    if drop_flags[pos] == 1.0:
                        if pos == min_dis_index:
                            statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+str(ai_index)] = 'TP'
                            if ai_index in ai_indexes_all:
                                ai_indexes_all.remove(ai_index)
                        else:
                            statistical_result_dict[str(ct)+'_'+'xx'+'_'+str(ai_index)] = 'FP'
                            # Already counted as FP, so drop from the unmatched list.
                            if ai_index in ai_indexes_all:
                                ai_indexes_all.remove(ai_index)
        elif match_mode_parts[0] == '1':
            # Mode 1: center-point distance below threshold counts as TP.
            threshold = float(match_mode_parts[1])
            for pos, ai_index in enumerate(ai_unique):
                if center_point_dises[pos] < threshold:
                    statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+str(ai_index)] = 'TP'
                    if ai_index in ai_indexes_all:
                        ai_indexes_all.remove(ai_index)
                # No prediction is close enough -> the GT nodule is FN.
                if min(center_point_dises) >= threshold:
                    statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+'xx'] = 'FN'
        elif match_mode_parts[0] == '2':
            # Mode 2: dice overlap above threshold counts as TP.
            threshold = float(match_mode_parts[1])
            for pos, ai_index in enumerate(ai_unique):
                if dice[pos] > threshold:
                    statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+str(ai_index)] = 'TP'
                    if ai_index in ai_indexes_all:
                        ai_indexes_all.remove(ai_index)
                # No prediction overlaps enough -> the GT nodule is FN.
                if max(dice) <= threshold:
                    statistical_result_dict[str(ct)+'_'+str(gt_index)+'_'+'xx'] = 'FN'
        else:
            raise Exception("指定模式出错")
    # Every prediction never matched to any GT nodule is an FP.
    for i in ai_indexes_all:
        statistical_result_dict[str(ct)+'_'+'xx'+'_'+str(i)] = 'FP'
# Detection metrics derived from the TP/FP/FN tallies.
outcomes = list(statistical_result_dict.values())
TP_count = outcomes.count('TP')
FP_count = outcomes.count('FP')
FN_count = outcomes.count('FN')
# recall = TP/(TP+FN); guard the empty case instead of ZeroDivisionError.
recall = TP_count/(TP_count+FN_count) if (TP_count+FN_count) else 0.0
# precision = TP/(TP+FP)
precision = TP_count/(TP_count+FP_count) if (TP_count+FP_count) else 0.0
# F-score = 2 * precision * recall / (precision + recall)
Fscore = precision * recall * 2/(precision + recall) if (precision + recall) else 0.0

########################## Nodule classification ##########################
# Whether classification performance should be evaluated (CLI flag).
classification_flag = args.classification
# Classification enabled: score classification performance.
if classification_flag:
    # Keys of all nodules whose detection outcome is TP.
    TP_nodule = [key for key,value in statistical_result_dict.items() if value == 'TP']
    # Classification performance over the TP nodules only.
    # NOTE(review): find_label without `mode` returns a (gt, ai) label pair,
    # while mode=1 below returns a single dict — confirm in utils.name_num_2_data.
    gt_label_tp,ai_label_tp = find_label(nodule = TP_nodule,gt_data=gt_data,ai_data=ai_data)
    sensitivity_dict,specificity_dict = cal_result(y_true = gt_label_tp,y_pred=ai_label_tp)
    # Labels of every recorded nodule (used for the Excel sheet).
    label_dict = find_label(nodule = statistical_result_dict.keys(),gt_data=gt_data,ai_data=ai_data,mode = 1)
# Classification disabled: fall through to segmentation.
else:
    # NOTE(review): with classification disabled, TP_nodule, label_dict,
    # sensitivity_dict and specificity_dict are never defined, yet they are
    # read unconditionally further down -> NameError. Confirm intended usage.
    pass
########################## Nodule segmentation ##########################
# Whether segmentation performance should be evaluated (CLI flag).
segmentation_flag = args.segmentation
# Segmentation enabled: score segmentation performance.
if segmentation_flag:
    # Pure segmentation: compute volume overlap.
    # Boxes needed for segmentation scoring of the TP nodules.
    # NOTE(review): TP_nodule is only defined when classification is enabled;
    # running with segmentation on and classification off raises NameError.
    box_dict_of_TP = find_box(TP_nodule = TP_nodule,gt_data=gt_data,ai_data=ai_data)
    # Volume overlap (dice) per TP nodule.
    dice_dict = cal_dice(box_dict_of_TP)
    # Density: only the mean density is needed, not the maximum.
    mean_density_dict = find_mean_density(nodule = statistical_result_dict.keys(),gt_data=gt_data,ai_data=ai_data)
    # Total volumes come straight from the input data.
    totalVolume_dict = find_totalVolume(nodule = statistical_result_dict.keys(),gt_data=gt_data,ai_data=ai_data)
    print()
# Segmentation disabled: nothing to do.
else:
    # NOTE(review): dice_dict / mean_density_dict / totalVolume_dict are read
    # unconditionally when building the Excel sheet below -> NameError when
    # segmentation is disabled. Confirm intended usage.
    print()
########################## Excel generation ##########################
# Slice information for every recorded nodule.
layer_dict = find_layer(nodule = statistical_result_dict.keys(),gt_data=gt_data,ai_data=ai_data)
# Predicted detection probability and normal product output probability.
norpr_scores_dict = find_norpr_scores(nodule = statistical_result_dict.keys() ,ai_data = ai_data)
# Running per-series row counter (feeds the "group" column).
group_dict = {}
for nodule_info in statistical_result_dict.keys():
    excel_line = ExcelTemp()
    # Keys are "<ct>_<gt idx>_<ai idx>"; split once and reuse.
    # NOTE(review): a CT name containing '_' would break this parsing —
    # confirm the series naming convention.
    parts = nodule_info.split('_')
    # series ID
    excel_line.series_ID = parts[0]
    # group: running index of this row within its series.
    group_dict[excel_line.series_ID] = group_dict.get(excel_line.series_ID, 0) + 1
    excel_line.group = group_dict[excel_line.series_ID]
    # GT / inference nodule numbers; 'xx' marks the missing side -> None.
    excel_line.num_gt = parts[1] if parts[1] != 'xx' else None
    excel_line.num_inf = parts[2] if parts[2] != 'xx' else None
    # Detection outcome ('TP'/'FP'/'FN').
    excel_line.statistical_result = statistical_result_dict[nodule_info]
    # Dice overlap exists only for TP nodules; None otherwise.
    excel_line.IoU = dice_dict.get(nodule_info)
    # Classification labels (arbitration vs inference) mapped through map_3.
    excel_line.classification_results_gt = map_3.get(label_dict[nodule_info]["gt_label"])
    excel_line.classification_results_inf = map_3.get(label_dict[nodule_info]["ai_label"])
    # Lesion volumes (mm), GT vs inference.
    excel_line.volume_gt = totalVolume_dict[nodule_info]['gt_totalVolume']
    excel_line.volume_inf = totalVolume_dict[nodule_info]['ai_totalVolume']
    # Mean nodule densities, GT vs inference.
    excel_line.mean_nodule_density_gt = mean_density_dict[nodule_info]["gt_mean_density"]
    excel_line.mean_nodule_density_inf = mean_density_dict[nodule_info]["ai_mean_density"]
    # Predicted detection probability and normal product output probability.
    excel_line.prob = norpr_scores_dict[nodule_info]["ai_totalVolume"]
    excel_line.normal_prob = norpr_scores_dict[nodule_info]["norpr"]

    # One Excel row per slice of the nodule.
    # NOTE(review): the per-slice fields below are still TODO, so every slice
    # currently emits an identical row — confirm the duplication is intended.
    for layer in layer_dict[nodule_info]:
        excel_line.long_gt = None          # TODO: long diameter gt (mm)
        excel_line.short_gt = None         # TODO: short diameter gt (mm)
        excel_line.long_inf = None         # TODO: long diameter inf (mm)
        excel_line.short_inf = None        # TODO: short diameter inf (mm)
        excel_line.center_distance = None  # TODO: center distance (no z-axis info?)
        excel_all.append(excel_line.get_excel_temp())
# Write the collected rows to the Excel workbook.
df = pd.DataFrame(excel_all)
df.to_excel(excel_writer = args.out_excel+'.xlsx',index=False)
# Summary metrics, written as a JSON report.
result = {
    # Nodule detection
    '总体结节检出的召回率': recall,
    '总体结节检出的精确度': precision,
    '总体结节检出的Fscore': Fscore,
    # Nodule classification
    '总体结节分类的灵敏度': sensitivity_dict,
    '总体结节分类的特异性': specificity_dict,
    # Nodule segmentation (not computed yet)
    '总体体积交并比': None,
    '结节长径相对误差': None,
    '结节短径相对误差': None,
    '结节平均密度偏差': None,
}
with open(args.out_json+'.json', 'w') as f_obj:
    json.dump(result, f_obj, ensure_ascii = False, indent = 4)
print()