import pydicom as dcm
import SimpleITK as sitk
import os
import json
import cv2
import glob
import pandas as pd
import numpy as np
import random
from collections import Counter
TrainPath = '../data/lumbar_train150/train'
JsonPath = '../data/lumbar_train150/lumbar_train150_annotation.json'
# jsonPath = './data/lumbar_train150/lumbar_train150_annotation.json'


def dicom_metainfo(dicm_path, list_tag):
    '''
    Read selected metadata tags from a DICOM file.

    :param dicm_path: path to the DICOM file
    :param list_tag: list of tag keys to fetch, e.g. ['0008|0018', ...]
    :return: list of tag values, in the same order as list_tag
    '''
    tag_reader = sitk.ImageFileReader()
    tag_reader.LoadPrivateTagsOn()
    tag_reader.SetFileName(dicm_path)
    tag_reader.ReadImageInformation()
    return [tag_reader.GetMetaData(tag) for tag in list_tag]


def dicom2array(dcm_path):
    '''
    Read a DICOM file and convert it to a grayscale image (np.array).
    https://simpleitk.readthedocs.io/en/master/link_DicomConvert_docs.html

    :param dcm_path: path to the DICOM file
    :return: 2-D image array (first slice of the loaded volume)
    '''
    reader = sitk.ImageFileReader()
    reader.SetImageIO('GDCMImageIO')
    reader.SetFileName(dcm_path)
    reader.ReadImageInformation()
    image = reader.Execute()
    if image.GetNumberOfComponentsPerPixel() == 1:
        image = sitk.RescaleIntensity(image, 0, 255)
        # MONOCHROME1 stores inverted intensities; flip so higher == brighter.
        photometric = reader.GetMetaData('0028|0004').strip()
        if photometric == 'MONOCHROME1':
            image = sitk.InvertIntensity(image, maximum=255)
        image = sitk.Cast(image, sitk.sitkUInt8)
    return sitk.GetArrayFromImage(image)[0]



def get_info(trainpath=TrainPath, jsonpath=JsonPath):
    """Build the per-keypoint annotation CSV from the JSON labels and DICOMs.

    Reads the annotation JSON, matches each annotated (studyUid, seriesUid,
    instanceUid) triple to the DICOM file carrying the same tags, then writes
    two CSVs: one mapping dcm path -> raw annotation, and one flattened
    per-keypoint table (image_path, identification, category, coord, zIndex,
    spacing).

    :param trainpath: directory containing the study folders with .dcm files
    :param jsonpath: path of the annotation JSON
    """
    # --- collect the annotated (studyUid, seriesUid, instanceUid) triples ---
    json_df = pd.read_json(jsonpath)
    anno_rows = []
    for idx in json_df.index:
        data0 = json_df.loc[idx, "data"][0]
        anno_rows.append({'studyUid': json_df.loc[idx, "studyUid"],
                          'seriesUid': data0['seriesUid'],
                          'instanceUid': data0['instanceUid'],
                          'annotation': data0['annotation']})
    # DataFrame.append was removed in pandas 2.0; build from a list of dicts.
    annotation_info = pd.DataFrame(
        anno_rows, columns=['studyUid', 'seriesUid', 'instanceUid', 'annotation'])

    # --- scan all DICOMs and keep only those whose uid triple is annotated ---
    dcm_paths = glob.glob(trainpath + "//**" + "//**.dcm")
    # tags: studyUid, seriesUid, instanceUid
    tag_list = ['0020|000d', '0020|000e', '0008|0018']
    dcm_rows = []
    for dcm_path in dcm_paths:
        try:
            dcm_path = dcm_path.replace('\\', '//')
            studyUid, seriesUid, instanceUid = dicom_metainfo(dcm_path, tag_list)
            dcm_rows.append({'dcmPath': dcm_path, 'studyUid': studyUid,
                             'seriesUid': seriesUid, 'instanceUid': instanceUid})
        except Exception:
            # unreadable / non-image DICOMs are skipped on purpose
            continue
    dcm_info = pd.DataFrame(
        dcm_rows, columns=['dcmPath', 'studyUid', 'seriesUid', 'instanceUid'])
    result = pd.merge(annotation_info, dcm_info,
                      on=['studyUid', 'seriesUid', 'instanceUid'])
    result = result[['dcmPath', 'annotation']]
    print(result.head())

    # --- flatten: one row per annotated keypoint ---
    anno_records = []
    for index in range(len(result)):
        dcm_path = result.iloc[index].loc['dcmPath']
        # read spacing once per file, not once per point (loop-invariant)
        spacing = dcm.dcmread(dcm_path).get('PixelSpacing')[0]
        points = result.iloc[index].loc['annotation'][0]['data']['point']
        for anno in points:
            tag = anno['tag']
            anno_records.append({
                'image_path': dcm_path,
                'identification': tag['identification'],
                # a point is either a vertebra or a disc annotation
                'category': tag['vertebra'] if 'vertebra' in tag else tag['disc'],
                'coord': str(anno['coord']),
                'zIndex': anno['zIndex'],
                'spacing': spacing,
            })
    anno_csv = pd.DataFrame(
        anno_records,
        columns=['image_path', 'identification', 'category', 'coord', 'zIndex', 'spacing'])
    anno_csv.to_csv('../data/csv_label/train150_anno.csv', index=False)
    result.to_csv('../data/csv_label/train150.csv', index=False)



def make_image(csv_file, save_path):
    """Convert every DICOM listed in csv_file to a jpg under save_path.

    Output name is '<study-dir>_<image-name>.jpg' derived from the '//'
    separated dcm path stored in the CSV's first column.
    """
    records = pd.read_csv(csv_file)
    for row in range(len(records)):
        dcm_path = records.iloc[row, 0]
        pixels = dicom2array(dcm_path)
        parts = dcm_path.split('//')
        out_name = parts[1] + '_' + parts[2].replace('dcm', 'jpg')
        cv2.imwrite(os.path.join(save_path, out_name), pixels)

def concat_csv(csvfile1='../data/csv_label/train51_anno.csv',
               csvfile2='../data/csv_label/train150_anno.csv',
               save_csv='../data/csv_label/trainall_anno.csv'):
    """Concatenate two annotation CSVs row-wise and write the result.

    :param csvfile1: path of the first input CSV
    :param csvfile2: path of the second input CSV
    :param save_csv: path of the merged output CSV
    """
    csv_1 = pd.read_csv(csvfile1)
    csv_2 = pd.read_csv(csvfile2)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported API.
    new_csv = pd.concat([csv_1, csv_2], ignore_index=True)
    new_csv.to_csv(save_csv, index=False)


def conver_csv(csv_file):
    """Convert the per-keypoint annotation CSV into a list of per-image dicts.

    Each returned dict holds 'image_path' (path of the mid-frame dcm) plus
    one key per keypoint identification (T12-L1 .. S1 etc.) whose value is
    [category, coord-string, spacing].

    :param csv_file: path of the annotation CSV produced by get_info
    :return: list of annotation dicts, one per image
    """
    image_list = []
    csv_data = pd.read_csv(csv_file)
    # missing categories default to 'v1'
    csv_data = csv_data.fillna('v1')
    if len(csv_data) == 0:
        # empty CSV: nothing to convert (original crashed on iloc[0, 0])
        return image_list
    image_path = csv_data.iloc[0, 0]
    anno_dict = {'image_path': image_path}
    for index in range(len(csv_data)):
        if image_path != csv_data.iloc[index, 0]:
            # path changed: previous image is complete
            image_list.append(anno_dict)
            anno_dict = {'image_path': csv_data.iloc[index, 0]}
        row = csv_data.iloc[index]
        anno_dict[row.loc['identification']] = [row.loc['category'],
                                                row.loc['coord'],
                                                row.loc['spacing']]
        image_path = csv_data.iloc[index, 0]
    # BUG FIX: the dict for the last image was never appended, silently
    # dropping one case from every dataset built on top of this function.
    image_list.append(anno_dict)
    return image_list


def convert_dict(anno_dict):
    """Turn an annotation dict into a fixed-order list of keypoint entries.

    Order is the 5 vertebrae L1..L5 followed by the 6 discs T12-L1..L5-S1.
    Each entry is [(x, y), category, spacing]; missing keypoints yield the
    placeholder [(0, 0), 'v1', 0.9].
    """
    point_name = ['L1', 'L2', 'L3', 'L4', 'L5', 'T12-L1', 'L1-L2', 'L2-L3', 'L3-L4', 'L4-L5', 'L5-S1']
    anno_list = []
    for name in point_name:
        if name not in anno_dict:
            anno_list.append([(0, 0), 'v1', 0.9])
            continue
        category, coord_str, spacing = anno_dict[name]
        # coord is stored as the string repr of a list, e.g. '[12, 34]'
        x_str, y_str = coord_str.split(',')
        x = int(x_str.replace('[', ''))
        y = int(y_str.replace(']', '').replace(' ', ''))
        anno_list.append([(x, y), category, spacing])
    return anno_list



def get_guanz(t2_data_dir,point):
    """Collect the cross-section slices matching each disc keypoint.

    :param t2_data_dir: path of the case's mid-sagittal T2 dcm
    :param point: 11 (x, y) pixel coords in convert_dict order
                  [L1..L5, T12-L1, L1-L2, L2-L3, L3-L4, L4-L5, L5-S1]
    :return: six lists of dcm paths, one per disc T12-L1 .. L5-S1
    """
    ## From the position of the T2 mid frame and each keypoint's coordinates,
    ## fetch the corresponding cross-section ("guanz") slices.
    t2_data_dir = t2_data_dir.replace('\\','/').replace('//','/')
    t2_data = dcm.read_file(t2_data_dir)
    # world-coordinate origin (last component of ImagePositionPatient) and
    # pixel spacing of the sagittal frame
    t2_start = t2_data.get('ImagePositionPatient')[-1]
    t2_spacing = t2_data.get('PixelSpacing')[0]
    ## Compute each level's position in world coordinates from the keypoint's
    ## vertical pixel coordinate, used to locate the matching slice.
    # NOTE(review): the vertebra heights L1..L5 below are computed but only
    # referenced by the commented-out prints.
    T12_L1 = t2_start - point[5][1] * t2_spacing
    L1 = t2_start - point[0][1] * t2_spacing
    L1_L2 = t2_start - point[6][1] * t2_spacing
    L2 = t2_start - point[1][1] * t2_spacing
    L2_L3 = t2_start - point[7][1] * t2_spacing
    L3 = t2_start - point[2][1] * t2_spacing
    L3_L4 = t2_start - point[8][1] * t2_spacing
    L4 = t2_start - point[3][1] * t2_spacing
    L4_L5 = t2_start - point[9][1] * t2_spacing
    L5 = t2_start - point[4][1] * t2_spacing
    L5_S1 = t2_start - point[10][1] * t2_spacing

    # one bucket of slice paths per disc level
    T12_L1_list = []
    L1_list = []
    L1_L2_list  = []
    L2_list = []
    L2_L3_list  = []
    L3_list = []
    L3_L4_list  = []
    L4_list = []
    L4_L5_list  = []
    L5_list = []
    L5_S1_list  = []
    # print(L1,L2,L3,L4,L5)
    # print(T12_L1,L1,L1_L2,L2,L2_L3,L3,L3_L4,L4,L4_L5,L5,L5_S1)
    data_path = t2_data_dir.replace(t2_data_dir.split('/')[-1],'')
    for name in os.listdir(data_path):
        try:
            data_dir = os.path.join(data_path,name)
            data = dcm.read_file(data_dir)
            orien = data.get('ImageOrientationPatient')
            series = data.get('SeriesDescription').lower()
            guan_spacing = data.get('PixelSpacing')
            guan_shape = dicom2array(data_dir).shape
            # NOTE(review): row direction ~ patient X and column direction
            # ~ patient Y; the original comments call these slices "coronal"
            # but this test looks like it selects axial/transverse T2 slices
            # -- TODO confirm the intended plane.
            if orien[0] > 0.7 and orien[-2] > 0.7 and 't2' in series:
                # slice-center position: origin plus half the extent along
                # the row/column direction cosines (z components)
                position = data.get('ImagePositionPatient')[2]
                position = position +\
                           guan_shape[1]/2*guan_spacing[1]*orien[2] + \
                           guan_shape[0]/2*guan_spacing[0]*orien[5]
                # print(position,data.get('InstanceNumber'))
                # print(position)
                # assign the slice to the first disc whose +/- 8mm window
                # contains it (elif order resolves overlapping windows)
                if T12_L1-8 <= position <= T12_L1+8:
                    T12_L1_list.append(data_dir)
                elif L1_L2-8 <= position <= L1_L2+8:
                    L1_L2_list.append(data_dir)
                elif L2_L3-8 <= position <= L2_L3+8:
                    L2_L3_list.append(data_dir)
                elif L3_L4-8 <= position <= L3_L4+8:
                    L3_L4_list.append(data_dir)
                elif L4_L5-8 <= position <= L4_L5+8:
                    L4_L5_list.append(data_dir)
                elif L5_S1-8 <= position <= L5_S1+8:
                    L5_S1_list.append(data_dir)
        except Exception:
            # skip files that are unreadable or missing the needed tags
            continue
    return T12_L1_list,L1_L2_list,L2_L3_list,L3_L4_list,L4_L5_list,L5_S1_list


def get_guanz_data(image_list, size=None):
    """Collect per-disc slice samples for every case.

    For each case, locates the slices near each disc level (T12-L1 .. L5-S1)
    via get_guanz and pairs every slice image with that disc's category.

    :param image_list: list of per-case annotation dicts (see conver_csv)
    :param size: optional (w, h); when given, each image is resized and
                 stacked into 3 identical channels
    :return: list of [image_array, int_label] pairs, labels 0-based
    """
    samples = []
    # indices 5..10 of convert_dict's fixed order are the six disc keypoints
    disc_category_idx = (5, 6, 7, 8, 9, 10)
    for case in image_list:
        label = convert_dict(case)
        points = [entry[0] for entry in label]
        categories = [entry[1] for entry in label]
        level_dirs = get_guanz(case['image_path'], points)
        for cat_idx, slice_dirs in zip(disc_category_idx, level_dirs):
            category = categories[cat_idx]
            for slice_dir in slice_dirs:
                print('image path', slice_dir)
                image = dicom2array(slice_dir)
                if size is not None:
                    image = cv2.resize(image, size)
                    image = np.stack((image, image, image), axis=-1)
                print(image.shape)
                ## map the category string ('v1', 'v2', ...) to a 0-based int
                samples.append([image, int(category[1]) - 1])
    return samples



def get_cut_data(x_range=33, y_range=26, csv_file='../data/csv_label/train51_anno.csv', size=None):
    """Crop a patch around each of the five vertebra keypoints per case.

    :param x_range: half-width of the crop in physical units
    :param y_range: half-height of the crop in physical units
    :param csv_file: annotation CSV to read the cases from
    :param size: optional (w, h) to resize each patch to
    :return: shuffled list of [patch_array, label] pairs; label is 1 for
             category 'v2' and 0 otherwise, with label-0 patches duplicated
             three extra times to rebalance the classes
    """
    patches = []
    for case in conver_csv(csv_file):
        mid_dir = case['image_path'].replace('//', '/').replace('\\', '/')
        dir_parts = mid_dir.split('/')
        jpg_dir = '../data/image/' + dir_parts[-2] + '_' + dir_parts[-1].replace('dcm', 'jpg')
        mid_image = cv2.imread(jpg_dir)
        label = convert_dict(case)
        ## convert the fixed physical crop extent into per-image pixels
        spacing = label[0][2]
        half_w = x_range // spacing
        half_h = y_range // spacing
        for zhuiti in label[:5]:
            (px, py), category = zhuiti[0], zhuiti[1]
            ## OpenCV images index rows (y) first, so crop as [y, x]
            patch = mid_image[int(py - half_h):int(py + half_h),
                              int(px - half_w):int(px + half_w)]
            if size is not None:
                patch = cv2.resize(patch, size)
            cls = int(category == 'v2')
            patches.append([patch, cls])
            if cls == 0:
                # oversample the majority-needed class 3 extra times
                patches.extend([patch, cls] for _ in range(3))
    random.shuffle(patches)
    return patches








if __name__ == '__main__':
    # Scratch/debugging entry point: most experiments below are kept
    # commented out. The active loop previews the matched slices for every
    # annotated case with cv2.imshow (press any key to advance).
    pass


    # cut_image_list = get_guanz_data(csv_file='../data/csv_label/train150_anno.csv')
    # label = [p[1] for p in cut_image_list]
    # result = Counter(label)
    # print(result)

    # cut_image_list = get_cut_data(csv_file='../data/csv_label/train51_anno.csv')
    # label = [p[1] for p in cut_image_list]
    # ## count the occurrences of each element in the list
    # result = Counter(label)
    # print(result)


    image_list = conver_csv('../data/csv_label/train150_anno.csv')
    for image_dict in image_list:
        t2_data_dir = image_dict['image_path']
        print(t2_data_dir)
        label = convert_dict(image_dict)
        point = [p[0] for p in label]
        category = [p[1] for p in label]
        T12_L1, L1_L2, L2_L3, L3_L4, L4_L5, L5_S1 = get_guanz(t2_data_dir,point)
        all = T12_L1+L1_L2+L2_L3+L3_L4+L4_L5+L5_S1
        print(len(T12_L1), len(L1_L2), len(L2_L3), len(L3_L4), len(L4_L5), len(L5_S1))
        for guan_dir in all:
            image = dicom2array(guan_dir)
            cv2.imshow('zhuan zhuang ',image)
            cv2.waitKey()


    # trans_csv(csv_file='../data/csv_label/train51.csv')

    # image_path = '../data/lumbar_train150/train/study121'
    # shizhuang_dir = '../data/lumbar_train150/train/study121/image31.dcm'
    # shizhuang_data = dcm.read_file(shizhuang_dir)
    # spacing = shizhuang_data.get('PixelSpacing')[0]
    # position = shizhuang_data.get('ImagePositionPatient')
    # print(position,spacing)
    # for name in os.listdir(image_path):
    #     image_dir = os.path.join(image_path,name)
    #     orien = dcm.read_file(image_dir).get('ImageOrientationPatient')
    #     if orien is not None:
    #         if orien[0] > 0.7 and orien[-2] > 0.7:
    #             pass
    #             print(dcm.read_file(image_dir).get('ImagePositionPatient'), orien)
    #             print(dcm.read_file(image_dir).get('InstanceNumber'), dcm.read_file(image_dir).get('SeriesDescription'))
    #             image = dicom2array(image_dir)
    #             cv2.imshow('image', image)
    #             cv2.waitKey()



    # csv_data = pd.read_csv('../data/csv_label/train51.csv')
    # for index in range(len(csv_data)):
    #     image_dir = csv_data.iloc[index,0]
    #     image_data = dcm.read_file(image_dir)
    #     image_path = image_dir.replace(image_dir.split('/')[-1],'')
    #     print(image_data.get('SeriesNumber'),image_data.get('InstanceNumber'))

        # print(image.get('PixelSpacing'))

    # get_info()

    # make_image(csv_file='../data/csv_label/train150.csv',save_path='../data/image')
    # concat_csv()
    # for i in range(1,65):
    #     image_path = 'D://complete//Spark/data/lumbar_train51/train//study25//image'+str(i)+'.dcm'
    #     data = dcm.read_file(image_path)
    #     # print(data.get('SeriesDescription'))
    #     if data.get('SeriesDescription') == 'WFST2_SAG(T2)':
    #         print(data)
    #         data = dicom2array(image_path)
    #         cv2.imshow('image',data)
    #         cv2.waitKey()
    #         # print(data.shape)