import json
import logging
import os

import nibabel
import numpy as np
import pandas as pd
import skimage
import skimage.measure


def analysis_one(raw_file, label_file, out_json_path):
    """Compute volume-wide and per-organ statistics for one scan.

    Parameters:
        raw_file: path to the raw NIfTI image (intensity volume).
        label_file: path to the NIfTI segmentation; voxel value i marks organ i
            (0 = background).
        out_json_path: output path for the JSON statistics file.

    Returns:
        dict with global voxel stats, a binned intensity histogram, and
        per-organ size/intensity/connectivity stats (also written to
        out_json_path as JSON).

    Raises:
        Exception: if either input file does not exist.
    """
    # BUG FIX: the original used `and`, so this only raised when BOTH files
    # were missing; a single missing file crashed later inside nibabel.load.
    if not os.path.exists(raw_file) or not os.path.exists(label_file):
        raise Exception('file not exists')
    img = nibabel.load(raw_file)
    data = np.array(img.get_fdata(), dtype=np.int16)

    spacing = list(img.header.get_zooms())
    direction = img.affine[:3, :3]
    # NOTE(review): get_sform() returns the full 4x4 sform matrix, not an
    # origin point — kept as-is for output compatibility, but confirm intent.
    origin = list(img.header.get_sform())

    # Global voxel statistics.
    voxel_shape = list(data.shape)      # grid size per axis
    voxel_total = np.prod(voxel_shape)  # total number of voxels
    voxel_max = np.max(data)
    voxel_min = np.min(data)
    voxel_mean = np.mean(data)
    voxel_median = np.median(data)
    voxel_std = np.std(data)

    # Intensity histogram: clamp to [-100, 500], then bin 20 values wide.
    # BUG FIX: the original clamped out-of-range voxels to -101/501 but then
    # reindexed over range(-100, 500), silently dropping every clamped voxel
    # (and the value 500) from the histogram, so the ratios did not sum to 1.
    tmp_data = np.clip(data, -100, 500)
    counts = pd.Series(tmp_data.flatten()).value_counts()
    # BUG FIX: fill_value=0 instead of None — None produced NaN counts.
    counts = counts.reindex(range(-100, 501), fill_value=0)
    binned = counts.groupby(counts.index // 20).sum()
    voxel_value_distribution = binned.tolist()
    # Histogram normalised by the total voxel count.
    voxel_value_distribution_ratio = (binned / voxel_total).tolist()

    # Key each bin by its actual value interval.
    # BUG FIX: the original labelled bins as [lo, lo + 180] although each
    # bin only covers 20 consecutive intensity values.
    voxel_value_distribution_dict = {}
    voxel_value_distribution_ratio_dict = {}
    for i, (count, ratio) in enumerate(zip(voxel_value_distribution,
                                           voxel_value_distribution_ratio)):
        lo = i * 20 - 100
        key = f'[{lo}, {lo + 19}]'
        voxel_value_distribution_dict[key] = count
        voxel_value_distribution_ratio_dict[key] = ratio

    label = nibabel.load(label_file)
    label_data = np.array(label.get_fdata(), dtype=np.uint8)

    # Per-organ extent/area/volume derived from the axis-aligned bounding
    # box scaled by the voxel spacing.
    diameter = {}
    area = {}
    volume = {}
    # Connected-component count per organ.
    label_num = {}
    # Intensity statistics (max/min/mean/median/std) per organ.
    voxel_organ = {}
    # Voxel count per organ.
    voxel_num_organ = {}

    spacing_x, spacing_y, spacing_z = spacing
    for i in range(1, np.max(label_data) + 1):
        mask = label_data == i
        x, y, z = np.where(mask)
        if x.size == 0:
            # ROBUSTNESS: label id absent from this segmentation — the
            # original crashed on np.min of an empty array.
            continue
        x_min, x_max = np.min(x), np.max(x)
        y_min, y_max = np.min(y), np.max(y)
        z_min, z_max = np.min(z), np.max(z)
        # TODO the extent math may be off by one voxel (max - min excludes
        # the last voxel) — needs confirmation, kept as in the original.
        diameter[i] = {
            'x': (x_max - x_min) * spacing_x,
            'y': (y_max - y_min) * spacing_y,
            'z': (z_max - z_min) * spacing_z
        }
        area[i] = {
            'xy': (x_max - x_min) * (y_max - y_min) * spacing_x * spacing_y,
            'yz': (y_max - y_min) * (z_max - z_min) * spacing_y * spacing_z,
            'xz': (x_max - x_min) * (z_max - z_min) * spacing_x * spacing_z
        }
        volume[i] = ((x_max - x_min) * (y_max - y_min) * (z_max - z_min)
                     * spacing_x * spacing_y * spacing_z)

        # Number of connected components for this organ label.
        _, label_num[i] = skimage.measure.label(mask.astype(np.uint8),
                                                return_num=True)

        data_organ = data[mask]
        voxel_organ[i] = {
            'max': np.max(data_organ),
            'min': np.min(data_organ),
            'mean': np.mean(data_organ),
            'median': np.median(data_organ),
            'std': np.std(data_organ)
        }
        voxel_num_organ[i] = np.sum(mask)

    result = {
        'spacing': spacing,  # voxel spacing per axis
        'direction': direction,  # 3x3 direction matrix from the affine
        'origin': origin,  # sform matrix (see NOTE above)
        'voxel_shape': voxel_shape,  # grid size
        'voxel_total': voxel_total,  # total voxel count
        'voxel_max': voxel_max,
        'voxel_min': voxel_min,
        'voxel_mean': voxel_mean,
        'voxel_median': voxel_median,
        'voxel_std': voxel_std,
        'voxel_value_distribution': voxel_value_distribution_dict,  # binned counts
        'voxel_value_distribution_ratio': voxel_value_distribution_ratio_dict,  # binned ratios
        'diameter': diameter,  # per-organ bounding-box extent
        'area': area,  # per-organ bounding-box face areas
        'volume': volume,  # per-organ bounding-box volume
        'label_num': label_num,  # per-organ connected components
        'voxel_organ': voxel_organ,  # per-organ intensity stats
        'voxel_num_organ': voxel_num_organ  # per-organ voxel count
    }
    # Convert every numpy value to a native Python type for JSON.
    result = convert_type(result)
    # BUG FIX: json.dump(result, open(...)) leaked the file handle.
    with open(out_json_path, 'w') as f:
        json.dump(result, f)
    return result


def convert_type(data):
    """Recursively convert numpy scalars and arrays to native Python types.

    Dicts and lists are converted in place (and returned); numpy scalars
    become int/float/bool; ndarrays become nested lists. Anything else is
    returned unchanged. The result is safe to pass to json.dump.

    Parameters:
        data: arbitrary (possibly nested) structure of dicts, lists,
            numpy values and plain Python values.

    Returns:
        The same structure with all numpy values replaced by native types.
    """
    if isinstance(data, dict):
        for key, value in data.items():
            data[key] = convert_type(value)
    elif isinstance(data, list):
        for i, item in enumerate(data):
            data[i] = convert_type(item)
    elif isinstance(data, np.ndarray):
        data = data.tolist()
    # BUG FIX: the original only handled int64/int16/float64/float32, so
    # other numpy scalar widths (np.int32, np.uint8, np.float16, np.bool_)
    # leaked through and broke json.dump. np.integer / np.floating cover
    # every integer/float width.
    elif isinstance(data, np.bool_):
        data = bool(data)
    elif isinstance(data, np.integer):
        data = int(data)
    elif isinstance(data, np.floating):
        data = float(data)
    return data


def ana_path(raw_file, label_file, out_path):
    """Analyse every (raw, label) file pair and write JSON statistics.

    Parameters:
        raw_file: directory containing the raw image files.
        label_file: directory containing the segmentation files.
        out_path: output directory; one JSON per input pair plus a
            GlobalStatistics.json with dataset-level averages.
    """
    logging.info('analysising raw: {}, label: {}'.format(raw_file, label_file))
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Pairing is positional after sorting both listings.
    raw_files = sorted(os.path.join(raw_file, name)
                       for name in os.listdir(raw_file))
    label_files = sorted(os.path.join(label_file, name)
                         for name in os.listdir(label_file))
    # ROBUSTNESS: zip() silently drops the unmatched tail, so at least warn
    # when the two directories disagree.
    if len(raw_files) != len(label_files):
        logging.warning('raw/label file count mismatch: %d vs %d',
                        len(raw_files), len(label_files))

    result = []
    for raw, label in zip(raw_files, label_files):
        logging.info('analysising raw: {}, label: {}'.format(raw, label))
        # First '.' strips ALL extensions (e.g. 'x.nii.gz' -> 'x').
        file_name = os.path.basename(raw).split('.')[0]
        r = analysis_one(raw, label, os.path.join(out_path, file_name + '.json'))
        result.append(r)

    # Average the per-file histograms key-by-key across the dataset.
    ratio_lists = {}
    count_lists = {}
    for r in result:
        for key, value in r['voxel_value_distribution_ratio'].items():
            ratio_lists.setdefault(key, []).append(value)
        for key, value in r['voxel_value_distribution'].items():
            count_lists.setdefault(key, []).append(value)
    voxel_value_distribution_ratio_mean = {
        key: np.mean(values, axis=0) for key, values in ratio_lists.items()
    }
    voxel_value_distribution_mean = {
        key: int(np.mean(values, axis=0)) for key, values in count_lists.items()
    }

    logging.info('analysising gobal info')
    # Dataset-level means of the per-file scalar/vector statistics.
    ans = {key: np.mean([r[key] for r in result], axis=0)
           for key in ('spacing', 'direction', 'origin', 'voxel_shape',
                       'voxel_total', 'voxel_max', 'voxel_min', 'voxel_mean',
                       'voxel_median', 'voxel_std')}
    ans['voxel_value_distribution'] = voxel_value_distribution_mean
    ans['voxel_value_distribution_ratio'] = voxel_value_distribution_ratio_mean

    ans = convert_type(ans)
    logging.info('analysising done')

    # BUG FIX: json.dump(ans, open(...)) leaked the file handle.
    with open(os.path.join(out_path, 'GlobalStatistics.json'), 'w') as f:
        json.dump(ans, f)


def get_id2label(plans):
    """Load a plans file and build the organ-id -> label-name mapping.

    Parameters:
        plans: path to a plans file; '.yml'/'.yaml' files are parsed with
            yaml, '.pkl' files with pickle. Other extensions are ignored.

    Returns:
        dict mapping id to label name. NOTE: extraction of the pairs from
        the loaded plans is still TODO, so the dict is currently empty —
        but the function now returns it (the original had no return at all,
        so callers always received None).
    """
    id2label = {}
    if os.path.basename(plans).endswith(('yml', 'yaml')):
        import yaml
        with open(plans, 'r') as f:
            # BUG FIX: yaml.load without an explicit Loader is deprecated
            # and unsafe on untrusted input; safe_load parses data only.
            plans = yaml.safe_load(f)
            # TODO extract id/label pairs from the yaml plans
    elif os.path.basename(plans).endswith('pkl'):
        import pickle
        with open(plans, 'rb') as f:
            # SECURITY: pickle.load executes arbitrary code from the file;
            # only call this on trusted plan files.
            plans = pickle.load(f)
            # TODO extract id/label pairs from the pickle plans
    return id2label



if __name__ == '__main__':
    # Entry point: analyse every raw/label pair from the inference results
    # and write the per-file and global statistics JSONs.
    ana_path(
        '/home/core/job/data/infer_result/convert_tmp/',
        '/home/core/job/data/infer_result/pred/',
        '/home/core/job/data/infer_result/analysis/',
    )
