import numpy as np
import pandas as pd
import math
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler
import random

def create_class_weight(mu=0.15):
    labels_dict = {0: 607, 1: 179, 2: 29, 3: 83, 4: 25}
    total = np.sum(list(labels_dict.values()))
    keys = list(labels_dict.keys())
    class_weight = dict()

    for key in keys:
        score = math.log(mu*total/float(labels_dict[key]))
        class_weight[key] = score if score > 1.0 else 1.0

    return class_weight


def read_csv(sets_path='/data1/zmy/data/kfold_dataset/'):
    """Load the k-fold CSV splits into flat per-row feature lists.

    Folds 0-3 form the training set and fold 4 the test set.  Each row
    becomes an 18-element list: 15 numeric features (indices 0-14), the
    zero-based cancer-type label (index 15), then the PET and CT slice
    paths (indices 16-17).

    Parameters
    ----------
    sets_path : str
        Directory (including trailing separator) containing
        dataset0.csv ... dataset4.csv.

    Returns
    -------
    tuple
        (train_features, test_features), two lists of per-row lists.
    """
    train_sets = ['dataset0.csv',
                  'dataset1.csv',
                  'dataset2.csv',
                  'dataset3.csv']
    test_set = 'dataset4.csv'

    # Read the training folds (loop variable renamed: `set` shadowed a builtin).
    train_features = []
    for set_name in train_sets:
        train_features.extend(_extract_features(pd.read_csv(sets_path + set_name)))

    # Read the test fold.
    test_features = _extract_features(pd.read_csv(sets_path + test_set))

    return train_features, test_features


def _extract_features(data):
    """Convert one fold's DataFrame into the 18-element per-row list format."""
    numeric_columns = ['z', 'x', 'y', 'r', 'patientWeight', 'patientSex',
                       'patientAge', 'patientSize', 'local_suvmax',
                       'local_suvmin', 'local_suvavg', 'local_suvstd',
                       'local_suvvar', 'lungW', 'lungH']
    features = []
    for i in range(len(data)):
        row = [data[col][i] for col in numeric_columns]
        # cancer_type is stored 1-based in the CSV; convert to a 0-based label.
        row.append(int(data['cancer_type'][i]) - 1)
        row.append(data['PETSlice_Path'][i])
        row.append(data['CTSlice_Path'][i])
        features.append(row)
    return features


def standard_scaler(features):
    """Z-score normalise the feature matrix: per column, subtract the mean
    and divide by the standard deviation.

    Alternative scalers considered (kept for reference):
      MinMaxScaler  -- linear rescaling into [0, 1]
      MaxAbsScaler  -- rescaling by max absolute value into [-1, 1]
    """
    normaliser = StandardScaler()
    return normaliser.fit_transform(features)


# 用于训练的数据生成器
def data_generator(batch_size, data_list, istrain):
    # 定义返回标志
    batch_idx = 0

    # 样本权重：按类比赋予
    s_weights = [1, 4, 21, 8, 25]

    # 如果是训练集，打乱数据集
    if istrain:
        random.shuffle(data_list)

    while True:

        # 定义返回特征
        extra_features = []
        ct_features = []
        pet_features = []

        labels = []
        sample_weights = []

        for item in data_list:

            labels.append(item[15])
            sample_weights.append(s_weights[item[15]])

            extra_features.append(item[:15])


            ct_name = '/data1/zmy/data/'+item[17]
            ct = np.load(ct_name)
            ct_features.append(ct)
            pet_name = '/data1/zmy/data/'+item[16]
            pet = np.load(pet_name)
            pet_features.append(pet)

            batch_idx = batch_idx+1

            if batch_idx >= batch_size:
                extra_features = np.asarray(extra_features, dtype=np.float)
                ct_features = np.asarray(ct_features)
                pet_features = np.asarray(pet_features)
                labels = np.asarray(labels, dtype=np.int)

                # 标签转换为onehot编码
                onehot_labels = to_categorical(labels, 5)

                # 特征归一化
                extra_features = standard_scaler(extra_features)

                # train test ct和pet进行合并
                img_feature = np.stack((ct_features, pet_features), axis=3)

                yield {'X_input': img_feature, 'other_info': extra_features}, {'X': onehot_labels}, {'sample_weight': sample_weights}




if __name__ == '__main__':
    # Quick numeric sanity check: prints -ln(0.2) (~1.609).
    print(-np.log(0.2))