# Semi-supervised learning: classify cancer grade using a SINGLE GLCM (gray-level
# co-occurrence matrix) feature at a time to see how each performs (HER2 staining).
# Feature 1 (contrast) separates the grades well — close to 90% accuracy.

from PIL import Image
from skimage import io, transform
import numpy as np
import glob
import os
from sklearn.semi_supervised import LabelPropagation
from sklearn import preprocessing

# Read image-feature txt files (feature part + label part), one file per grade.
def read_feature_from_single_txt(g0_feature_path,g1_feature_path,g2_feature_path,g3_feature_path, detail_number = 7):
    """Read per-grade GLCM feature txt files (one file per HER2 grade 0-3).

    Each row of a file holds ``detail_number`` whitespace-separated cells:
    column 0 is the integer grade label, the remaining columns are feature
    strings whose first two characters are stripped before float conversion
    (the original author's comments say this removes a leading "b'" left
    over from byte decoding -- TODO confirm against the script that writes
    these files).

    Parameters
    ----------
    g0_feature_path, g1_feature_path, g2_feature_path, g3_feature_path : str
        Paths to the four grade files; their rows are concatenated in order.
    detail_number : int
        Number of columns per row (1 label + detail_number-1 features).

    Returns
    -------
    (feature_data, feature_label)
        ``feature_data`` is a list of [float, ...] feature rows and
        ``feature_label`` a parallel list of one-element [int] labels.
    """
    feature_data = []
    feature_label = []

    # The original duplicated the identical parse loop four times (once per
    # grade file); it now lives in a single helper called per file.
    for path in (g0_feature_path, g1_feature_path, g2_feature_path, g3_feature_path):
        rows, labels = _parse_feature_file(path, detail_number)
        feature_data.extend(rows)
        feature_label.extend(labels)

    return feature_data,feature_label


def _parse_feature_file(path, detail_number):
    """Parse one feature txt file into ([feature_row, ...], [[label], ...])."""
    # loadtxt reads raw bytes; astype(str) decodes them to plain strings.
    # ndmin=2 keeps a one-row file two-dimensional (the original row
    # indexing crashed on single-line files because loadtxt returned 1-D).
    table = np.loadtxt(path, dtype=bytes, ndmin=2).astype(str)

    rows = []
    labels = []
    for record in table:
        labels.append([int(record[0])])  # column 0: integer grade label
        # Columns 1..detail_number-1: strip the 2-char prefix, then to float.
        rows.append([float(record[j][2:]) for j in range(1, detail_number)])
    return rows, labels

# Read image-feature txt file (feature part + label part) from one combined file.
def read_feature_from_all_txt(all_feature_path, detail_number = 7):
    """Read the combined GLCM feature txt file.

    Each row holds ``detail_number`` whitespace-separated cells: column 0 is
    the integer class label; columns 1..detail_number-1 are feature strings
    whose first two characters are stripped before the float conversion
    (removing the leftover "b'" prefix noted by the original author).

    Returns (feature_data, feature_label): parallel lists of [float, ...]
    feature rows and one-element [int] labels.
    """
    # loadtxt would otherwise hand back repr'd bytes; reading as bytes and
    # casting to str yields clean decoded strings.
    table = np.loadtxt(all_feature_path, dtype = bytes).astype(str)

    feature_data = []
    feature_label = []

    for record in table:
        feature_label.append([int(record[0])])
        feature_data.append([float(record[col][2:]) for col in range(1, detail_number)])

    return feature_data,feature_label

# Data standardization (z-score) of all six feature columns.
def normalization_process(data):
    """Standardize (z-score) the first six feature columns of *data*.

    Each column is shifted to zero mean and scaled to unit (population)
    standard deviation -- the same transform ``preprocessing.scale`` applied
    in the original, which copied every column into its own Python list and
    then re-zipped the six scaled vectors element by element. A constant
    column comes back as all zeros instead of dividing by zero (matching
    sklearn's zero-variance handling).

    Parameters
    ----------
    data : array-like, shape (n_samples, >= 6)
        Raw feature rows; only the first six columns are used.

    Returns
    -------
    numpy.ndarray of float64, shape (n_samples, 6)
    """
    cols = np.asarray(data, dtype=float)[:, :6]
    mean = cols.mean(axis=0)
    std = cols.std(axis=0)   # population std (ddof=0), as preprocessing.scale uses
    std[std == 0.0] = 1.0    # constant column -> output all zeros, like sklearn
    return (cols - mean) / std

# Data standardization that also keeps only ONE chosen feature column.
def normalization_process2(data,order):
    """Standardize (z-score) a single feature column.

    Selects column ``order`` (1-based) from *data* and scales it to zero
    mean / unit population standard deviation, returning it as an
    (n_samples, 1) array so it can feed the classifier directly. This
    replaces the original per-element copy loops around
    ``preprocessing.scale``; a constant column comes back as all zeros
    rather than dividing by zero (matching sklearn's behavior).

    Parameters
    ----------
    data : array-like, shape (n_samples, n_features)
    order : int
        1-based index of the feature column to keep.

    Returns
    -------
    numpy.ndarray of float64, shape (n_samples, 1)
    """
    column = np.asarray(data, dtype=float)[:, order - 1]
    std = column.std()   # population std (ddof=0), same as preprocessing.scale
    if std == 0.0:
        std = 1.0        # constant column -> all zeros, like sklearn
    return ((column - column.mean()) / std).reshape(-1, 1)

# Keep only one feature column, without scaling (author was unsure this was correct).
def save_single_feature(data,order):
    """Keep only one feature column, without any scaling.

    Returns column ``order`` (1-based) of *data* as an (n_samples, 1)
    array; values are copied through unchanged.
    """
    selected = [[row[order - 1]] for row in data]
    return np.asarray(selected)


# 加载数据（随机从HER2等级的整体数据中取出  1/copies 出来训练）
# def load_data_1(data,label,copies=2):
#     rng=np.random.RandomState(8) #若随机数种子相同，则每次随机出来的序列也相同
#     index=np.arange(len(data))
#     rng.shuffle(index)
#     X=data[index]
#     Y=label[index]
#     # 应该是取前len(Y)/10个作为有标记的样本，n_labeled_points为其对应的索引数组
#     n_labeled_points=int(len(Y)/copies)
#     # 创造一个从n_labeled_points开始的终止点为len(Y)的等差数列？？
#     unlabeled_index=np.arange(len(Y))[n_labeled_points:]
#
#     return X,Y,unlabeled_index

# Load data: from EACH HER2 grade group, keep an equal share of labeled points
# (a combined 'copies' fraction of the whole dataset) for training.
def load_data_2(data,label,copies):
    """Split the grade-ordered dataset into labeled / unlabeled parts.

    Assumes the rows are ordered as four equally sized grade groups
    (g0..g3). A total of ``int(len(label) * copies)`` points stay labeled,
    split evenly across the four groups by keeping the LAST quarter-share
    of each group; every other index goes into ``unlabeled_index``.

    Returns (X, Y, unlabeled_index).
    """
    order = np.arange(len(data))
    X = data[order]
    Y = label[order]

    total = len(Y)
    # Labeled points contributed by each of the four grade groups.
    per_grade = int(int(total * copies) / 4)

    # Each grade occupies one quarter of the rows; its unlabeled slice runs
    # from the quarter start up to (quarter end - per_grade).
    starts = (0, int(total / 4), int(total / 2), int(3 * total / 4))
    ends = (int(total / 4 - per_grade),
            int(total / 2 - per_grade),
            int(3 * total / 4 - per_grade),
            int(total - per_grade))
    unlabeled_index = np.concatenate([np.arange(lo, hi) for lo, hi in zip(starts, ends)])

    return X,Y,unlabeled_index

# Test the label-propagation algorithm.
def run_LabelPropagation(*data, max_iter, gamma):
    """Fit a LabelPropagation model and report its accuracy.

    ``data`` unpacks to (X, Y, unlabeled_index). The samples at
    ``unlabeled_index`` get their label replaced by -1 (sklearn's marker
    for "unlabeled") before fitting; the model is then scored on exactly
    those hidden points. Prints the hidden samples, their predicted
    labels, and the accuracy.
    (Historical note: max_iter=1000, gamma=7.35 on all 6 features gave
    ~0.809; gamma tuned on feature 1 alone gave ~0.876.)
    """
    X, Y, unlabeled_index = data

    # Hide the chosen labels from the learner.
    train_labels = np.copy(Y)
    train_labels[unlabeled_index] = -1

    model = LabelPropagation(max_iter=max_iter, kernel='rbf', gamma=gamma)
    model.fit(X, train_labels)
    print(X[unlabeled_index])

    predict_result = model.predict(X[unlabeled_index])
    print(predict_result)

    # Accuracy against the true labels of the hidden points.
    print("Accuracy:%f"%model.score(X[unlabeled_index],Y[unlabeled_index]))





if __name__ == '__main__':
    # ******************** feature processing (start) *****************************

    # Combined GLCM feature file: one row per image, label + feature columns.
    all_feature_inputPath = '../../../data/txt/feature/glcm_10X_all.txt'

    # Read the GLCM (gray-level co-occurrence) image features.
    feature_data, feature_label = read_feature_from_all_txt(all_feature_inputPath)
    feature_data_array = np.array(feature_data)  # the sklearn API expects ndarray inputs, so convert here
    feature_label_array = np.array(feature_label)
    # single_feature_data_array = save_single_feature(feature_data_array,1)
    # Keep only feature column 2 (1-based) and standardize it (z-score).
    single_feature_data_array = normalization_process2(feature_data_array, 2)


    # # Label-propagation run (alternative: all 6 features, smaller labeled share)
    # X, Y, unlabeled_index = load_data_2(feature_data_array, feature_label_array, 0.002)
    X, Y, unlabeled_index = load_data_2(single_feature_data_array, feature_label_array, 0.03)
    run_LabelPropagation(X, Y, unlabeled_index, max_iter = 10000 , gamma = 400)


    # ******************** feature processing (end) *****************************