'''
Binary-classification logistic regression model.
'''

from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler, normalize
import numpy as np
import pandas as pd
from release_code.data_analysis.data_two_metrics import auc_curve, pr_curve


# Uses the train/test split produced at training time.
def read_data():
    """Load the pre-split train/test CSVs and return model-ready arrays.

    Returns:
        (X_train, X_test, y_train, y_test) where the X arrays hold the 13
        nodule-location / patient / SUV-statistic feature columns as float,
        and the y arrays hold ``cancer_type`` shifted from {1, 2} to {0, 1}.
    """
    test_data = pd.read_csv('D:/lung_cancer/data/divide_csv/two/test.csv')
    train_data = pd.read_csv('D:/lung_cancer/data/divide_csv/two/train.csv')

    # Feature columns in the exact order the model expects.
    feature_cols = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge',
                    'patientSize', 'local_suvmax', 'local_suvmin',
                    'local_suvavg', 'local_suvstd', 'local_suvvar']

    # Vectorized column selection replaces the per-row Python loops.
    # Note: np.float / np.int were removed in NumPy 1.24 — use the
    # builtin float / int dtypes instead.
    X_train = train_data[feature_cols].to_numpy(dtype=float)
    X_test = test_data[feature_cols].to_numpy(dtype=float)
    # cancer_type is stored as 1/2; shift to 0/1 for binary classification.
    y_train = (train_data['cancer_type'] - 1).to_numpy(dtype=int)
    y_test = (test_data['cancer_type'] - 1).to_numpy(dtype=int)

    return X_train, X_test, y_train, y_test


# Feature scaling notes:
# - Probability/tree models (decision tree, RF) do not need scaling.
# - Optimization-based models (AdaBoost, SVM, LR, KNN, KMeans) do.
def train():
    """Train a class-balanced L2 logistic regression on the binary split.

    Loads the pre-split data, min-max scales the features, fits the model,
    saves the test-set positive-class probabilities to .npy files, and
    plots ROC and precision-recall curves for the test set.
    """
    train_features, test_features, train_labels, test_labels = read_data()

    scaler = MinMaxScaler()
    # scaler = StandardScaler()
    # scaler = MaxAbsScaler()
    # scaler = RobustScaler()
    # Fit the scaler on the training data only, then apply those same
    # statistics to the test data. Re-fitting on the test set (the old
    # fit_transform) leaked test statistics and scaled the two sets with
    # different min/max, making them inconsistent.
    train_features = scaler.fit_transform(train_features)
    test_features = scaler.transform(test_features)

    clf = LogisticRegression(penalty='l2', solver='liblinear', C=1.0, class_weight='balanced')
    clf.fit(train_features, train_labels)

    # Probability of the positive class (label 1) for every sample;
    # column slicing replaces the manual per-row extraction loops.
    test_one_preds = clf.predict_proba(test_features)[:, 1]
    train_one_preds = clf.predict_proba(train_features)[:, 1]

    # Save test-set results (probability values) for later comparison.
    np.save('D:/lung_cancer/data/two_result/two_LR_labels.npy', test_labels)
    np.save('D:/lung_cancer/data/two_result/two_LR_preds.npy', test_one_preds)

    auc_curve(test_labels, test_one_preds)
    # auc_curve(train_labels, train_one_preds)
    pr_curve(test_labels, test_one_preds)
    # pr_curve(train_labels, train_one_preds)


# Script entry point: run the full training pipeline when executed directly.
if __name__ == '__main__':
    train()
