import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt


def FELog_lr(train, printSwitch):
    """Log-transform and standardize the Pima diabetes features, then
    evaluate LogisticRegression with 5-fold CV and a C/penalty grid search.

    Parameters
    ----------
    train : pd.DataFrame
        Training data with the feature columns and a 'Target' label column.
        The input is not modified (a copy is taken).
    printSwitch : int
        When 1, print intermediate diagnostics (NaN counts, head, info).

    Side effects
    ------------
    Writes 'FE_pima-indians-diabetes_log.csv' and
    'LogisticGridSearchCV_C.png', and shows a matplotlib figure.
    """
    # Work on a copy so the caller's DataFrame is not mutated in place.
    train = train.copy()
    # In this dataset a 0 in these columns denotes a missing measurement,
    # not a real value — recode as NaN before imputation.
    nan_col_names = ['Plasma_glucose_concentration', 'blood_pressure',
                     'Triceps_skin_fold_thickness', 'serum_insulin', 'BMI']
    # np.nan: np.NaN was removed in NumPy 2.0.
    train[nan_col_names] = train[nan_col_names].replace(0, np.nan)
    # Impute missing values with per-column medians.
    medians = train.median()
    train = train.fillna(medians)
    if printSwitch == 1:
        print(train.isnull().sum())

    # Split labels from features.
    y_train = train['Target']
    X_train = train.drop(["Target"], axis=1)

    # log1p transform; suffix column names to mark transformed features.
    feat_names = X_train.columns + "_log"
    X_train_log = pd.DataFrame(columns=feat_names, data=np.log1p(X_train).values)

    # Standardize: fit (mean/std) on the training data, then transform it.
    ms_log = StandardScaler()
    feat_names_log = X_train_log.columns  # keep names for the CSV output
    X_train_log = ms_log.fit_transform(X_train_log)

    # Persist the engineered features with the target appended.
    # reset_index avoids silent misalignment in concat when `train` came in
    # with a non-default index (the feature frame has a fresh RangeIndex).
    y = y_train.reset_index(drop=True).rename('target')
    train_log = pd.concat(
        [pd.DataFrame(columns=feat_names_log, data=X_train_log), y], axis=1)
    train_log.to_csv('FE_pima-indians-diabetes_log.csv', index=False, header=True)
    if printSwitch == 1:
        print(train_log.head())
        train_log.info()

    # Baseline logistic regression evaluated by 5-fold cross-validation.
    lr = LogisticRegression()
    loss = cross_val_score(lr, X_train_log, y_train, cv=5, scoring='neg_log_loss')
    loss_accura = cross_val_score(lr, X_train_log, y_train, cv=5, scoring='accuracy')

    print('logloss of each fold is: ', -loss)
    print('cv logloss is:', -loss.mean())

    print('logloss_accura of each fold is: ', loss_accura)
    print('cv logloss_accura is:', loss_accura.mean())

    # Grid-search penalty type and inverse regularization strength C.
    # liblinear is used because it supports both L1 and L2 penalties.
    penaltys = ['l1', 'l2']
    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
    tuned_parameters = dict(penalty=penaltys, C=Cs)

    lr_penalty = LogisticRegression(solver='liblinear')
    grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5,
                        scoring='neg_log_loss', return_train_score=True)
    grid.fit(X_train_log, y_train)
    print('log grid best score is:', -grid.best_score_)
    print('log grid best params is:', grid.best_params_)

    # Reshape flat CV results to (n_Cs, n_penaltys). ParameterGrid orders
    # parameters alphabetically ('C' outer, 'penalty' inner), so rows
    # correspond to Cs and columns to penalties.
    n_Cs = len(Cs)
    number_penaltys = len(penaltys)
    test_scores = np.array(grid.cv_results_['mean_test_score']).reshape(n_Cs, number_penaltys)
    test_stds = np.array(grid.cv_results_['std_test_score']).reshape(n_Cs, number_penaltys)

    # Plot the CV logloss curve per penalty over log10(C).
    x_axis = np.log10(Cs)
    for i, penalty in enumerate(penaltys):
        plt.errorbar(x_axis, -test_scores[:, i], yerr=test_stds[:, i],
                     label=penalty + ' Test')

    plt.legend()
    plt.xlabel('log(C)')
    plt.ylabel('logloss')
    plt.savefig('LogisticGridSearchCV_C.png')
    plt.show()
