import pandas as pd
import numpy as np

# plotting
#import seaborn as sn
import matplotlib.pyplot as plt


from matplotlib import pyplot

# Load the engineered training data.
train = pd.read_csv("train_FE_6.csv")

print('Train info', train.info())

# High-cardinality categorical columns (already mean-encoded elsewhere);
# the raw versions are dropped before training.
Cat_features_MEncoder = ['site_id', 'site_domain', 'app_id', 'app_domain',
                         'device_id', 'device_ip', 'device_model',
                         'C14', 'C17', 'C20']

# Split target from features: 'click' is the label, everything else
# (minus the raw categorical columns above) is the design matrix.
y_train = train['click']
X_train = train.drop(Cat_features_MEncoder + ['click'], axis=1)
print('X_train head', X_train.head())
print('X_train info', X_train.info())

# Column names retained for the feature-importance plot later on.
feat_names = X_train.columns


# Baseline: logistic regression with default hyper-parameters.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

lr = LogisticRegression()

# 5-fold cross-validation to estimate generalization log-loss.
# For classification with an integer cv, cross_val_score uses
# StratifiedKFold by default.
loss = cross_val_score(lr, X_train, y_train, cv=5, scoring='neg_log_loss')

# Scores are negated log-loss, so flip the sign for display.
print('LR logloss of each fold is: ', -loss)
print('LR cv logloss is:', -loss.mean())


# LR with L1 regularization, cross-validated over the inverse
# regularization strength C.
from sklearn.linear_model import LogisticRegressionCV

Cs = [1, 10, 100, 1000]

# Many samples (60K+) and ~93 features with an L1 penalty -> use the
# 'saga' solver (added in sklearn 0.19), which supports L1 on large data.
# LogisticRegressionCV is faster than an equivalent GridSearchCV because
# it reuses the regularization path across C values.
lrcv_L1 = LogisticRegressionCV(Cs=Cs, cv=5, scoring='neg_log_loss',
                               penalty='l1', solver='saga', multi_class='ovr')
lrcv_L1.fit(X_train, y_train)

print('LR+L1 lrcv_L1.scores_', lrcv_L1.scores_)

# scores_ maps class label -> array of shape (n_folds, n_Cs); average over
# the folds.  NOTE(review): indexing with [1] assumes the positive class is
# labeled 1 — confirm against the 'click' column values.
scores = np.mean(lrcv_L1.scores_[1], axis=0)

# Plot mean CV log-loss (scores are negated log-loss, so flip the sign).
plt.plot(np.log10(Cs), -scores)
plt.xlabel('log(C)')
plt.ylabel('logloss')
plt.title('LR+L1')
plt.show()

# Feature importance: show the 10 most negative and 10 most positive
# coefficients of the fitted L1 model.
coefs = pd.Series(lrcv_L1.coef_[0], index=feat_names)
print("Lasso picked " + str(sum(coefs != 0)) + " features and eliminated the other " +
      str(sum(coefs == 0)) + " features")
imp_coefs = pd.concat([coefs.sort_values().head(10),
                       coefs.sort_values().tail(10)])
imp_coefs.plot(kind="barh")
plt.title("Coefficients in the Lasso Model")
plt.show()


# LR with L2 regularization, cross-validated over C.
from sklearn.linear_model import LogisticRegressionCV

Cs = [1, 10, 100, 1000]

# Many samples (60K+) and ~93 features with an L2 penalty -> the default
# solver handles this fine; kept default for comparison with GridSearchCV.
lr_cv_L2 = LogisticRegressionCV(Cs=Cs, cv=5, scoring='neg_log_loss',
                                penalty='l2', multi_class='ovr')
lr_cv_L2.fit(X_train, y_train)
print('lr_cv_L2.scores_', lr_cv_L2.scores_)

# Average the per-fold scores.  NOTE(review): [1] assumes the positive
# class is labeled 1 — confirm against the 'click' column values.
scores = np.mean(lr_cv_L2.scores_[1], axis=0)

# Plot mean CV log-loss (scores are negated log-loss, so flip the sign).
plt.plot(np.log10(Cs), -scores)
plt.xlabel('log(C)')
plt.ylabel('logloss')
plt.title('LR+L2')
plt.show()

# GridSearchCV over both the penalty type and C.
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression

# Hyper-parameters to tune.  Try L1 and L2 penalties with a solver that
# supports both: 'liblinear' handles l1 and l2 (the modern default solver
# 'lbfgs' would raise on penalty='l1').
penaltys = ['l1', 'l2']
Cs = [0.001, 0.01, 100, 1000]
tuned_parameters = dict(penalty=penaltys, C=Cs)

lr_penalty = LogisticRegression(solver='liblinear')
# return_train_score=True is required to read mean/std_train_score from
# cv_results_ below (it defaults to False since sklearn 0.21).
grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5,
                    scoring='neg_log_loss', return_train_score=True)
grid.fit(X_train, y_train)

print('-grid.best_score_', -grid.best_score_)
print('grid.best_params_', grid.best_params_)

# CV error curves: negate the means because scoring is neg_log_loss.
test_means = -grid.cv_results_['mean_test_score']
test_stds = grid.cv_results_['std_test_score']
train_means = -grid.cv_results_['mean_train_score']
train_stds = grid.cv_results_['std_train_score']

# ParameterGrid iterates parameter names in sorted order ('C' outer,
# 'penalty' inner), so reshaping to (n_Cs, n_penaltys) puts one penalty
# per column.
n_Cs = len(Cs)
number_penaltys = len(penaltys)
test_scores = np.array(test_means).reshape(n_Cs, number_penaltys)
train_scores = np.array(train_means).reshape(n_Cs, number_penaltys)
test_stds = np.array(test_stds).reshape(n_Cs, number_penaltys)
train_stds = np.array(train_stds).reshape(n_Cs, number_penaltys)

x_axis = np.log10(Cs)
for i, penalty in enumerate(penaltys):
    pyplot.errorbar(x_axis, test_scores[:, i], yerr=test_stds[:, i], label=penalty + ' Test')
    pyplot.errorbar(x_axis, train_scores[:, i], yerr=train_stds[:, i], label=penalty + ' Train')

plt.legend()
plt.xlabel('log(C)')
plt.ylabel('logloss')
plt.title('LogisticGridSearchCV_C')
plt.show()
