# Comparison of learning curves across different models
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error, r2_score, roc_auc_score, roc_curve, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import validation_curve
from sklearn.model_selection import learning_curve

from sklearn.metrics import f1_score,precision_score,recall_score,roc_auc_score,accuracy_score,roc_curve
import matplotlib.pyplot as plt
from xgboost.sklearn import XGBClassifier
import lightgbm as lgb

# Load the engineered feature set; 'Outcome' is the binary target column.
data = pd.read_csv('../featureEngineering/featuredData.csv')
y = data['Outcome']
X = data.drop(['Outcome'],axis=1)

# Hold out 20% of the data with a fixed seed for reproducibility.
# NOTE(review): X_train/X_test/y_train/y_test are never used below — the
# learning curves are computed on the full X, y via cross-validation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)

# Plot learning curves to diagnose each model's fit (bias vs. variance)
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot the learning curve of ``estimator`` on the given data.

    Parameters
    ----------
    estimator : classifier implementing the scikit-learn estimator API.
    title : str
        Title of the figure.
    X : array-like
        Feature matrix.
    y : array-like
        Target vector.
    ylim : tuple of (ymin, ymax), optional
        Limits for the y-axis of the plot.
    cv : int, cross-validation generator, or None, optional
        Cross-validation strategy forwarded to
        ``sklearn.model_selection.learning_curve``.  ``None`` uses the
        library default (5-fold), which matches the value previously
        hard-coded here.
    train_sizes : array-like
        Relative training-set sizes at which scores are evaluated.
    """
    plt.figure()
    # BUG FIX: ``cv`` used to be ignored (the call hard-coded cv=5); it is
    # now forwarded.  Passing cv=None preserves the old 5-fold behaviour.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=1, train_sizes=train_sizes)
    # Mean +/- one standard deviation across the CV folds.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # Shaded bands show the fold-to-fold spread around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    plt.legend(loc="best")
    # FIX: modern matplotlib expects a boolean here, not the string "on".
    plt.grid(True)
    if ylim:
        plt.ylim(ylim)
    plt.title(title)
    plt.show()

# Plot learning curves for each candidate classifier on the full data set
# (useful for diagnosing fit with a limited number of samples).  All models
# use default hyper-parameters except LightGBM, which keeps its hand-tuned
# settings.
# NOTE(review): ``gamma=0.35`` was dropped from LGBMClassifier — ``gamma``
# is an XGBoost parameter, not a LightGBM one, and was only being passed
# through as an unrecognised keyword argument.
models = [
    ("lr", LogisticRegression()),
    ("kr", KNeighborsClassifier()),
    ("svm", SVC()),
    ("forest", RandomForestClassifier()),
    ("tr", DecisionTreeClassifier()),
    ("Gbdt", GradientBoostingClassifier()),
    ("gbm", LGBMClassifier(learning_rate=0.01, max_depth=5,
                           n_estimators=500, num_leaves=50,
                           min_child_weight=12)),
]
for name, model in models:
    plot_learning_curve(model, name, X, y, ylim=(0.5, 1.01),
                        train_sizes=np.linspace(.1, 1.0, 5))
