# coding: utf-8
import time

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
# NOTE: the private paths sklearn.linear_model.logistic and
# sklearn.ensemble.forest were removed in scikit-learn 0.24;
# the public package locations below are the supported ones.
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, train_test_split

class Model_Process:
    """Train and compare three classifiers (logistic regression, random
    forest, GBDT) on a pre-built feature CSV, print train/test metrics for
    each, and plot the random forest's top-10 feature importances.
    """

    # CSV produced by the upstream feature-engineering step; must contain a
    # binary 'loan_status' target column, all other columns are features.
    data_feature_file = '../../doc/data/feature0a.csv'

    def _report_metrics(self, dataset_name, y_true, y_pred):
        """Print f1 / accuracy / recall for one split of the data.

        sklearn metric functions take (y_true, y_pred) in that order; the
        original code passed them swapped, which silently exchanges
        precision and recall in the output.
        """
        f1 = metrics.f1_score(y_true, y_pred)
        acc = metrics.accuracy_score(y_true, y_pred)
        rec = metrics.recall_score(y_true, y_pred)
        print('在%s上f1_mean的值为%.4f' % (dataset_name, f1), end=' ')
        print('在%s上的精确率的值为%.4f' % (dataset_name, acc), end=' ')
        print('在%s上的查全率的值为%.4f' % (dataset_name, rec), end=' ')

    def _fit_and_report(self, model, X_train, X_test, Y_train, Y_test):
        """Fit `model`, print train and test metrics, then the elapsed
        wall-clock seconds for fit + both evaluations."""
        start = time.time()
        model.fit(X_train, Y_train)
        self._report_metrics('训练集', Y_train, model.predict(X_train))
        self._report_metrics('测试集', Y_test, model.predict(X_test))
        print(time.time() - start)

    def train_predict(self):
        """Load the feature file, train/evaluate the three models, and show
        the top-10 random-forest feature-importance bar chart."""
        df = pd.read_csv(self.data_feature_file)
        Y = df.loan_status
        # Keyword form: positional `axis` was removed in pandas 2.0, and the
        # original line also carried a stray trailing 's' (SyntaxError).
        X = df.drop(columns='loan_status')
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, test_size=0.3, random_state=0)

        print('逻辑回归模型上的效果入下：')
        self._fit_and_report(
            LogisticRegression(), X_train, X_test, Y_train, Y_test)

        print('随机森林效果如下' + '=' * 30)
        # Keep a reference to the fitted forest: its feature_importances_
        # drive the plot below.
        rf = RandomForestClassifier()
        self._fit_and_report(rf, X_train, X_test, Y_train, Y_test)

        print("GBDT上效果如下" + "=" * 30)
        self._fit_and_report(
            GradientBoostingClassifier(), X_train, X_test, Y_train, Y_test)

        # Scale importances so the largest feature scores 100, then plot the
        # ten largest.
        importance = rf.feature_importances_
        importance = 100.0 * (importance / importance.max())
        index = np.argsort(importance)[-10:]
        plt.barh(np.arange(10), importance[index],
                 color='dodgerblue', alpha=0.4)
        print(np.array(X.columns)[index])
        # np.arange(10): the original np.arange(10 + 0.25) yields 11 tick
        # positions for 10 labels, a length mismatch rejected by modern
        # matplotlib.
        plt.yticks(np.arange(10), np.array(X.columns)[index])
        plt.xlabel('Relative importance')
        plt.title('Top 10 Importance Variable')
        plt.show()