import warnings

import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt

from sklearn import metrics
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
# NOTE: ConvergenceWarning used to be imported from the private module
# sklearn.linear_model.coordinate_descent, which was removed in
# scikit-learn 0.24.  Its public location is sklearn.exceptions.
from sklearn.exceptions import ConvergenceWarning
from sklearn.tree import DecisionTreeRegressor
from sklearn.decomposition import PCA
# NOTE: the sklearn.grid_search module was removed in scikit-learn 0.20;
# GridSearchCV now lives in sklearn.model_selection (imported above).
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

# Configure matplotlib so Chinese axis labels/titles and the minus sign
# render correctly.
mpl.rcParams['font.sans-serif'] = [u'simHei']
mpl.rcParams['axes.unicode_minus'] = False

# Silence convergence warnings emitted by the coordinate-descent solvers
# (e.g. LassoCV) during cross-validated fitting.
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)

def not_empty(s):
    """Return True if the token ``s`` is not the empty string.

    Used as a ``filter`` predicate to drop the empty fragments produced
    when splitting a line on single spaces with repeated separators.
    """
    is_empty = (s == '')
    return not is_empty

def main():
    """Fit and compare several regressors on the Boston housing data.

    Steps performed, in order:
      1. Parse the irregular whitespace-delimited data file by hand.
      2. Split into train/test sets (80/20, fixed seed).
      3. Min-max scale the features.
      4. Fit a decision tree plus Linear/Lasso/Ridge regressions and plot
         their test-set predictions against the actual values.
      5. Grid-search tree depth across three pipelines (with/without
         scaling and PCA).
      6. Plot test error as a function of tree depth.

    Relies on module-level imports (sklearn, matplotlib, pandas, numpy)
    and opens two blocking matplotlib windows.  All printed/plotted text
    is in Chinese.
    """
    # Names of the 13 predictor columns (kept for reference; not used below).
    names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
    path='../data/boston_housing.data'
    # The file's columns are separated by a variable number of spaces, so read
    # each line as a single field first, then split and convert it manually.
    df=pd.read_csv(path,header=None)
    data=np.empty((len(df),14))
    for i,d in enumerate(df.values):
        # Drop the empty tokens produced by consecutive spaces, then cast
        # every remaining token to float.
        d=map(float,filter(not_empty,d[0].split(' ')))
        data[i]=list(d)

    # First 13 columns are the features; the last column is the target.
    X,Y=np.split(data,(13,),axis=1)
    Y=Y.ravel()

    print('样本数据量:%d, 特征个数：%d' % X.shape)
    print('target样本数据量:%d' % X.shape[0])

    # Train/test split (80/20) with a fixed seed for reproducibility.
    X_train,X_test,Y_train,Y_test=train_test_split(X,Y,train_size=0.8,random_state=14)
    X_train1,X_test1,Y_train1,Y_test1=X_train,X_test,Y_train,Y_test
    print('训练数据集样本数目：%d, 测试数据集样本数目：%d' %(X_train.shape[0],X_test.shape[0]))

    # Min-max scaling, fitted on the training data only.  The scaler ignores
    # the y argument; it is passed purely for API symmetry.
    mm=MinMaxScaler()
    X_train1=mm.fit_transform(X_train1,Y_train1)
    X_test1=mm.transform(X_test1)

    print('原始数据各个特征属性的调整最小值:',mm.min_)
    print('原始数据各个特征属性的缩放数据值:',mm.scale_)

    # Build the regression model: a decision tree (MAE split criterion,
    # depth capped at 7).
    model=DecisionTreeRegressor(criterion='mae',max_depth=7)
    # Train the model.
    model.fit(X_train1,Y_train1)
    # Predict on the held-out test set.
    Y_test_hat=model.predict(X_test1)
    # Evaluate the model (R^2 on the test set).
    score=model.score(X_test1,Y_test1)
    print('Score:',score)
    # Ordinary least-squares linear regression for comparison.
    lr=LinearRegression()
    lr.fit(X_train1,Y_train1)
    lr_Y_test_hat=lr.predict(X_test1)
    lr_score=lr.score(X_test1,Y_test1)
    print('lr_score:', lr_score)
    # Lasso with cross-validated alpha over a log-spaced grid.
    lasso=LassoCV(alphas=np.logspace(-3,1,20))
    lasso.fit(X_train1,Y_train1)
    lasso_Y_test_hat=lasso.predict(X_test1)
    lasso_score=lasso.score(X_test1,Y_test1)
    print('lasso:',lasso_score)
    # Ridge regression with cross-validated alpha over the same grid.
    ridge=RidgeCV(alphas=np.logspace(-3,1,20))
    ridge.fit(X_train1,Y_train1)
    ridge_Y_test_hat=ridge.predict(X_test1)
    ridge_score=ridge.score(X_test1,Y_test1)
    print('ridge:',ridge_score)

    ## 7. Plot actual values vs. the four models' predictions.
    ln_X_test = range(len(X_test1))
    plt.figure(figsize=(12,6),facecolor='w')
    plt.plot(ln_X_test,Y_test1,'r-',lw=2,label=u'实际值')
    plt.plot(ln_X_test,lr_Y_test_hat,'b-',lw=2,label='Linear回归，$R^2$=%.3f' % lr_score)
    plt.plot(ln_X_test,lasso_Y_test_hat,'y-',lw=2,label=u'Lasso回归，$R^2$=%.3f' % lasso_score)
    plt.plot(ln_X_test,ridge_Y_test_hat,'c-',lw=2,label=u'Ridge回归，$R^2$=%.3f' % ridge_score)
    plt.plot(ln_X_test,Y_test_hat,'g-',lw=2,label=u'回归决策树预测值，$R^2$=%.3f' %score)
    plt.xlabel(u'数据编码')
    plt.ylabel(u'租赁价格')
    plt.legend(loc='lower right')
    plt.grid(True)
    plt.title(u'波士顿房屋租赁数据预测')
    plt.show()

    # Hyper-parameter search: three candidate pipelines, from most to least
    # preprocessing.
    pipes=[
        Pipeline([
            ('mms',MinMaxScaler()), ## min-max normalization
            ('pca',PCA()), ## dimensionality reduction
            ('decision',DecisionTreeRegressor(criterion='mse'))
        ]),
        Pipeline([
            ('mms',MinMaxScaler()),
            ('decision',DecisionTreeRegressor(criterion='mse'))
        ]),
        Pipeline([
            ('decision',DecisionTreeRegressor(criterion='mse'))
        ])
    ]

    # Parameter grids, one per pipeline (tree depth 1..20; the PCA pipeline
    # also searches the retained-variance ratio).
    parameters=[
        {
            'pca__n_components':[0.25,0.5,0.75,1],
            'decision__max_depth':np.linspace(1,20,20).astype(np.int8)
        },
        {
            'decision__max_depth':np.linspace(1,20,20).astype(np.int8)
        },
        {
            'decision__max_depth':np.linspace(1,20,20).astype(np.int8)
        }
    ]
    # Grab the (unscaled) split again for the grid search.
    X_train2,X_test2,Y_train2,Y_test2=X_train,X_test,Y_train,Y_test
    for t in range(3):
        pipe=pipes[t]
        gscv=GridSearchCV(pipe,param_grid=parameters[t])
        gscv.fit(X_train2,Y_train2)
        print(t,'score值:',gscv.best_score_,'最优参数列表:',gscv.best_params_)

    # Refit with the best found parameters and report the test accuracy.
    # NOTE(review): max_depth=4 appears hard-coded from a previous search
    # run rather than taken from gscv.best_params_ — confirm.
    mms_best=MinMaxScaler()
    decision3=DecisionTreeRegressor(criterion='mse',max_depth=4)
    X_train3,X_test3,Y_train3,Y_test3=X_train,X_test,Y_train,Y_test
    X_train3=mms_best.fit_transform(X_train3, Y_train3)
    X_test3=mms_best.transform(X_test3)
    decision3.fit(X_train3,Y_train3)
    print('正确率：',decision3.score(X_test3,Y_test3))

    # Examine the test error rate at each tree depth (under/over-fitting).
    X_train4, X_test4, Y_train4, Y_test4 = X_train, X_test, Y_train, Y_test
    depths=np.arange(1,20)
    err_list=[]
    for d in depths:
        clf=DecisionTreeRegressor(criterion='mse',max_depth=d)
        clf.fit(X_train4,Y_train4)

        # Error rate defined as 1 - R^2 on the test set.
        score1=clf.score(X_test4,Y_test4)
        err=1-score1
        err_list.append(err)
        print('%d深度，正确率%.5f' % (d,score1))
    ## Plot error rate vs. tree depth.
    plt.figure(facecolor='w')
    plt.plot(depths,err_list,'ro-',lw=3)
    plt.xlabel(u'决策树深度',fontsize=16)
    plt.ylabel(u'错误率',fontsize=16)
    plt.grid(True)
    plt.title(u'决策树层次太多导致的拟合问题(欠拟合和过拟合)',fontsize=18)
    plt.show()


# Run only when executed as a script, not when this module is imported.
if __name__ == '__main__':
    main()
