#第1个机器学习的pythondemo，波士顿房价预测
#通过线性、向量机、KNN、决策树等算法实验
#2020-03-13 济南office by elvis
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# Load the Boston housing dataset and sanity-check it for missing values.
# NOTE(review): hard-coded absolute local path — consider a relative path or
# CLI argument so the script runs on other machines.
data = pd.read_csv("/Users/elvis/Documents/deep/数据集/boston/boston_housing.csv")
# Number of columns containing at least one null (0 means the data is complete).
print(data.isnull().any().sum())
# Feature selection: rank each candidate column against the target with the
# univariate F-test (f_regression) and keep the 3 strongest predictors.
x = data[['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT']]
y = data[['MEDV']]
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
# The original bound the fitted instance to the name `SelectKBest`, shadowing
# the class itself; use a distinct name so the class stays usable.
selector = SelectKBest(f_regression, k=3)
selector.fit(x, y)
# Columns surviving the k=3 cut.
print(x.columns[selector.get_support()])
# The three selected features, kept as a DataFrame for the models below.
features = data[['RM','PTRATIO','LSTAT']]

# Feature normalization: scale every selected column to [0, 1].
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# MinMaxScaler scales each column independently, so one fit_transform over the
# whole frame replaces the original per-column loop — which also assigned into
# a slice of `data` and triggered pandas' SettingWithCopyWarning.
features = pd.DataFrame(scaler.fit_transform(features),
                        columns=features.columns,
                        index=features.index)

#font ={'family':'SimHei'}
#matplotlib.rc('font',**font)
#pd.plotting.scatter_matrix(features, alpha=0.7, figsize=(6,6), diagonal='hist')
#plt.show()
#数据集拆分
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(features, y, test_size=0.3,random_state=33)
#线性回归
from sklearn import linear_model
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
lr = linear_model.LinearRegression()
lr_predict = cross_val_predict(lr,x_train, y_train, cv=5)
lr_score = cross_val_score(lr, x_train, y_train, cv=5)
lr_meanscore = lr_score.mean()
print(lr_meanscore)

# SVR (support-vector regression): compare three kernels with 5-fold CV on the
# training split. The three identical stanzas are folded into one helper.
from sklearn.svm import SVR

def _cv_eval(model):
    """Return (out-of-fold predictions, per-fold CV scores) for `model`."""
    pred = cross_val_predict(model, x_train, y_train, cv=5)
    scores = cross_val_score(model, x_train, y_train, cv=5)
    return pred, scores

linear_svr = SVR(kernel = 'linear')
linear_svr_predict, linear_svr_score = _cv_eval(linear_svr)
linear_svr_meanscore = linear_svr_score.mean()
print(linear_svr_meanscore)

poly_svr = SVR(kernel = 'poly')
poly_svr_predict, poly_svr_score = _cv_eval(poly_svr)
poly_svr_meanscore = poly_svr_score.mean()
print(poly_svr_meanscore)

rbf_svr = SVR(kernel = 'rbf')
rbf_svr_predict, rbf_svr_score = _cv_eval(rbf_svr)
rbf_svr_meanscore = rbf_svr_score.mean()
print(rbf_svr_meanscore)

# KNN: sweep n_neighbors over 1..20 with 5-fold CV to pick the best setting.
from sklearn.neighbors import KNeighborsRegressor
score=[]
for n_neighbors in range(1,21):
    knn = KNeighborsRegressor(n_neighbors, weights = 'uniform' )
    # Only the mean CV score matters for the sweep; the original also ran
    # cross_val_predict here and discarded the result (pure wasted work).
    knn_score = cross_val_score(knn, x_train, y_train, cv=5)
    score.append(knn_score.mean())
# Plot against the actual n_neighbors values (1..20); the original plotted
# against the list index (0..19), shifting the curve left by one.
plt.plot(range(1, 21), score)
plt.xlabel('n-neighbors')
plt.ylabel('mean-score')
#plt.show()

# NOTE(review): the author's note said scores fall off after 3, yet the code
# fixes n_neighbors at 2 — on the original index-based plot, x=2 corresponds
# to n_neighbors=3, so 3 may be the intended value. Confirm before relying on
# this choice.
n_neighbors=2
knn = KNeighborsRegressor(n_neighbors, weights = 'uniform' )
knn_predict = cross_val_predict(knn, x_train, y_train, cv=5)
knn_score = cross_val_score(knn, x_train, y_train, cv=5)
knn_meanscore = knn_score.mean()
print(knn_meanscore)

# Decision tree: sweep max_depth over 1..10 with 5-fold CV, same scheme as
# the KNN sweep above.
from sklearn.tree import DecisionTreeRegressor
score=[]
for n in range(1,11):
    dtr = DecisionTreeRegressor(max_depth = n)
    # As with KNN, only the mean score is needed for the sweep; the original
    # also computed cross_val_predict here and threw the result away.
    dtr_score = cross_val_score(dtr, x_train, y_train, cv=5)
    score.append(dtr_score.mean())
plt.plot(np.linspace(1,10,10), score)
plt.xlabel('max_depth')
plt.ylabel('mean-score')

# Depth chosen from the sweep above.
n=4
dtr = DecisionTreeRegressor(max_depth = n)
dtr_predict = cross_val_predict(dtr, x_train, y_train, cv=5)
dtr_score = cross_val_score(dtr, x_train, y_train, cv=5)
dtr_meanscore = dtr_score.mean()
print(dtr_meanscore)

# Collect every model's per-fold CV scores for a side-by-side comparison.
evaluating = {
        'lr':lr_score,
        'linear_svr':linear_svr_score,
        'poly_svr':poly_svr_score,
        'rbf_svr':rbf_svr_score,
        'knn':knn_score,
        'dtr':dtr_score
        }
evaluating = pd.DataFrame(evaluating)
evaluating.hist(color='k',alpha=0.6,figsize=(8,7))
#plt.show()
# Hyper-parameter fine-tuning (penalty terms etc.) is deliberately skipped.
# sort_values returns a new Series — the original computed the ranking and
# discarded it; print it so the comparison is actually visible.
print(evaluating.mean().sort_values(ascending=False))
print(evaluating)
print("===========================")

# Final evaluation: fit every model on the full training split and score it
# on the held-out test set. The six identical stanzas share one helper.
def _fit_and_score(model):
    """Fit `model` on the training split; return (test predictions, R^2 score).

    y is flattened with ravel() because fitting with a single-column
    DataFrame raises sklearn's DataConversionWarning for SVR/KNN.
    """
    model.fit(x_train, y_train.values.ravel())
    return model.predict(x_test), model.score(x_test, y_test)

rbf_svr_y_predict, rbf_svr_y_predict_score = _fit_and_score(rbf_svr)
knn_y_predict, knn_y_predict_score = _fit_and_score(knn)
poly_svr_y_predict, poly_svr_y_predict_score = _fit_and_score(poly_svr)
dtr_y_predict, dtr_y_predict_score = _fit_and_score(dtr)
lr_y_predict, lr_y_predict_score = _fit_and_score(lr)
linear_svr_y_predict, linear_svr_y_predict_score = _fit_and_score(linear_svr)

predict_score = {
        'lr':lr_y_predict_score,
        'linear_svr':linear_svr_y_predict_score,
        'poly_svr':poly_svr_y_predict_score,
        'rbf_svr':rbf_svr_y_predict_score,
        'knn':knn_y_predict_score,
        'dtr':dtr_y_predict_score
        }
predict_score = pd.DataFrame(predict_score, index=['score']).transpose()
# sort_values returns a new frame — the original discarded it, so the printed
# table was unsorted; assign it back before printing.
predict_score = predict_score.sort_values(by='score', ascending=False)
print(predict_score)


