import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from scipy.special import jn
from IPython.display import display, clear_output
import time
warnings.filterwarnings('ignore')
# 模型预测
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
# 数据降维处理的
import lightgbm as lgb
import xgboost as xgb
# 参数搜索和评价
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold, StratifiedKFold, train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
from ngboost import NGBRegressor
# Load the training and test-B datasets with pandas (space-separated files).
Train_data = pd.read_csv('D:\\机器学习\\used_car_train_20200313\\used_car_train_20200313.csv', sep=' ')
TestB_data = pd.read_csv('D:\\机器学习\\used_car_testB_20200421\\used_car_testB_20200421.csv', sep=' ')
# Print the dimensions of both datasets.
print('Train date shape:',Train_data.shape)
print('TestB date shape:',TestB_data.shape)
# Peek at the first rows (notebook-style: the bare expression only renders in a REPL/notebook).
Train_data.head()
# .info() shows column names, dtypes and missing-value (NaN) counts.
Train_data.info()
TestB_data.info() # dtype and missing-value overview per column
## List the column names.
Train_data.columns
## .describe() gives summary statistics for the numeric columns.
Train_data.describe()
TestB_data.describe()
# Keep only non-object (i.e. numeric) columns as candidate features.
numberical_cols = Train_data.select_dtypes(exclude= 'object' ).columns
print(numberical_cols)

# Exploratory scatter plots of candidate columns against the target price.
# The original repeated the same five-line plot stanza per column; one helper
# and a loop produce the identical figures in the identical order.
def _scatter_vs_price(col_name):
    """Scatter-plot one Train_data column against price, titled by the column name."""
    plt.scatter(Train_data[col_name], Train_data.price)
    plt.ylabel("price")
    plt.grid(True, which='major', axis='y')
    plt.title(col_name)
    plt.show()

for _col in ['SaleID', 'bodyType', 'fuelType', 'gearbox', 'power']:
    _scatter_vs_price(_col)
# Feature selection: keep the numeric columns, dropping identifiers,
# high-cardinality columns and the target itself.
feature_cols = [col for col in numberical_cols if col not in ['SaleID','name','regDate','price','model','brand','regionCode']]

# Build the training matrix, the label vector and the test matrix
# from the selected feature columns.
X_data = Train_data[feature_cols]
Y_data = Train_data['price']

X_test = TestB_data[feature_cols]

print('X train shape:',X_data.shape)
print('X test shape:',X_test.shape)
# Small helper used throughout the script to summarise a numeric array/series.
def Sta_inf(data):
    """Print min, max, mean, peak-to-peak range, std and variance of *data*."""
    summaries = [
        ('_min', np.min),
        ('_max', np.max),
        ('_mean', np.mean),
        ('_ptp', np.ptp),
        ('_std', np.std),
        ('_var', np.var),
    ]
    for label, stat in summaries:
        print(label, stat(data))
print('Sta of label:')
Sta_inf(Y_data)
## Histogram of the label to inspect the price distribution.
plt.hist(Y_data)
plt.show()
plt.close()
# Fill missing values with the sentinel -1 (the tree models used below can split it off).
X_data = X_data.fillna(-1)
X_test = X_test.fillna(-1)
# 5-fold cross-validation with XGBoost to gauge the chosen hyper-parameters.
# Fix: the original used StratifiedKFold, which raises on a continuous
# regression target (stratification requires discrete class labels);
# KFold is the correct splitter for regression.
xgr = xgb.XGBRegressor(n_estimators=120, learning_rate=0.1, gamma=0, subsample=0.8,
                       colsample_bytree=0.9, max_depth=7)  # objective='reg:squarederror' is the default

scores_train = []  # MAE on each fold's training split
scores = []        # MAE on each fold's validation split

# Seeded 5-fold split so the CV result is reproducible.
kf = KFold(n_splits=5, shuffle=True, random_state=0)
for train_ind, val_ind in kf.split(X_data):
    train_x = X_data.iloc[train_ind].values
    train_y = Y_data.iloc[train_ind]
    val_x = X_data.iloc[val_ind].values
    val_y = Y_data.iloc[val_ind]

    xgr.fit(train_x, train_y)
    pred_train_xgb = xgr.predict(train_x)
    pred_xgb = xgr.predict(val_x)

    # Mean absolute error is the competition metric.
    scores_train.append(mean_absolute_error(train_y, pred_train_xgb))
    scores.append(mean_absolute_error(val_y, pred_xgb))

print('Train mae:',np.mean(scores_train))
print('Val mae:',np.mean(scores))

def build_model_xgb(x_train, y_train):
    """Fit an XGBoost regressor with fixed hyper-parameters and return it."""
    params = dict(
        n_estimators=150,
        learning_rate=0.1,
        gamma=0,
        subsample=0.8,
        colsample_bytree=0.9,
        max_depth=7,
    )
    model = xgb.XGBRegressor(**params)
    model.fit(x_train, y_train)
    return model

def build_model_lgb(x_train, y_train):
    """Grid-search the learning rate for a LightGBM regressor.

    Returns the fitted GridSearchCV object (its .predict uses the best model).
    """
    base = lgb.LGBMRegressor(num_leaves=127, n_estimators=150)
    grid = {'learning_rate': [0.01, 0.05, 0.1, 0.2]}
    search = GridSearchCV(base, grid)
    search.fit(x_train, y_train)
    return search

def build_model_rf(x_train, y_train):
    """Fit a 100-tree random-forest regressor (seeded, all CPU cores) and return it."""
    forest = RandomForestRegressor(
        n_estimators=100,
        random_state=42,
        n_jobs=-1,
    )
    # .fit returns the estimator itself, so this returns the fitted model.
    return forest.fit(x_train, y_train)

# AdaBoost model training helper.
def build_model_ada(x_train, y_train):
    """Fit an AdaBoost regressor over decision-tree base learners and return it."""
    base_learner = DecisionTreeRegressor()
    booster = AdaBoostRegressor(base_learner, n_estimators=50, random_state=0)
    booster.fit(x_train, y_train)
    return booster


# Bagging model training helper.
def build_model_bag(x_train, y_train):
    """Fit a 50-estimator bagging regressor (default tree base learner) and return it."""
    # base_estimator is left at its default for compatibility with older scikit-learn.
    bagger = BaggingRegressor(n_estimators=50, random_state=0)
    return bagger.fit(x_train, y_train)

# Hold out 30% of the training data as a validation split (seeded for reproducibility).
x_train, x_val, y_train, y_val = train_test_split(X_data, Y_data, test_size=0.3, random_state=0)

# Train AdaBoost on the split and report its validation MAE.
print('Train AdaBoost...')
model_ada = build_model_ada(x_train, y_train)
val_ada = model_ada.predict(x_val)
MAE_ada = mean_absolute_error(y_val, val_ada)
print('MAE of val with AdaBoost:', MAE_ada)

# Refit AdaBoost on the full training data and predict the test set.
print('Predict ada...')
model_ada_pre = build_model_ada(X_data, Y_data)
subA_ada = model_ada_pre.predict(X_test)
print('Sta of Predict ada:')
Sta_inf(subA_ada)
# Same procedure for Bagging: validate, then refit on everything and predict.
print('Train Bagging...')
model_bag = build_model_bag(x_train, y_train)
val_bag = model_bag.predict(x_val)
MAE_bag = mean_absolute_error(y_val, val_bag)
print('MAE of val with Bagging:', MAE_bag)

print('Predict bag...')
model_bag_pre = build_model_bag(X_data, Y_data)
subA_bag = model_bag_pre.predict(X_test)
print('Sta of Predict bag:')
Sta_inf(subA_bag)
## Fresh 70/30 validation split (NOTE: no random_state here, so this split is not reproducible).
x_train, x_val, y_train, y_val = train_test_split(X_data, Y_data, test_size=0.3)

# Train the random forest on the split and report its validation MAE.
print('Train rf...')
model_rf = build_model_rf(x_train, y_train)
val_rf = model_rf.predict(x_val)
MAE_rf = mean_absolute_error(y_val, val_rf)
print('MAE of val with rf:', MAE_rf)

# Refit on the full training data and predict the test set.
print('Predict rf...')
model_rf_pre = build_model_rf(X_data, Y_data)
subA_rf = model_rf_pre.predict(X_test)
print('Sta of Predict rf:')
Sta_inf(subA_rf)
## Fresh 70/30 validation split for LightGBM (again without a fixed seed).
x_train,x_val,y_train,y_val = train_test_split(X_data,Y_data,test_size=0.3)
print('Train lgb...')
model_lgb = build_model_lgb(x_train,y_train)
val_lgb = model_lgb.predict(x_val)
MAE_lgb = mean_absolute_error(y_val,val_lgb)
print('MAE of val with lgb:',MAE_lgb)

# Refit on the full training data and predict the test set.
print('Predict lgb...')
model_lgb_pre = build_model_lgb(X_data,Y_data)
subA_lgb = model_lgb_pre.predict(X_test)
print('Sta of Predict lgb:')
Sta_inf(subA_lgb)
## XGBoost reuses the lgb split above (no new train_test_split is made here).
print('Train xgb...')
model_xgb = build_model_xgb(x_train,y_train)
val_xgb = model_xgb.predict(x_val)
MAE_xgb = mean_absolute_error(y_val,val_xgb)
print('MAE of val with xgb:',MAE_xgb)

# Refit on the full training data and predict the test set.
print('Predict xgb...')
model_xgb_pre = build_model_xgb(X_data,Y_data)
subA_xgb = model_xgb_pre.predict(X_test)
print('Sta of Predict xgb:')
Sta_inf(subA_xgb)

## Simple weighted blend of rf and lgb: each model's weight is the OTHER
## model's share of the total MAE, so the more accurate model gets the larger
## weight (the two weights sum to 1).
val_Weighted = (1-MAE_rf/(MAE_lgb+MAE_rf))*val_rf+(1-MAE_lgb/(MAE_lgb+MAE_rf))*val_lgb
val_Weighted[val_Weighted<0]=10 # predictions can dip below zero, but a real price cannot be negative
print('MAE of val with Weighted ensemble:',mean_absolute_error(y_val,val_Weighted))
sub_Weighted = (1-MAE_rf/(MAE_lgb+MAE_rf))*subA_rf+(1-MAE_lgb/(MAE_lgb+MAE_rf))*subA_lgb
# Fix: clamp negative TEST predictions too — the original only clamped the
# validation blend, letting negative prices reach the submission file.
sub_Weighted[sub_Weighted<0]=10
'''
# Alternative blend of rf and ada (dead code, kept for reference only):
val_Weighted = (1-MAE_rf/(MAE_ada+MAE_rf))*val_rf+(1-MAE_ada/(MAE_ada+MAE_rf))*val_ada
val_Weighted[val_Weighted<0]=10
print('MAE of val with Weighted ensemble:',mean_absolute_error(y_val,val_Weighted))
# Fix: was subA_lgb — the rf+ada blend should combine subA_rf with subA_ada.
sub_Weighted = (1-MAE_rf/(MAE_ada+MAE_rf))*subA_rf+(1-MAE_ada/(MAE_ada+MAE_rf))*subA_ada
'''
# Inspect the distribution of the blended TEST predictions.
# Fix: the original plotted Y_data (the training labels) here even though its
# comment said it was checking the predictions.
plt.hist(sub_Weighted)
plt.show()
plt.close()
# Assemble the submission file.
sub = pd.DataFrame()
# Fix: use the real SaleID column from the test file; X_test.index is the
# 0..n-1 positional index and does not match the test set's SaleID values.
sub['SaleID'] = TestB_data['SaleID']
sub['price'] = sub_Weighted
# Raw string avoids the invalid '\s' escape-sequence warning; same path bytes.
sub.to_csv(r'D:\机器学习\sub_Weighted4.csv', index=False)
sub.head()
# (A dead, triple-quoted duplicate of this save block — writing
# sub_Weighted5.csv — was removed.)