﻿# -*- coding: utf-8 -*-
"""Predicting water comsumption
# created by yyh. in 2021/5/21.
# last modified  in 2021/6/8.

# using XGBoost and Sklearn Models
# Randomforest

版本说明：
"""
import lightgbm as lgb
import xgboost as xgb
import sklearn as skln
#import tensorflow as tf
import pandas as pd
import numpy as np

# Print the version of every imported library so a run can be reproduced;
# the string literal below records the versions the script was developed on.
print(lgb.__version__)
print(xgb.__version__)
print(skln.__version__)
#print(tf.__version__)
print(pd.__version__)
print(np.__version__)
'''
3.1.0
1.2.1
0.24.1
1.12.0
1.0.3
1.18.1
exit(0)
'''

#%% Dataset preparation

"""**Loading the data:**"""

# trainSet = pd.read_csv('data/train.csv')
# testSet = pd.read_csv('data/test.csv')

# Output directories: intermediate artifacts and the final submission.
OUT_PATH ='./user_data/'
RESULT_PATH ='./prediction_result/'

trainSet = pd.read_csv('./raw_data/train.csv')
testSet = pd.read_csv('./raw_data/test.csv')

# Comparison showed log-transformed targets predicted slightly worse, so the
# raw values are used (the .apply(np.log1p) variants are left commented out).
trainSet['logA'] = trainSet['A厂']#.apply(lambda x: np.log1p(x))
trainSet['logB'] = trainSet['B厂']#.apply(lambda x: np.log1p(x))
# Holiday flag: 1 = Spring Festival, 2 = National Day.
#trainSet['假期'] = trainSet['假期'].fillna(0)
trainSet['假期'] = 0
# The index ranges below are hand-picked row positions of holiday periods in
# the training CSV -- NOTE(review): re-verify them if the raw data changes.
trainSet.loc[ (trainSet.index >= 43) & (trainSet.index<= 52),'假期'] = 1
trainSet.loc[ (trainSet.index >= 272) & (trainSet.index<= 279),'假期'] = 2
trainSet.loc[ (trainSet.index >= 395) & (trainSet.index<= 406),'假期'] = 1 # Spring Festival plus 2 extra leading days
trainSet.loc[ (trainSet.index >= 637) & (trainSet.index<= 644),'假期'] = 2
trainSet.loc[ (trainSet.index >= 748) & (trainSet.index<= 838),'假期'] = 1 # COVID-19 period
trainSet.loc[ (trainSet.index >= 1003) & (trainSet.index<= 1010),'假期'] = 2


# (Abandoned experiment, kept for the record): scale up Feb-Apr 2020 to
# compensate for COVID-19, mainly plant B. It scored slightly worse, so the
# raw data is left untouched.
#trainSet.loc[ (trainSet.index >= 750) & (trainSet.index< 840),'logA'] = 1.05*trainSet['A厂']
#trainSet.loc[ (trainSet.index >= 750) & (trainSet.index< 840),'logB'] = 1.3*trainSet['B厂']

# Combined consumption of both plants (currently not used as a label).
trainSet['logC'] = trainSet['logA']+trainSet['logB']#.apply(lambda x: np.log1p(x))


# Mirror the same columns on the test set (raw values, no log transform).
testSet['logA'] = testSet['A厂']#.apply(lambda x: np.log1p(x))
testSet['logB'] = testSet['B厂']#.apply(lambda x: np.log1p(x))
testSet['logC'] = testSet['A厂']+testSet['B厂']#.apply(lambda x: np.log1p(x))
testSet['假期'] = 0 # testSet['假期'].fillna(0)
# Only one holiday period falls inside the test horizon.
testSet.loc[ (testSet.index >= 102) & (testSet.index<= 106),'假期'] = 1

#%% Calendar features derived from the date column
trainSet['year'] = pd.DatetimeIndex(trainSet['日期']).year
trainSet['dayofyear'] = pd.DatetimeIndex(trainSet['日期']).dayofyear
trainSet['month'] = pd.DatetimeIndex(trainSet['日期']).month
trainSet['day'] = pd.DatetimeIndex(trainSet['日期']).day
# NOTE(review): DatetimeIndex.week is deprecated/removed in newer pandas;
# the version log above pins pandas 1.0.3 where it still works.
trainSet['week'] = pd.DatetimeIndex(trainSet['日期']).week
trainSet['weekday'] = pd.DatetimeIndex(trainSet['日期']).dayofweek
# Quarter feature was tried and made plant A slightly worse; left out.
#trainSet['quarter']=pd.DatetimeIndex(trainSet['日期']).quarter

print('all data shape:',trainSet.shape)
print(trainSet.head(5))

testSet['year'] = pd.DatetimeIndex(testSet['日期']).year
testSet['dayofyear'] = pd.DatetimeIndex(testSet['日期']).dayofyear
testSet['month'] = pd.DatetimeIndex(testSet['日期']).month
testSet['day'] = pd.DatetimeIndex(testSet['日期']).day
testSet['week'] = pd.DatetimeIndex(testSet['日期']).week
testSet['weekday'] = pd.DatetimeIndex(testSet['日期']).dayofweek
#testSet['quarter']=pd.DatetimeIndex(testSet['日期']).quarter

# Kept aside to stamp the date column onto the prediction output later.
timeColumn = testSet['日期']

# Inference feature matrix: drop the date and every target-derived column.
testX = testSet.drop(['日期', 'A厂','B厂', 'logA','logB','logC'], axis=1)#values

# Outlier handling: keep only rows within k sigma of the column mean.
# A 3-sigma cut was tried and hurt the score, so k = 4 is used, which in
# practice removes nothing. Element 0 filters on logA (for plant A),
# element 1 on logB (for plant B).
trainSetWithoutOutliers = []
for target_col in ('logA', 'logB'):
    deviation = np.abs(trainSet[target_col] - trainSet[target_col].mean())
    keep_mask = deviation <= (4 * trainSet[target_col].std())
    trainSetWithoutOutliers.append(trainSet[keep_mask])
'''
异常值
A厂:
2018/2/11
2018/2/12
2019/1/26
2019/1/27

            日期      A厂      B厂   假期  ...  month  day  week  weekday
41   2018/2/11  112335  126067  0.0  ...      2   11     6        6
42   2018/2/12   96845  120781  0.0  ...      2   12     7        0
390  2019/1/26  115940  204986  0.0  ...      1   26     4        5
391  2019/1/27  102598  197851  0.0  ...      1   27     4        6

B厂:
2018/2/16
2018/2/17
2018/2/18
2019/2/3
2019/2/4
2019/2/5
2019/2/6
2019/2/7
2019/2/8
2019/2/9
2020/1/25
2020/1/26
2020/1/27

  日期      A厂     B厂   假期  ...  month  day  week  weekday
46   2018/2/16  121030  81818  1.0  ...      2   16     7        4
47   2018/2/17  120634  83329  1.0  ...      2   17     7        5

[13 rows x 14 columns]

print(trainSetWithoutOutliers[0])
print(trainSetWithoutOutliers[1])
'''

#%% 空缺值填充
from sklearn.ensemble import RandomForestRegressor,RandomForestClassifier

#%% 导入库
from sklearn.model_selection import StratifiedKFold, TimeSeriesSplit
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
# from sklearn.tree import DecisionTreeRegressor, export_graphviz
# from numpy.random import RandomState
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import make_scorer

#from catboost import CatBoostRegressor

#%% Cross-validated training: one independent model set per plant

submission = []
TData_X = []
TData_Y = []
# Two passes: step 0 models plant A, step 1 models plant B; the two plants
# are modeled separately under the assumption they are unrelated.
for step in range (0,2):
    if step==0 :
        # trainSetWithoutOutliers = trainSet[np.abs(trainSet['logA'] - trainSet['logA'].mean())
        #                                    <= (3 * trainSet['logA'].std())]
        # print('shape after outlier removal:', trainSetWithoutOutliers.shape)
        X = trainSetWithoutOutliers[step].drop(['日期', 'A厂','B厂', 'logA', 'logB','logC'], axis=1)#.values # 'A厂',
        Y = trainSetWithoutOutliers[step]['logA']#.values
        # NOTE(review): Y is a Series, so assigning .columns only sets a stray
        # attribute -- presumably Y.name was intended; the rename further down
        # relies on the Series keeping its original 'logA' name. Verify.
        Y.columns = ['logA']
        #trainSet = trainSetWithoutOutliers
        colName = 'A厂'

    else :
        # trainSetWithoutOutliers = trainSet[np.abs(trainSet['logB'] - trainSet['logB'].mean())
        #                                    <= (3 * trainSet['logB'].std())]
        X = trainSetWithoutOutliers[step].drop(['日期', 'A厂','B厂', 'logA', 'logB','logC'], axis=1)#.values #'B厂',
        # print('shape after outlier removal:', trainSetWithoutOutliers.shape)
        #trainSet = trainSetWithoutOutliers
        Y = trainSetWithoutOutliers[step]['logB']
        # NOTE(review): same stray-attribute assignment as above.
        Y.columns = ['logB']
        colName = 'B厂'

    TData_X.append(X)
    TData_Y.append(Y)

    #trainMatrix = xgb.DMatrix(X, label=Y)
    print('** training 训练集 shape {}:{} '.format(X.shape,colName))
    #print(X.head())

    # One shared random seed for XGB and RF; it affects the model results.
    # NOTE(review): only the last assignment takes effect -- seed is 666666;
    # the intermediate 42 is a leftover from experimentation.
    seed = 666666
    seed = 42
    seed = 666666
    # 定义评价指标
    # define MSE
    # Evaluation metric: RMSE. Defined (and re-defined) inside the step loop;
    # currently only referenced by the commented-out make_scorer line above.
    def loss_func(truth, prediction):
        """Return the root-mean-squared error between *truth* and *prediction*.

        Parameters:
        truth -- 1-D array-like of ground-truth values
        prediction -- 1-D array-like of predicted values, same length

        The log1p/expm1 transform variants were tried and abandoned, so the
        values are compared on their original scale.
        """
        # np.asarray replaces the original element-by-element list copies;
        # it accepts lists, Series and ndarrays alike.
        truth = np.asarray(truth, dtype=float)
        prediction = np.asarray(prediction, dtype=float)
        return np.sqrt(np.mean((truth - prediction) ** 2))


    # Evaluation metric: mean absolute percentage error (in percent).
    def mape(y_true, y_pred):
        """Return the MAPE between *y_true* and *y_pred*, in percent.

        Parameters:
        y_true -- 1-D array-like of ground-truth values (must be non-zero)
        y_pred -- 1-D array-like of predicted values, same length

        Returns:
        float -- the MAPE metric; undefined if any element of y_true is 0.
        """
        # np.asarray generalizes the original Series-only arithmetic to plain
        # lists; np.mean replaces the manual sum(...)/n with one C-level pass.
        y_true = np.asarray(y_true, dtype=float)
        y_pred = np.asarray(y_pred, dtype=float)
        return np.mean(np.abs((y_true - y_pred) / y_true)) * 100


    """# Random Forest model"""

    param_grid = {
        'n_estimators': [50, 100, 300],
        # 'n_estimators': [50, 80, 100, 120],
        'max_depth': [4, 8, 16],
        'max_features': ['sqrt', 'log2', 'auto']
        # 'learning_rate':[0.1,0.06,0.03,0.01]
    }

    # scorer = make_scorer(loss_func, greater_is_better=False)
    #scorer = make_scorer(mape, greater_is_better=False)

    ## 5折交叉验证
    cv = 5#20
    k_fold = StratifiedKFold(n_splits=cv, shuffle=True, random_state=seed)

    ## 随机森林
    estimators = 40
    rg_rf = [RandomForestRegressor(random_state=seed, n_estimators=estimators) for i in range(cv)]

    ## Xgboost
    rg_xgb = [xgb.XGBRegressor(learning_rate=0.1, n_estimators=1000, objective='reg:squarederror') for i in range(cv)]

    ## LightGBM
    param = {
                      'boosting': 'gbdt',
                      'objective': 'regression',
                      'metric': 'mse',#''mse',
                      'max_depth':-1,
                      'num_leaves': 16,
                      'learning_rate': 0.04,
                      'feature_fraction': 0.8,
                      'bagging_fraction': 1,
                      'bagging_freq': 0,
                      'bagging_seed': seed ,
                      'min_data_in_leaf':3,
                      'verbose': -1}
    num_round = 1500
    rg_lgbm = []

    ## 保存交叉验证结果
    df_cv = [pd.DataFrame() for i in range(cv)]

    # Manual fold counter into the per-fold model/result lists.
    i = 0
    # Stratify folds on the 'day' feature so each fold covers every
    # day-of-month; the dummy zeros array stands in for X in the split API.
    for train_index, cv_index in k_fold.split(np.zeros(len(X)), X['day']): #'day'
        X_train_fold, X_cv_fold = X.iloc[train_index,:], \
            X.iloc[cv_index,:]
        y_train_fold, y_cv_fold = Y.iloc[train_index], \
            Y.iloc[cv_index]

        #print(X_cv_fold.head())

        # Fresh LightGBM regressor per fold; the rg_lgbm list keeps them
        # for test-time fold averaging.
        regr = lgb.LGBMRegressor(objective='regression', num_leaves=16,
                              learning_rate=0.1, n_estimators=100,metric='mse', min_child_samples=30) #,scoring=scorer neg_mean_squared_error

        rg_lgbm.append(regr)
        rg_lgbm[i].fit(X_train_fold, y_train_fold)

        # Out-of-fold LightGBM predictions, indexed like the validation fold.
        y_cv_pred = pd.DataFrame(rg_lgbm[i].predict(X_cv_fold), index=X_cv_fold.index)
        y_cv_pred.columns = ['Count_lgbm']

        y_cv_fold.columns = [colName]
        y_cv_pred = pd.concat([y_cv_fold, y_cv_pred], axis=1)
        # The truth Series keeps its original name (logA/logB) through
        # concat; rename it to the plant column name for readable output.
        if step == 0:
            y_cv_pred.rename(columns={'logA':colName}, inplace=True)
        else :
            y_cv_pred.rename(columns={'logB':colName}, inplace=True) #logB

        rg_rf[i].fit(X_train_fold, y_train_fold)
        y_cv_pred['Count_rf'] = rg_rf[i].predict(X_cv_fold)

        rg_xgb[i].fit(X_train_fold, y_train_fold)
        y_cv_pred['Count_xgb'] = rg_xgb[i].predict(X_cv_fold)
        # Merge features and per-model predictions for this fold.
        df_cv[i] = pd.concat([X_cv_fold, y_cv_pred], axis=1)
        # Post-hoc holiday scaling (disabled after comparison)!
        # if step == 1:
        #     df_cv[i].loc[df_cv[i].假期 == 1, 'Count_lgbm'] = df_cv[i]['Count_lgbm'] * 0.8
        #     df_cv[i].loc[df_cv[i].假期 == 1, 'Count_xgb'] = df_cv[i]['Count_xgb'] * 0.8
        #     df_cv[i].loc[df_cv[i].假期 == 1, 'Count_rf'] = df_cv[i]['Count_rf'] * 0.8
        #     df_cv[i].loc[df_cv[i].假期 == 2, 'Count_lgbm'] = df_cv[i]['Count_lgbm'] * 1.4
        #     df_cv[i].loc[df_cv[i].假期 == 2, 'Count_xgb'] = df_cv[i]['Count_xgb'] * 1.4
        #     df_cv[i].loc[df_cv[i].假期 == 2, 'Count_rf'] = df_cv[i]['Count_rf'] * 1.4

        i=i+1

    # Stitch all folds back together: every training row appears exactly once
    # as an out-of-fold prediction.
    df_cv2 = pd.concat(df_cv, axis=0)

    # Per-model MAE / MAPE on the out-of-fold predictions; the competition
    # score reported alongside is 1/(1+MAPE).
    df_cv2['mae_rf'] = abs(df_cv2['Count_rf']-df_cv2[colName])
    mae_mean = round(df_cv2['mae_rf'].mean(),2)
    df_cv2['mape_rf'] = abs(df_cv2['Count_rf']-df_cv2[colName]) / df_cv2[colName]
    mape_mean = round(df_cv2['mape_rf'].mean(),4)
    print('训练集随机森林交叉验证Mae: {}, MAPE: {},score:{}'.format(mae_mean, mape_mean,1/(1+mape_mean)))

    df_cv2['mae_xgb'] = abs(df_cv2['Count_xgb']-df_cv2[colName])
    mae_mean = round(df_cv2['mae_xgb'].mean(),2)
    df_cv2['mape_xgb'] = abs(df_cv2['Count_xgb']-df_cv2[colName]) / df_cv2[colName]
    mape_mean = round(df_cv2['mape_xgb'].mean(),4)
    print('训练集Xgboost交叉验证Mae: {}, MAPE: {},score :{}'.format(mae_mean, mape_mean,1/(1+mape_mean)))

    df_cv2['mae_lgbm'] = abs(df_cv2['Count_lgbm']-df_cv2[colName])
    mae_mean = round(df_cv2['mae_lgbm'].mean(),2)
    df_cv2['mape_lgbm'] = abs(df_cv2['Count_lgbm']-df_cv2[colName]) / df_cv2[colName]
    mape_mean = round(df_cv2['mape_lgbm'].mean(),4)
    print('训练集Lightgbm交叉验证Mse: {}, MAPE: {},score:{}'.format(mae_mean, mape_mean,1/(1+mape_mean)))

    # Ensemble blends: evaluate three (lgbm, rf, xgb) weightings on the
    # out-of-fold predictions to pick the test-time blend.
    w1,w2,w3 = 0.3,0.3,0.4
    df_cv2['Count_rh_1'] = w1*df_cv2['Count_lgbm']+w2*df_cv2['Count_rf']+w3*df_cv2['Count_xgb']
    df_cv2['mae_Count_rh_1'] = abs(df_cv2['Count_rh_1']-df_cv2[colName])
    mae_mean = round(df_cv2['mae_Count_rh_1'].mean(),2)
    df_cv2['mape_Count_rh_1'] = abs(df_cv2['Count_rh_1']-df_cv2[colName]) / df_cv2[colName]
    mape_mean = round(df_cv2['mape_Count_rh_1'].mean(),4)
    print('融合 XGB（0.4），交叉验证Mae: {}, MAPE: {},score:{}'.format(mae_mean, mape_mean,1/(1+mape_mean)))

    w1,w2,w3 = 0.3,0.4,0.3
    df_cv2['Count_rh_2'] = w1*df_cv2['Count_lgbm']+w2*df_cv2['Count_rf']+w3*df_cv2['Count_xgb']
    df_cv2['mae_Count_rh_2'] = abs(df_cv2['Count_rh_2']-df_cv2[colName])
    mae_mean = round(df_cv2['mae_Count_rh_2'].mean(),2)
    df_cv2['mape_Count_rh_2'] = abs(df_cv2['Count_rh_2']-df_cv2[colName]) / df_cv2[colName]
    mape_mean = round(df_cv2['mape_Count_rh_2'].mean(),4)
    print('融合Lightgbm（0.4） 交叉验证Mae: {}, MAPE: {},score:{}'.format(mae_mean, mape_mean,1/(1+mape_mean)))

    #w1,w2,w3 = 0.4,0.3,0.3
    w1, w2, w3 = 0.0, 0.5, 0.5
    df_cv2['Count_rh_3'] = w1*df_cv2['Count_lgbm']+w2*df_cv2['Count_rf']+w3*df_cv2['Count_xgb']
    df_cv2['mae_Count_rh_3'] = abs(df_cv2['Count_rh_3']-df_cv2[colName])
    mae_mean = round(df_cv2['mae_Count_rh_3'].mean(),2)
    df_cv2['mape_Count_rh_3'] = abs(df_cv2['Count_rh_3']-df_cv2[colName]) / df_cv2[colName]
    mape_mean = round(df_cv2['mape_Count_rh_3'].mean(),4)
    print('融合RF（0.4） 交叉验证Mae: {}, MAPE: {},score:{}'.format(mae_mean, mape_mean,1/(1+mape_mean)))

    # Dump the full out-of-fold frame (features + predictions + metrics).
    df_cv2.to_csv(OUT_PATH+str(step)+'_valid.csv', index=True,encoding='utf8')

    #continue

    #exit(1)
    #%% Test-set predictions
    #y_pred = pd.DataFrame(submit_id, index=pd.to_datetime(df_16['timestamp']), columns=['id'])
    # Start the prediction frame from the test dates; the per-model
    # prediction columns are merged in below.
    y_pred = pd.DataFrame({
        "日期": timeColumn
        #colName: pred
    })

    # Random forest: average the fold models' predictions, clip negatives to
    # zero, then round to whole units.
    y_pred_1 = pd.DataFrame(np.mean(np.array([r.predict(testX) for r in rg_rf]).T, axis=1),index=testX.index)
    y_pred_1.columns=['Count_rf']
    y_pred_1.loc[y_pred_1.Count_rf<0, 'Count_rf']=0

    y_pred_1['Count_rf']=y_pred_1['Count_rf'].round()
    # Post-hoc adjustment (disabled)!
    # if step == 0:
    #     y_pred_1.loc[(y_pred_1.index <9 ), 'Count_rf'] = y_pred_1['Count_rf']*0.95
    #     y_pred_1.loc[(y_pred_1.index >=10)  &  (y_pred_1.index <= 61), 'Count_rf']=y_pred_1['Count_rf']*0.96
    # if step == 1:
    #     y_pred_1.loc[(y_pred_1.index >=21)  &  (y_pred_1.index <= 24), 'Count_rf']=y_pred_1['Count_rf']*1.1
    #     y_pred_1.loc[(y_pred_1.index >=101)  &  (y_pred_1.index <= 108), 'Count_rf']=y_pred_1['Count_rf']*0.8
        #y_pred_1.loc[y_pred_1.index >= 108, 'Count_rf']=y_pred_1['Count_rf']*1.4
        #y_pred_1.loc[(y_pred_1.index >=128)  &  (y_pred_1.index <= 139), 'Count_rf']=y_pred_1['Count_rf']*1.1


    # LightGBM: same fold-averaging scheme (no negative clipping here).
    y_pred_2 = pd.DataFrame(np.mean(np.array([r.predict(testX) for r in rg_lgbm]).T, axis=1),index=testX.index)
    y_pred_2.columns=['Count_lgbm']
    #y_pred_2.loc[y_pred_2.Count_lgbm<0, 'Count_lgbm']=0
    y_pred_2['Count_lgbm']=y_pred_2['Count_lgbm'].round()

    # Post-hoc adjustment (disabled)!
    # if step == 0:
    #     y_pred_2.loc[(y_pred_2.index <25 ), 'Count_lgbm'] = y_pred_2['Count_lgbm']*1.12
    #     y_pred_2.loc[(y_pred_2.index >=40)  &  (y_pred_2.index <= 51), 'Count_lgbm']=y_pred_2['Count_lgbm']*0.95
    #     y_pred_2.loc[(y_pred_2.index >=52)  &  (y_pred_2.index <= 61), 'Count_lgbm']=y_pred_2['Count_lgbm']*0.85
    #     y_pred_2.loc[(y_pred_2.index >=65)  &  (y_pred_2.index <= 73), 'Count_lgbm']=y_pred_2['Count_lgbm']*0.95
    #     y_pred_2.loc[(y_pred_2.index >= 82) & (y_pred_2.index <= 106), 'Count_lgbm'] = y_pred_2['Count_lgbm'] * 1.15
    #     y_pred_2.loc[(y_pred_2.index >= 107) & (y_pred_2.index <= 113), 'Count_lgbm'] = y_pred_2['Count_lgbm'] * 0.95
    #     y_pred_2.loc[(y_pred_2.index >= 120), 'Count_lgbm'] = y_pred_2['Count_lgbm'] * 1.25
    # if step == 1:
    #     y_pred_2.loc[(y_pred_2.index <6 ), 'Count_lgbm'] = y_pred_2['Count_lgbm']*0.9
    #     y_pred_2.loc[(y_pred_2.index >=7)  &  (y_pred_2.index <= 27), 'Count_lgbm']=y_pred_2['Count_lgbm']*1.3
    #     y_pred_2.loc[(y_pred_2.index >=53)  &  (y_pred_2.index <= 61), 'Count_lgbm']=y_pred_2['Count_lgbm']*1.15
    #     y_pred_2.loc[(y_pred_2.index >=62)  &  (y_pred_2.index <= 64), 'Count_lgbm']=y_pred_2['Count_lgbm']*0.95
    #     y_pred_2.loc[(y_pred_2.index >=75)  &  (y_pred_2.index <= 89), 'Count_lgbm']=y_pred_2['Count_lgbm']*0.88
    #     y_pred_2.loc[(y_pred_2.index >=90)  &  (y_pred_2.index <= 93), 'Count_lgbm']=y_pred_2['Count_lgbm']*1.2
    #     y_pred_2.loc[(y_pred_2.index >=102)  &  (y_pred_2.index <= 106), 'Count_lgbm']=y_pred_2['Count_lgbm']*1.55
    #     y_pred_2.loc[(y_pred_2.index >=114)  &  (y_pred_2.index <= 124), 'Count_lgbm']=y_pred_2['Count_lgbm']*1.8
    #     y_pred_2.loc[y_pred_2.index >= 125, 'Count_lgbm']=y_pred_2['Count_lgbm']*1.25


    # XGBoost: same fold-averaging scheme.
    y_pred_3 = pd.DataFrame(np.mean(np.array([r.predict(testX) for r in rg_xgb]).T, axis=1),index=testX.index)
    y_pred_3.columns=['Count_xgb']
    #y_pred_3.loc[y_pred_3.Count_xgb<0, 'Count_xgb']=0
    y_pred_3['Count_xgb']=(y_pred_3['Count_xgb']).round()
    # Post-hoc adjustment (disabled)!
    # if step == 0:
    #     y_pred_3.loc[(y_pred_3.index <9 ), 'Count_xgb'] = y_pred_3['Count_xgb']*0.95
    #     y_pred_3.loc[(y_pred_3.index >=10)  &  (y_pred_3.index <= 61), 'Count_xgb']=y_pred_3['Count_xgb']*0.96
    # if step == 1:
    #     y_pred_3.loc[(y_pred_3.index >=21)  &  (y_pred_3.index <= 24), 'Count_xgb']=y_pred_3['Count_xgb']*1.1
    #     y_pred_3.loc[(y_pred_3.index >=101)  & (y_pred_3.index <= 108), 'Count_xgb']=y_pred_3['Count_xgb']*0.8
        #y_pred_3.loc[y_pred_3.index >= 108 , 'Count_xgb'] = y_pred_3['Count_xgb'] * 1.4
        #y_pred_3.loc[(y_pred_3.index >= 128) & (y_pred_3.index <= 139), 'Count_xgb'] = y_pred_3['Count_xgb'] * 1.1

    # Attach each model's predictions to the date frame by row index.
    y_pred = pd.merge(y_pred, y_pred_1, left_index=True, right_index=True, how='left')
    y_pred = pd.merge(y_pred, y_pred_2, left_index=True, right_index=True, how='left')
    y_pred = pd.merge(y_pred, y_pred_3, left_index=True, right_index=True, how='left')

    # w1,w2,w3 = 0.4,0.3,0.3
    # y_pred['Count_rh'] = w1*y_pred['Count_lgbm']+w2*y_pred['Count_rf']+w3*y_pred['Count_xgb']
    # Per the validation above, this (lgbm, rf, xgb) blend worked best.
    #w1, w2, w3 = 0.2,0.6,0.2
    w1, w2, w3 = 0.4,0.3,0.3

    #w1, w2, w3 = 0.0,0.5,0.5
    y_pred['Count_rh'] = w1 * y_pred['Count_lgbm'] + w2 * y_pred['Count_rf'] + w3 * y_pred['Count_xgb']

    # Hand-tuned piecewise scaling of the blended forecast by test row index
    # (step 0 = plant A, step 1 = plant B). NOTE(review): these factors were
    # tuned against this specific test horizon and will not generalize.
    if step == 0:
        y_pred.loc[(y_pred.index <8 ), 'Count_rh'] = y_pred['Count_rh']*1.1
        y_pred.loc[(y_pred.index >=8) & (y_pred.index <= 27), 'Count_rh'] = y_pred['Count_rh']*1.05
        y_pred.loc[(y_pred.index >=40)  &  (y_pred.index <= 51), 'Count_rh']=y_pred['Count_rh']*0.95
        y_pred.loc[(y_pred.index >=52)  &  (y_pred.index <= 60), 'Count_rh']=y_pred['Count_rh']*0.85
        y_pred.loc[(y_pred.index >=61)  &  (y_pred.index < 62), 'Count_rh']=y_pred['Count_rh']*1.15
        y_pred.loc[(y_pred.index >=65)  &  (y_pred.index <= 71), 'Count_rh']=y_pred['Count_rh']*0.95
        y_pred.loc[(y_pred.index >=72)  &  (y_pred.index <= 81), 'Count_rh']=y_pred['Count_rh']*1.05
        y_pred.loc[(y_pred.index >= 82) & (y_pred.index <= 104), 'Count_rh'] = y_pred['Count_rh'] * 1.15
        y_pred.loc[(y_pred.index >= 107) & (y_pred.index <= 116), 'Count_rh'] = y_pred['Count_rh'] * 0.9
        y_pred.loc[(y_pred.index >= 120), 'Count_rh'] = y_pred['Count_rh'] * 1.1
        y_pred.loc[(y_pred.index >= 141), 'Count_rh'] = y_pred['Count_rh'] * 1.05
    if step == 1:
        y_pred.loc[(y_pred.index <6 ), 'Count_rh'] = y_pred['Count_rh']*0.95
        y_pred.loc[(y_pred.index >=7)  &  (y_pred.index <= 27), 'Count_rh']=y_pred['Count_rh']*1.15
        y_pred.loc[(y_pred.index >=36)  &  (y_pred.index <= 52), 'Count_rh']=y_pred['Count_rh']*1.1
        y_pred.loc[(y_pred.index >=53)  &  (y_pred.index <= 61), 'Count_rh']=y_pred['Count_rh']*1.15
        y_pred.loc[(y_pred.index >=61)  &  (y_pred.index <= 63), 'Count_rh']=y_pred['Count_rh']*0.88
        y_pred.loc[(y_pred.index >=73)  &  (y_pred.index <= 89), 'Count_rh']=y_pred['Count_rh']*0.9
        y_pred.loc[(y_pred.index >=102)  &  (y_pred.index <= 106), 'Count_rh']=y_pred['Count_rh']*1.2
        y_pred.loc[(y_pred.index >=114)  &  (y_pred.index <= 124), 'Count_rh']=y_pred['Count_rh']*1.3
        y_pred.loc[(y_pred.index >=125)  &  (y_pred.index <= 127), 'Count_rh']=y_pred['Count_rh']*1.2
        y_pred.loc[y_pred.index >= 128, 'Count_rh']=y_pred['Count_rh']*1.1

    # Secondary blend kept for comparison in the merged CSV output.
    w1, w2, w3 = 0.1,0.5,0.4
    y_pred['Count_rh2'] = w1 * y_pred['Count_lgbm'] + w2 * y_pred['Count_rf'] + w3 * y_pred['Count_xgb']

    #w1,w2,w3,w4 = 0.3,0.3,0.25,0.15
    #y_pred['Count_rh'] = w1*y_pred['Count_lgbm']+w2*y_pred['Count_xgb']+w3*y_pred['Count_catb']+w4*y_pred['Count_rf']
    # Per-model result export: one CSV per model per plant (step).
    # BUG FIX: the RF export listed 'Count_rf' twice, which wrote a duplicated
    # column; each file should hold the date column plus one prediction column,
    # consistent with the other three exports.
    y_pred[['日期','Count_rh']].to_csv(OUT_PATH+str(step)+'submit_rh_log.csv',index= False)
    y_pred[['日期','Count_lgbm']].to_csv(OUT_PATH+str(step)+'submit_lgbm_log.csv',index= False)
    y_pred[['日期','Count_rf']].to_csv(OUT_PATH+str(step)+'submit_rf_log.csv',index= False)
    y_pred[['日期','Count_xgb']].to_csv(OUT_PATH+str(step)+'submit_xgb_log.csv',index= False)
    # Combined export of all model columns for this step.
    y_pred.to_csv(OUT_PATH+str(step)+'submit.csv',index= False)

    # Build the cross-step submission frames: on step 0 (plant A) each frame
    # is created with the date column and plant A predictions; on step 1 the
    # plant B column is appended to the same frames.
    if step == 0:
        submission = pd.DataFrame({
            "日期": timeColumn,
            colName:y_pred['Count_rh']
        })
        submission[colName] = submission[colName].astype("int")
        pred_all = submission

        submission = pd.DataFrame({
            "日期": timeColumn,
            colName:y_pred['Count_rf']
        })
        submission[colName] = submission[colName].astype("int")
        pred_all_rf = submission

        submission = pd.DataFrame({
            "日期": timeColumn,
            colName:y_pred['Count_lgbm']
        })
        submission[colName] = submission[colName].astype("int")
        pred_all_lgbm = submission

        submission = pd.DataFrame({
            "日期": timeColumn,
            colName:y_pred['Count_xgb']
        })
        submission[colName] = submission[colName].astype("int")
        pred_all_xgb = submission
    else :
        #submission[colName] = submission[colName].astype("int")
        pred_all[colName] = y_pred['Count_rh'].astype("int")
        pred_all_rf[colName] = y_pred['Count_rf'].astype("int")
        pred_all_lgbm[colName] = y_pred['Count_lgbm'].astype("int")
        pred_all_xgb[colName] = y_pred['Count_xgb'].astype("int")

### End Of Adding ##################################################################

#exit(1)
# Final outputs: the ensemble submission plus one file per single model.
# NOTE(review): str(step) below uses the loop variable leaked from the
# for-loop above, so it is always '1' -- presumably intentional shorthand,
# but confirm the expected file names.
pred_all.to_csv(RESULT_PATH+ 'result_1.csv', index=False)
trainSet.to_csv(OUT_PATH+ 'train1.csv', index=False)
#pred_all.to_csv(OUT_PATH+str(step)+ 'submit.csv', index=False)
pred_all_rf.to_csv(OUT_PATH+str(step)+ 'submit_rf.csv', index=False)
pred_all_lgbm.to_csv(OUT_PATH+str(step)+ 'submit_lgbm.csv', index=False)
pred_all_xgb.to_csv(OUT_PATH+str(step)+ 'submit_xgb.csv', index=False)

print('成功生成结果文件：result_1.csv  !')

'''
seed = 42
训练集随机森林交叉验证Mae: 10118.67, MAPE: 0.0469,score:0.9552010698251983
训练集Xgboost交叉验证Mae: 11272.13, MAPE: 0.0516,score :0.9509319132750095
训练集Lightgbm交叉验证Mse: 13870.07, MAPE: 0.0654,score:0.9386146048432514

训练集随机森林交叉验证Mae: 8067.54, MAPE: 0.0417,score:0.9599692809830085
训练集Xgboost交叉验证Mae: 7914.4, MAPE: 0.0407,score :0.9608917075045643
训练集Lightgbm交叉验证Mse: 10971.89, MAPE: 0.0576,score:0.94553706505295

** training 训练集 shape (1031, 6):A厂 
训练集随机森林交叉验证Mae: 10118.67, MAPE: 0.0469,score:0.9552010698251983
训练集Xgboost交叉验证Mae: 11272.13, MAPE: 0.0516,score :0.9509319132750095
训练集Lightgbm交叉验证Mse: 13870.07, MAPE: 0.0654,score:0.9386146048432514
融合 XGB交叉验证Mse: 11003.32, MAPE: 0.0511,score:0.9513842641042718
融合Lightgbm 交叉验证Mse: 10891.15, MAPE: 0.0507,score:0.9517464547444561
融合RF 交叉验证Mse: 11272.17, MAPE: 0.0525,score:0.9501187648456058
** training 训练集 shape (1022, 6):B厂 
训练集随机森林交叉验证Mae: 8067.54, MAPE: 0.0417,score:0.9599692809830085
训练集Xgboost交叉验证Mae: 7914.4, MAPE: 0.0407,score :0.9608917075045643
训练集Lightgbm交叉验证Mse: 10971.89, MAPE: 0.0576,score:0.94553706505295
融合 XGB交叉验证Mse: 8306.85, MAPE: 0.0432,score:0.9585889570552149
融合Lightgbm 交叉验证Mse: 8323.29, MAPE: 0.0433,score:0.9584970765839165
融合RF 交叉验证Mse: 8616.31, MAPE: 0.0449,score:0.9570293808019906

seed=666666
** training 训练集 shape (1031, 6):A厂 
训练集随机森林交叉验证Mae: 10252.3, MAPE: 0.0476,score:0.9545628102329132
训练集Xgboost交叉验证Mae: 11497.07, MAPE: 0.0523,score :0.9502993442934524
训练集Lightgbm交叉验证Mse: 13573.0, MAPE: 0.0637,score:0.940114693992667
融合 XGB交叉验证Mse: 11027.85, MAPE: 0.051,score:0.9514747859181732
融合Lightgbm 交叉验证Mse: 10899.95, MAPE: 0.0506,score:0.9518370454978108
融合RF 交叉验证Mse: 11248.84, MAPE: 0.0522,score:0.9503896597605018
** training 训练集 shape (1022, 6):B厂 
训练集随机森林交叉验证Mae: 7712.8, MAPE: 0.0397,score:0.9618159084351254
训练集Xgboost交叉验证Mae: 7879.2, MAPE: 0.0412,score :0.9604302727621975
训练集Lightgbm交叉验证Mse: 10918.05, MAPE: 0.0572,score:0.9458948164964057
融合 XGB交叉验证Mse: 8107.18, MAPE: 0.0422,score:0.9595087315294569
融合Lightgbm 交叉验证Mse: 8093.13, MAPE: 0.0421,score:0.9596008060646771
融合RF 交叉验证Mse: 8416.9, MAPE: 0.0439,score:0.9579461634256154

----- CV 10 ，结果好些 ---------------------
训练集随机森林交叉验证Mae: 9785.73, MAPE: 0.0454,score:0.9565716472163764
训练集Xgboost交叉验证Mae: 10873.85, MAPE: 0.0498,score :0.9525623928367307
训练集Lightgbm交叉验证Mse: 13176.62, MAPE: 0.0616,score:0.9419743782969102
融合 XGB（0.4），交叉验证Mae: 10511.59, MAPE: 0.0488,score:0.9534706331045004
融合Lightgbm（0.4） 交叉验证Mae: 10395.38, MAPE: 0.0483,score:0.9539254030334827
融合RF（0.4） 交叉验证Mae: 10734.92, MAPE: 0.0499,score:0.9524716639679969
** training 训练集 shape (1022, 6):B厂 
训练集随机森林交叉验证Mae: 9666.33, MAPE: 0.0451,score:0.9568462348100661
训练集Xgboost交叉验证Mae: 10932.61, MAPE: 0.0503,score :0.9521089212605922
训练集Lightgbm交叉验证Mse: 13047.06, MAPE: 0.0614,score:0.9421518748822311
融合 XGB（0.4），交叉验证Mae: 10374.86, MAPE: 0.0484,score:0.9538344143456696
融合Lightgbm（0.4） 交叉验证Mae: 10259.85, MAPE: 0.0479,score:0.95428953144384
融合RF（0.4） 交叉验证Mae: 10587.2, MAPE: 0.0495,score:0.9528346831824678

-- CV=20 -----
** training 训练集 shape (1031, 6):A厂 
训练集随机森林交叉验证Mae: 9562.99, MAPE: 0.0445,score:0.9573958831977023
训练集Xgboost交叉验证Mae: 10618.8, MAPE: 0.0487,score :0.9535615523982074
训练集Lightgbm交叉验证Mse: 13046.06, MAPE: 0.0611,score:0.9424182452172275
融合 XGB（0.4），交叉验证Mae: 10284.39, MAPE: 0.0478,score:0.954380606986066
融合Lightgbm（0.4） 交叉验证Mae: 10176.94, MAPE: 0.0474,score:0.9547450830628221
融合RF（0.4） 交叉验证Mae: 10530.94, MAPE: 0.0491,score:0.9531979792202842
** training 训练集 shape (1022, 6):B厂 
训练集随机森林交叉验证Mae: 9310.11, MAPE: 0.0434,score:0.9584052137243626
训练集Xgboost交叉验证Mae: 10630.71, MAPE: 0.0486,score :0.9536524890329964
训练集Lightgbm交叉验证Mse: 12561.42, MAPE: 0.0588,score:0.9444654325651681
融合 XGB（0.4），交叉验证Mae: 10107.78, MAPE: 0.047,score:0.9551098376313276
融合Lightgbm（0.4） 交叉验证Mae: 9968.24, MAPE: 0.0464,score:0.9556574923547401
融合RF（0.4） 交叉验证Mae: 10289.32, MAPE: 0.0479,score:0.95428953144384


训练集随机森林交叉验证Mae: 14960.11, MAPE: 0.0731,score:0.9318796011555307
训练集Xgboost交叉验证Mae: 15659.77, MAPE: 0.0761,score :0.9292816652727441
训练集Lightgbm交叉验证Mse: 17328.76, MAPE: 0.0844,score:0.9221689413500553
融合 XGB（0.4），交叉验证Mae: 15152.95, MAPE: 0.0736,score:0.9314456035767512
融合Lightgbm（0.4） 交叉验证Mae: 15074.55, MAPE: 0.0733,score:0.9317059536010436
融合RF（0.4） 交叉验证Mae: 15326.22, MAPE: 0.0745,score:0.9306654257794322


** training 训练集 shape (1031, 8):A厂 
训练集随机森林交叉验证Mae: 17615.59, MAPE: 0.0843,score:0.9222539887485013
训练集Xgboost交叉验证Mae: 20056.69, MAPE: 0.094,score :0.9140767824497257
训练集Lightgbm交叉验证Mse: 22397.5, MAPE: 0.1053,score:0.9047317470370035
融合 XGB（0.4），交叉验证Mae: 19179.84, MAPE: 0.0907,score:0.9168423947923352
融合Lightgbm（0.4） 交叉验证Mae: 18938.85, MAPE: 0.0897,score:0.9176837661741762
融合RF（0.4） 交叉验证Mae: 19384.83, MAPE: 0.0917,score:0.9160025648071816
** training 训练集 shape (1015, 8):B厂 
训练集随机森林交叉验证Mae: 14024.74, MAPE: 0.068,score:0.9363295880149812
训练集Xgboost交叉验证Mae: 14535.86, MAPE: 0.0699,score :0.9346667912889054
训练集Lightgbm交叉验证Mse: 16613.15, MAPE: 0.0803,score:0.9256687957048968
融合 XGB（0.4），交叉验证Mae: 14308.29, MAPE: 0.069,score:0.9354536950420954
融合Lightgbm（0.4） 交叉验证Mae: 14239.06, MAPE: 0.0688,score:0.9356287425149701
融合RF（0.4） 交叉验证Mae: 14498.99, MAPE: 0.07,score:0.9345794392523364

0.4，0，3，0.3
** training 训练集 shape (1033, 8):B厂 
训练集随机森林交叉验证Mae: 12986.94, MAPE: 0.0622,score:0.9414422895876482
训练集Xgboost交叉验证Mae: 13536.31, MAPE: 0.0649,score :0.9390553103577801
训练集Lightgbm交叉验证Mse: 15394.37, MAPE: 0.0744,score:0.9307520476545048
融合 XGB（0.4），交叉验证Mae: 13397.6, MAPE: 0.0638,score:0.9400263207369806
融合Lightgbm（0.4） 交叉验证Mae: 13352.6, MAPE: 0.0636,score:0.940203083866115
融合RF（0.4） 交叉验证Mae: 13590.11, MAPE: 0.0648,score:0.9391435011269722
集成 OK !

0.5/0.5
训练集随机森林交叉验证Mae: 10363.52, MAPE: 0.0487,score:0.9535615523982074
训练集Xgboost交叉验证Mae: 11802.21, MAPE: 0.0543,score :0.9484966328369534
训练集Lightgbm交叉验证Mse: 13960.72, MAPE: 0.0661,score:0.937998311603039
融合 XGB（0.4），交叉验证Mae: 11230.68, MAPE: 0.0527,score:0.9499382540134892
融合Lightgbm（0.4） 交叉验证Mae: 11086.44, MAPE: 0.0521,score:0.95047999239616
融合RF（0.4） 交叉验证Mae: 10648.91, MAPE: 0.0496,score:0.9527439024390243
** training 训练集 shape (1033, 8):B厂 
训练集随机森林交叉验证Mae: 12986.94, MAPE: 0.0622,score:0.9414422895876482
训练集Xgboost交叉验证Mae: 13536.31, MAPE: 0.0649,score :0.9390553103577801
训练集Lightgbm交叉验证Mse: 15394.37, MAPE: 0.0744,score:0.9307520476545048
融合 XGB（0.4），交叉验证Mae: 13397.6, MAPE: 0.0638,score:0.9400263207369806
融合Lightgbm（0.4） 交叉验证Mae: 13352.6, MAPE: 0.0636,score:0.940203083866115
融合RF（0.4） 交叉验证Mae: 12924.22, MAPE: 0.0618,score:0.9417969485778865


===6/9 后调整
** training 训练集 shape (1035, 8):A厂 
训练集随机森林交叉验证Mae: 10480.05, MAPE: 0.0488,score:0.9534706331045004
训练集Xgboost交叉验证Mae: 11466.6, MAPE: 0.0528,score :0.9498480243161095
训练集Lightgbm交叉验证Mse: 12941.88, MAPE: 0.0607,score:0.9427736400490242
融合 XGB（0.4），交叉验证Mae: 10943.14, MAPE: 0.0509,score:0.9515653249595585
融合Lightgbm（0.4） 交叉验证Mae: 10855.22, MAPE: 0.0506,score:0.9518370454978108
融合RF（0.4） 交叉验证Mae: 10691.23, MAPE: 0.0495,score:0.9528346831824678
** training 训练集 shape (1033, 8):B厂 
训练集随机森林交叉验证Mae: 7487.77, MAPE: 0.0372,score:0.9641342074816815
训练集Xgboost交叉验证Mae: 8013.15, MAPE: 0.0394,score :0.9620935154897055
训练集Lightgbm交叉验证Mse: 10364.77, MAPE: 0.0523,score:0.9502993442934524
融合 XGB（0.4），交叉验证Mae: 8030.62, MAPE: 0.0399,score:0.9616309260505818
融合Lightgbm（0.4） 交叉验证Mae: 7966.9, MAPE: 0.0396,score:0.9619084263178145
融合RF（0.4） 交叉验证Mae: 7407.48, MAPE: 0.0366,score:0.9646922631680495
集成 OK !

--- 6/9,隐藏全部后调整。含季节参数
** training 训练集 shape (1035, 8):A厂 
训练集随机森林交叉验证Mae: 10353.14, MAPE: 0.0482,score:0.9540164090822362
训练集Xgboost交叉验证Mae: 11228.6, MAPE: 0.0516,score :0.9509319132750095
训练集Lightgbm交叉验证Mse: 12705.23, MAPE: 0.0597,score:0.9436633009342266
融合 XGB（0.4），交叉验证Mae: 10735.2, MAPE: 0.0499,score:0.9524716639679969
融合Lightgbm（0.4） 交叉验证Mae: 10651.4, MAPE: 0.0496,score:0.9527439024390243
融合RF（0.4） 交叉验证Mae: 10422.07, MAPE: 0.0482,score:0.9540164090822362
** training 训练集 shape (1035, 8):B厂 
训练集随机森林交叉验证Mae: 7723.89, MAPE: 0.039,score:0.9624639076034649
训练集Xgboost交叉验证Mae: 7753.37, MAPE: 0.0387,score :0.962741888899586
训练集Lightgbm交叉验证Mse: 10339.01, MAPE: 0.0527,score:0.9499382540134892
融合 XGB（0.4），交叉验证Mae: 7995.25, MAPE: 0.0402,score:0.9613535858488752
融合Lightgbm（0.4） 交叉验证Mae: 8000.8, MAPE: 0.0403,score:0.9612611746611555
融合RF（0.4） 交叉验证Mae: 7399.32, MAPE: 0.0372,score:0.9641342074816815
集成 OK !

--- 不加新冠影响
** training 训练集 shape (1035, 8):A厂 
训练集随机森林交叉验证Mae: 10389.77, MAPE: 0.0485,score:0.9537434430138293
训练集Xgboost交叉验证Mae: 11005.04, MAPE: 0.051,score :0.9514747859181732
训练集Lightgbm交叉验证Mse: 12680.25, MAPE: 0.0599,score:0.9434852344560807
融合 XGB（0.4），交叉验证Mae: 10693.52, MAPE: 0.0499,score:0.9524716639679969
融合Lightgbm（0.4） 交叉验证Mae: 10627.21, MAPE: 0.0496,score:0.9527439024390243
融合RF（0.4） 交叉验证Mae: 10384.31, MAPE: 0.0482,score:0.9540164090822362
** training 训练集 shape (1035, 8):B厂 
训练集随机森林交叉验证Mae: 7555.71, MAPE: 0.0383,score:0.9631127805065973
训练集Xgboost交叉验证Mae: 7321.6, MAPE: 0.0368,score :0.9645061728395062
训练集Lightgbm交叉验证Mse: 9869.96, MAPE: 0.0502,score:0.9521995810321843
融合 XGB（0.4），交叉验证Mae: 7662.05, MAPE: 0.0386,score:0.9628345850182939
融合Lightgbm（0.4） 交叉验证Mae: 7688.42, MAPE: 0.0388,score:0.9626492106276473
融合RF（0.4） 交叉验证Mae: 7119.55, MAPE: 0.0359,score:0.9653441451877594
集成 OK !

*************** 最优选择： 不含季节参数 ，直接原始数据，不加新冠系数扩大 *****************
** training 训练集 shape (1035, 7):A厂 
训练集随机森林交叉验证Mae: 10016.73, MAPE: 0.0472,score:0.9549274255156609
训练集Xgboost交叉验证Mae: 11287.5, MAPE: 0.0524,score :0.9502090459901178
训练集Lightgbm交叉验证Mse: 13478.99, MAPE: 0.0642,score:0.9396729937981582
融合 XGB（0.4），交叉验证Mae: 10801.41, MAPE: 0.051,score:0.9514747859181732
融合Lightgbm（0.4） 交叉验证Mae: 10674.29, MAPE: 0.0505,score:0.9519276534983341
融合RF（0.4） 交叉验证Mae: 10167.2, MAPE: 0.0476,score:0.9545628102329132
** training 训练集 shape (1035, 7):B厂 
训练集随机森林交叉验证Mae: 7058.53, MAPE: 0.0363,score:0.9649715333397665
训练集Xgboost交叉验证Mae: 7243.74, MAPE: 0.0373,score :0.9640412609659692
训练集Lightgbm交叉验证Mse: 10904.08, MAPE: 0.0582,score:0.945000945000945
融合 XGB（0.4），交叉验证Mae: 7647.61, MAPE: 0.0399,score:0.9616309260505818
融合Lightgbm（0.4） 交叉验证Mae: 7637.97, MAPE: 0.0398,score:0.9617234083477592
融合RF（0.4） 交叉验证Mae: 6681.61, MAPE: 0.0343,score:0.9668374746205163
集成 OK !

************ 加入新冠系数结果，不如不加。最后选择不加 **********************
训练集随机森林交叉验证Mae: 10118.29, MAPE: 0.0476,score:0.9545628102329132
训练集Xgboost交叉验证Mae: 11298.17, MAPE: 0.0519,score :0.950660709192889
训练集Lightgbm交叉验证Mse: 13585.72, MAPE: 0.0646,score:0.9393199323689648
融合 XGB（0.4），交叉验证Mae: 10837.76, MAPE: 0.0508,score:0.9516558812333461
融合Lightgbm（0.4） 交叉验证Mae: 10722.19, MAPE: 0.0504,score:0.952018278750952
融合RF（0.4） 交叉验证Mae: 10243.87, MAPE: 0.0476,score:0.9545628102329132
** training 训练集 shape (1035, 7):B厂 
训练集随机森林交叉验证Mae: 7528.33, MAPE: 0.0383,score:0.9631127805065973
训练集Xgboost交叉验证Mae: 7643.21, MAPE: 0.0386,score :0.9628345850182939
训练集Lightgbm交叉验证Mse: 11261.14, MAPE: 0.0596,score:0.9437523593808984
融合 XGB（0.4），交叉验证Mae: 8073.13, MAPE: 0.0416,score:0.9600614439324116
融合Lightgbm（0.4） 交叉验证Mae: 8078.49, MAPE: 0.0416,score:0.9600614439324116
融合RF（0.4） 交叉验证Mae: 7168.15, MAPE: 0.0363,score:0.9649715333397665
集成 OK !

'''