#导入必要的库
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
import seaborn as sns
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor   #神经网络回归
from sklearn.metrics import mean_absolute_error    #平均绝对误差MAE
from sklearn.metrics import mean_squared_error    #平均平方误差MSE
from sklearn.ensemble import RandomForestRegressor  #随机森林
from xgboost import XGBRegressor                   #XGBoost
from xgboost import plot_importance                #输出权重
from deepforest import CascadeForestRegressor      #深度森林
import joblib                                      #保存模型
from tqdm import tqdm                              #进度条
plt.style.use('classic')

# Load the training data; parse date-like columns on read.
file_path = 'homework/train_data.csv'
data = pd.read_csv(file_path, parse_dates=True)
key = data.keys()  # kept for later inspection of the column names

# Drop two columns:
#  - 'event_id' only records the event index (carries no signal)
#  - 'c_rcs_estimate' is missing ~50k rows (32.49% of the data), far more than
#    any other column (next worst is ~9k), so it is removed entirely
data = data.drop(columns=['c_rcs_estimate', 'event_id'])

for i in range(50):   # 50 repeated hold-out evaluations; each run's MSE is appended to a log file
    # Split off the prediction target column 'risk'.
    y = data.risk
    X = data.drop(['risk'], axis=1)

    # Fresh random 80/20 train/test split each repetition
    # (no fixed random_state on purpose: every iteration must differ).
    X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2)

    # Categorical columns: dtype object with fewer than 10 distinct values.
    categorical_cols = [cname for cname in X_train_full.columns
                        if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"]
    # Numerical columns.
    numerical_cols = [cname for cname in X_train_full.columns
                      if X_train_full[cname].dtype in ['int64', 'float64']]

    # Keep only the selected columns.
    my_cols = categorical_cols + numerical_cols
    X_train = X_train_full[my_cols].copy()
    X_valid = X_valid_full[my_cols].copy()

    # Feature names after preprocessing: the ColumnTransformer emits the numeric
    # block first, then the one-hot block, so labels = numeric names + 5 one-hot names.
    # NOTE(review): this assumes exactly ONE categorical column with exactly 5
    # distinct values in every split -- confirm, otherwise the column count of the
    # transformed matrix will not match `labels`.
    labels = my_cols[1:] + [(my_cols[0] + '(%i)') % k for k in range(1, 6)]

    # Numeric pipeline: impute missing values with the mode, then z-score
    # standardize (x' = (x - mu) / sigma); MinMaxScaler is a possible alternative.
    numerical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('scaler', StandardScaler()),
    ])
    # Categorical pipeline: impute with the mode, then one-hot encode,
    # ignoring categories unseen during fit.
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('onehot', OneHotEncoder(handle_unknown='ignore')),
    ])
    # Bundle the numeric and categorical preprocessing.
    preprocessor = ColumnTransformer(transformers=[
        ('num', numerical_transformer, numerical_cols),
        ('cat', categorical_transformer, categorical_cols),
    ])
    # Fit on the training split only, then transform both splits
    # (avoids leaking validation statistics into the scaler/imputer).
    preprocessor.fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_valid = preprocessor.transform(X_valid)

    # Random forest: 100 trees, out-of-bag scoring enabled, 4 parallel workers.
    model2_1 = RandomForestRegressor(n_estimators=100, oob_score=True, verbose=0, n_jobs=4)
    model2_1.fit(X_train, y_train)
    preds2_1 = model2_1.predict(X_valid)
    score2_1 = mean_squared_error(y_valid, preds2_1)  # validation MSE
    # Context manager guarantees the handle is closed (and the line flushed)
    # even if a later statement in this iteration raises.
    with open('rfmse.txt', 'a') as rfmse_list:
        rfmse_list.write(str(score2_1) + '\n')
    print(score2_1)

    # XGBoost benefits from named columns (e.g. for plot_importance),
    # so rebuild DataFrames with the reconstructed feature names.
    X_train_test = pd.DataFrame(X_train)
    X_train_test.columns = labels
    X_valid_test = pd.DataFrame(X_valid)
    X_valid_test.columns = labels

    # XGBoost regressor with defaults (n_estimators=100); XGBoost parallelizes
    # across all available cores by default.
    # Reference: https://xgboost.readthedocs.io/en/latest/parameter.html#general-parameters
    model3_1 = XGBRegressor(verbosity=0)
    model3_1.fit(X_train_test, y_train)
    preds3_1 = model3_1.predict(X_valid_test)
    score3_1 = mean_squared_error(y_valid, preds3_1)  # validation MSE
    with open('xgbmse.txt', 'a') as xgbmse_list:
        xgbmse_list.write(str(score3_1) + '\n')
    print(score3_1)
    # Deep forest (CascadeForestRegressor) has few, insensitive hyperparameters,
    # so it is evaluated only once elsewhere.
    # Reference: https://deep-forest.readthedocs.io/en/latest/
