"""
demo02_rf.py   随机森林回归器
"""
import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
import sklearn.utils as su
import sklearn.ensemble as se
import sklearn.metrics as sm
import mlflow
import mlflow.pyfunc as mp
import mlflow.sklearn as ms


def eval_metrics(actual, pred):
    """Score regression predictions.

    Parameters
    ----------
    actual : array-like of ground-truth target values.
    pred : array-like of predicted values, same length as ``actual``.

    Returns
    -------
    tuple
        ``(rmse, mae, r2)`` — root-mean-squared error, mean absolute
        error, and coefficient of determination.
    """
    mse = sm.mean_squared_error(actual, pred)
    return (np.sqrt(mse),
            sm.mean_absolute_error(actual, pred),
            sm.r2_score(actual, pred))

warnings.filterwarnings("ignore")

# Load the dataset, dropping the first two columns (record id and date).
# Iterate the file directly instead of readlines() (no need to hold every
# raw line in memory) and strip the trailing newline so the last field of
# each row — including the last header name — is clean.
headers = None
data = []
with open('./ml_data/bike_day.csv', 'r') as f:
    for i, line in enumerate(f):
        fields = line.rstrip('\n').split(',')[2:]
        if i == 0:
            headers = fields
        else:
            data.append(fields)
headers = np.array(headers)
data = np.array(data, dtype='f8')

# Features: the first 11 columns; target: the last column (total count).
x = data[:, 0:11]
y = data[:, -1]
# Shuffle with a fixed seed, then split 90% train / 10% test.
x, y = su.shuffle(x, y, random_state=7)
train_size = int(len(x) * 0.9)
train_x, test_x, train_y, test_y = x[:train_size], x[train_size:], y[:train_size], y[train_size:]


# Hyper-parameters from the command line:
#   3+ args -> max_depth n_estimators min_samples_split
#   1 arg   -> min_samples_split only; the others use defaults
#   0/2 args -> all defaults (the original code raised IndexError here)
args = sys.argv[1:]
if len(args) >= 3:
    max_depth = int(args[0])
    n_estimators = int(args[1])
    min_samples_split = int(args[2])
elif len(args) == 1:
    max_depth = 5
    n_estimators = 500
    min_samples_split = int(args[0])
else:
    max_depth = 5
    n_estimators = 500
    min_samples_split = 2  # sklearn's own default for min_samples_split


# Point the tracking client at the local MLflow server. If unset, MLflow
# defaults to ./mlruns; the MLFLOW_TRACKING_URI env var also overrides it.
mlflow.set_tracking_uri("http://127.0.0.1:5000")

# Select the experiment, creating it first if it does not exist yet.
# A bare `except:` would also swallow KeyboardInterrupt/SystemExit, so
# catch only Exception.
try:
    mlflow.set_experiment("bike_RFmodel")
except Exception:
    mlflow.create_experiment("bike_RFmodel")
    mlflow.set_experiment("bike_RFmodel")


# Train the random-forest regressor and record everything on one MLflow run.
with mlflow.start_run():
    RFmodel = se.RandomForestRegressor(
        max_depth=max_depth,
        n_estimators=n_estimators,
        min_samples_split=min_samples_split,
    )
    RFmodel.fit(train_x, train_y)

    # Evaluate on the held-out split and report the scores.
    pred_test_y = RFmodel.predict(test_x)
    rmse, mae, r2 = eval_metrics(test_y, pred_test_y)
    print("RandomForest model (max_depth={}, n_estimators={}, min_samples_split={}):".format(max_depth, n_estimators, min_samples_split))
    print("  RMSE: %s" % rmse)
    print("  MAE: %s" % mae)
    print("  R2: %s" % r2)

    # Hyper-parameters go through log_param, scores through log_metric.
    for param_name, param_value in (("max_depth", max_depth),
                                    ("n_estimators", n_estimators),
                                    ("min_samples_split", min_samples_split)):
        mlflow.log_param(param_name, param_value)
    mlflow.log_metric("rmse", rmse)
    mlflow.log_metric("r2", r2)
    mlflow.log_metric("mae", mae)

    # Persist the model twice: as a run artifact ("model" directory on the
    # tracking server) and as a plain local copy named after the params.
    mlflow.sklearn.log_model(RFmodel, "model")
    mlflow.sklearn.save_model(RFmodel, 'hostmodel/model_{}_{}_{}'.format(max_depth, n_estimators, min_samples_split))
    print('已保存到本地')


# Pull the learned feature importances and the matching 11 column names.
day_fi = RFmodel.feature_importances_
day_headers = headers[:11]
print(day_headers)





# # 读取bike_hour.csv数据集
# headers = None
# data = []
# with open('../ml_data/bike_hour.csv', 'r') as f:
# 	for i, line in enumerate(f.readlines()):
# 		if i==0:
# 			headers = line.split(',')[2:]
# 		else:
# 			data.append(line.split(',')[2:])
# headers = np.array(headers)
# data = np.array(data, dtype='f8')

# # 整理数据集   
# x = data[:, 0:12]
# y = data[:, -1]
# # 拆分测试集与训练集
# x, y = su.shuffle(x, y, random_state=7)
# train_size = int(len(x) * 0.9)
# train_x, test_x, train_y, test_y = x[:train_size], x[train_size:], y[:train_size], y[train_size:]

# # 构建随机森林回归器模型  并训练模型
# with mlflow.start_run():
# 	model = se.RandomForestRegressor(max_depth=max_depth, n_estimators=n_estimators, min_samples_split=min_samples_split)
# 	model.fit(train_x, train_y)
# 	# 针对测试集进行预测  输出评估得分
# 	pred_test_y = model.predict(test_x)
# 	(rmse, mae, r2) = eval_metrics(test_y, pred_test_y)
#     print("RandomForest model (max_depth=%d, n_estimators=%d, min_samples_split=%d):" % (max_depth, n_estimators, min_samples_split))
#     print("  RMSE: %s" % rmse)
#     print("  MAE: %s" % mae)
#     print("  R2: %s" % r2)

# # 获取特征重要性
# hour_fi = model.feature_importances_
# hour_headers = headers[0:12]

# Bar chart of the day-dataset feature importances, most important first.
plt.figure('Feature Importance', facecolor='lightgray')
plt.title('Feature Importances', fontsize=16)
plt.ylabel('Importance', fontsize=12)
plt.tick_params(labelsize=10)
plt.grid(axis='y', linestyle=':')
x = np.arange(day_fi.size)
# Sort bars by descending importance so the tallest come first.
sorted_indices = np.argsort(day_fi)[::-1]
plt.bar(x, day_fi[sorted_indices], width=0.8, color='orangered', label='day FI')
plt.xticks(x, day_headers[sorted_indices])
plt.legend()
plt.show()








