import pandas as pd
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
# Import the learning algorithm
from sklearn.ensemble import RandomForestRegressor

# Load the daily temperature data set.
features = pd.read_csv('data/temps.csv')

# Build datetime objects from the year/month/day columns (kept for later
# plotting); constructing datetime directly avoids formatting a string
# only to re-parse it with strptime.
years = features['year']
months = features['month']
days = features['day']
dates = [datetime.datetime(int(year), int(month), int(day))
         for year, month, day in zip(years, months, days)]

# One-hot encode the categorical columns (e.g. day-of-week strings).
features = pd.get_dummies(features)

# Target variable, captured before it is dropped from the feature matrix.
labels = np.array(features['actual'])

# Drop the target plus stray 'Unnamed' columns (artifacts of trailing
# commas in the CSV) in a single call. errors='ignore' keeps the script
# working even if a cleaned-up CSV no longer has the unnamed columns.
features = features.drop(columns=['actual', 'Unnamed: 9', 'Unnamed: 10'],
                         errors='ignore')

# Save the column names for interpreting feature importances later.
feature_list = list(features.columns)

# random_state pins the RNG seed (42) so the 75/25 split is identical
# every time the script is re-run.
train_features, test_features, train_labels, test_labels = train_test_split(
    features, labels, test_size=0.25, random_state=42
)
# Report the shape of each split.
for tag, arr in (('训练集特征:', train_features),
                 ('训练集标签:', train_labels),
                 ('测试集特征:', test_features),
                 ('测试集标签:', test_labels)):
    print(tag, arr.shape)

# Build a 1000-tree random forest; the fixed seed makes results reproducible.
rf = RandomForestRegressor(n_estimators=1000, random_state=42)
# Fit on the training split.
rf.fit(train_features, train_labels)
# Predict on the held-out split.
predictions = rf.predict(test_features)
# Absolute error per test sample.
errors = np.abs(predictions - test_labels)
# Mean absolute percentage error (MAPE).
mape = 100 * (errors / test_labels)
print('MAPE:', np.mean(mape))

# Feature importances from the fitted forest.
importances = list(rf.feature_importances_)

# Pair each column name with its importance, rounded to 2 decimal places
# purely for readable display (the rounding removes presentation noise).
feature_importances = [(feature, round(importance, 2))
                       for feature, importance in zip(feature_list, importances)]
# Sort in place, most important feature first.
feature_importances.sort(key=lambda x: x[1], reverse=True)
# Print each pair with a plain for loop — the original list comprehension
# built and discarded a list of None just for the print side effect.
for pair in feature_importances:
    print('Variable: {:20} Importance: {}'.format(*pair))


# 选择最重要的那两个特征来试一试
rf_most_important = RandomForestRegressor(n_estimators= 1000, random_state=42)

# 拿到这俩特征
#index() 方法用于返回指定元素在列表中的索引位置。
important_indices = [feature_list.index('temp_1'), feature_list.index('average')]
#[:, important_indices] 是 NumPy 的切片语法，用于选择所有行（:）和特定的列（important_indices）。
train_important = train_features.iloc[:,important_indices ]
test_important = test_features.iloc[:,important_indices ]

# 重新训练模型
rf_most_important.fit(train_important, train_labels)

# 预测结果
predictions = rf_most_important.predict(test_important)

errors = abs(predictions - test_labels)

# 评估结果

mape = np.mean(100 * (errors / test_labels))

print('mape:', mape)