#!/usr/bin/python
# -*- coding: UTF-8 -*-

from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
import pandas as pd
import numpy as np

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn import utils, preprocessing
from sklearn.metrics import confusion_matrix

path = 'train.csv'
data = pd.read_csv(path)

# Target is the "Tag" column; features are everything except the
# coordinates ("lat", "lon") and the target itself.
Y = data["Tag"].values
feature_frame = data.drop(["lat", "lon", "Tag"], axis=1)
X = feature_frame.values
columns = data.columns.values
# BUG FIX: the labels must be the columns actually kept in X.
# The old `columns[:-1]` only dropped the last column, so it still
# contained "lat"/"lon" and misaligned labels with importances.
x_columns = feature_frame.columns.values
feat_labels = x_columns

# Split BEFORE scaling: the scaler must be fit on training data only,
# then applied to the test set, to avoid test-set leakage.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
min_max_scaler = preprocessing.MinMaxScaler()
x_train = min_max_scaler.fit_transform(x_train)
x_test = min_max_scaler.transform(x_test)

# Instantiate the classifier: 2 trees, entropy split criterion,
# at least 3 samples per leaf, max depth 15.
# (The original comment claimed "100 trees by default", which contradicted
# n_estimators=2 — the comment is corrected here, the setting is kept.)
# random_state is fixed so the reported accuracy is reproducible,
# matching the seeded train/test split above.
clf = RandomForestClassifier(n_estimators=2, criterion="entropy", min_samples_leaf=3,
                             max_depth=15, random_state=0)

# Train.
clf.fit(x_train, y_train)
# Predict on the held-out set and report accuracy.
predict_Y = clf.predict(x_test)

print(f"准确率acc = {accuracy_score(y_pred=predict_Y, y_true=y_test)}")

# Feature-importance report for the trained forest.
# `feature_importances_` gives one score per feature (higher = more useful).
importances = clf.feature_importances_
print("重要性：", importances)

# Rank features from most to least important.
indices = np.argsort(importances)[::-1]

# Print a ranked table and collect the feature names in importance order.
x_columns_indices = []
for rank, idx in enumerate(indices, start=1):
    print("%2d) %-*s %f" % (rank, 30, feat_labels[idx], importances[idx]))
    x_columns_indices.append(feat_labels[idx])

# Confusion matrix of the held-out predictions.
cm = confusion_matrix(y_test, predict_Y)
print(cm)
