from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import  pandas as pd
import numpy as np
# Train a random forest on the Iris dataset and print features ranked by importance.
#
# NOTE(review): the standard UCI iris.data has NO header row, so the original
# `pd.read_csv('iris.data')` would consume the first sample as the header and
# `df['e']` would raise KeyError. Name the columns explicitly; the class label
# is the last column, 'e'. If your local copy already carries an a..e header,
# drop `header=None` — verify against the actual file.
df = pd.read_csv('iris.data', header=None, names=['a', 'b', 'c', 'd', 'e'])

# Encode the three string class labels into integers 0..2.
enc = preprocessing.LabelEncoder()
enc = enc.fit(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
df['e'] = enc.transform(df['e'])

# Features are every column but the last; the target is the last column.
x = df.iloc[:, :-1].values   # shape (150, 4): 4 features
y = df.iloc[:, -1].values    # shape (150,): encoded class labels

x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.3, random_state=0
)

# Feature names: first column through the second-to-last.
feat_labels = df.columns[:-1]
print(feat_labels)

forest = RandomForestClassifier(
    n_estimators=10000, random_state=0, n_jobs=-1, max_depth=3
)
forest.fit(x_train, y_train)
score = forest.score(x_test, y_test)
print(f'test accuracy: {score:.5f}')  # ~0.98148 on this split per the original run

# Rank features by importance, largest first.
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]  # indices sorted by descending importance
for f in range(x_train.shape[1]):  # one line per feature (4 for iris)
    print(f'{f + 1:2d}) {feat_labels[indices[f]]:<30s} {importances[indices[f]]:f}')



