from sklearn import preprocessing, __all__
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn import metrics
import xgboost as xgb
import numpy as np

# Load the abalone dataset; the class label is the last column.
df = pd.read_excel('/Users/a/Desktop/鲍鱼数据集.xlsx')

# Encode the categorical Sex column (M/F/I) as integer codes.
sex = preprocessing.LabelEncoder()
sex.fit(df.Sex)
df.Sex = sex.transform(df.Sex)

# Features are every column except the last; the last column is the target.
x = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

# Fixed random_state keeps the split (and results below) reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)

# Feature names (all columns except the target) for the importance report.
feat_labels = df.columns[:-1]
print(feat_labels)

# Random-forest baseline classifier.
forest = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1, max_depth=3)
forest.fit(x_train, y_train)
score = forest.score(x_test, y_test)
print(f"Random forest accuracy: {score:.5f}")

# Rank the features by importance, most important first.
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(x_train.shape[1]):
    print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))
# XGBoost classifier. (A regressor here would yield continuous predictions,
# which metrics.accuracy_score rejects for discrete class labels.)
# XGBClassifier requires labels encoded as 0..n_classes-1, so fit the encoder
# on the full label set to cover classes that appear only in the test split.
label_enc = preprocessing.LabelEncoder()
label_enc.fit(y)
XGB_model = XGBClassifier(n_estimators=1000, learning_rate=0.05)
XGB_model.fit(x_train, label_enc.transform(y_train))
# Map encoded predictions back to the original label values before scoring.
y_pred_XGB = label_enc.inverse_transform(XGB_model.predict(x_test))
Accuracy_XGB = metrics.accuracy_score(y_test, y_pred_XGB)
print(f"XGBoost accuracy: {Accuracy_XGB:.5f}")