# 1.导包
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier         # 决策树
from sklearn.ensemble import RandomForestClassifier     # 随机森林
from sklearn.ensemble import AdaBoostClassifier         # adaboost
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV

# 2. Load the dataset
df_wine = pd.read_csv("wine0501.csv")

# 3. Basic data checks
print(df_wine.info())   # confirms: no missing, special, or invalid values

# 3.1 Features (every column after the first) and target (first column)
x_data = df_wine.iloc[:, 1:]
y_data = df_wine.iloc[:, 0]

# 3.2 Stratified 80/20 train/test split (fixed seed for reproducibility)
x_train, x_test, y_train, y_test = train_test_split(
    x_data,
    y_data,
    test_size=0.2,
    random_state=928,
    stratify=y_data,
)

# 4. Feature engineering
# 4.1 Standardization — fit the scaler on the training data only, then
# apply the same transform to the test data (avoids train/test leakage)
transformer = StandardScaler()
x_train = transformer.fit_transform(x_train)
x_test = transformer.transform(x_test)

# 5. Model training
def _fit_and_report(model, label):
    """Fit *model* on the (already scaled) training split, print its
    test-set accuracy prefixed with *label*, and return the fitted model
    together with its test-set predictions.

    Factored out because the three estimators below repeated the same
    fit / predict / print boilerplate verbatim.
    """
    model.fit(x_train, y_train)
    y_pred = model.predict(x_test)
    print(label, accuracy_score(y_test, y_pred))
    return model, y_pred


# 5.1 Plain decision tree (single-learner baseline)
model1, y_pre1 = _fit_and_report(
    DecisionTreeClassifier(criterion="gini", max_depth=10, random_state=928),
    "普通的决策树",
)

# 5.2 Bagging: random forest (max_depth=5 / n_estimators=10 were also tried)
model2, y_pre2 = _fit_and_report(
    RandomForestClassifier(criterion="gini", max_depth=10, random_state=928),
    "Bagging_随机森林",
)

# 5.3 Boosting: AdaBoost (n_estimators was originally 20)
# NOTE(review): the ``algorithm`` parameter is deprecated in scikit-learn
# >= 1.6, where "SAMME" is the only behaviour — confirm the installed
# version before relying on this keyword.
model3, y_pre3 = _fit_and_report(
    AdaBoostClassifier(n_estimators=5, random_state=928, algorithm="SAMME"),
    "Boosting_Adaboost",
)

# 5.4 Hyper-parameter tuning via cross-validation + grid search (kept commented out)
# # 5.4.1 Applied to the random forest
# estimator = RandomForestClassifier()
# param_dict = {
#     "criterion": ["gini", "entropy"],
#     "max_depth": [5*i for i in range(1,5)],
#     "random_state": [928],
#     "n_estimators": [5*i for i in range(1,21)]
# }
# model4 = GridSearchCV(estimator=estimator,param_grid=param_dict,cv=4)
# model4.fit(x_train,y_train)
# print(model4.best_params_)
