from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn import tree

wine = load_wine()

print(wine.data.shape)

# Hold out 30% of the samples as a test set; fixed seed for reproducibility.
train_X, test_X, train_Y, test_Y = train_test_split(
    wine.data, wine.target, test_size=0.3, random_state=91
)

# Classification tree: collect the hyper-parameters in one dict so the
# configuration reads as a unit, then unpack into the constructor.
tree_params = {
    "criterion": "gini",
    "random_state": 0,
    "splitter": "best",
    "max_depth": 4,
    # "min_samples_leaf": 10,   # every leaf must hold >= 10 training samples; usually paired with max_depth
    # "min_samples_split": 10,  # a node needs >= 10 samples before it may be split
    "max_features": 10,              # cap on the number of features considered at each split
    "min_impurity_decrease": 0.001,  # a split must reduce impurity by at least this much
}
clf = tree.DecisionTreeClassifier(**tree_params)
clf = clf.fit(train_X, train_Y)

# Accuracy on the held-out test set.
result = clf.score(test_X, test_Y)
print(result)

# Chinese labels for the 13 wine features, in dataset order (alcohol, malic
# acid, ash, alkalinity of ash, magnesium, total phenols, flavanoids,
# nonflavanoid phenols, proanthocyanins, color intensity, hue,
# od280/od315 of diluted wines, proline).
feature_name = ["酒精", "苹果酸", "灰", "灰的碱性", "镁", "总酚", "类黄酮", "非黄烷类酚类"
    , "花青素", "颜色强度", "色调", "od280/od315稀释葡萄酒", "脯氨酸"]  # fixed label typo: was "0d315" (zero) for the od280/od315 ratio
class_names = ["三得利", "百威", "科罗娜"]
# Render the fitted tree to treeResult.pdf via graphviz.
import graphviz

dot_data = tree.export_graphviz(clf, out_file=None, feature_names=feature_name, class_names=class_names,
                                filled=True, rounded=True)
graph = graphviz.Source(dot_data)
graph.render("treeResult")

# Pair each feature with its importance from the fitted tree.
importances = list(zip(feature_name, clf.feature_importances_))
print(importances)

# Hyper-parameter search: look at one parameter in isolation, max_depth.
# Sweep depths 1..10, record test-set accuracy for each, and plot the curve.
scores = []
for depth in range(1, 11):
    model = tree.DecisionTreeClassifier(
        criterion="gini", random_state=0, splitter="best", max_depth=depth
    ).fit(train_X, train_Y)
    scores.append(model.score(test_X, test_Y))

import matplotlib.pyplot as plt

plt.plot(range(1, 11), scores, color="red", label="max_depth")
plt.legend()
plt.show()
