#coding=utf-8
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import graphviz
import pydotplus
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn import tree
from six import StringIO
import graphviz


def kesgihua(model):
    """Visualize a fitted decision tree: render it to "ID3.pdf".

    Parameters
    ----------
    model : DecisionTreeClassifier
        An already-fitted tree trained on the three features below.
    """
    # Feature names: 纹饰 (figure), 类型 (type), 颜色 (color).
    feature_names = ['figure', 'type', 'color']
    # Class labels: 无风化 (no weathering) -> 'no', 风化 (weathered) -> 'yes'.
    class_labels = ['no', 'yes']
    dot_data = export_graphviz(model,
                               feature_names=feature_names,
                               class_names=class_labels,
                               filled=True,
                               rounded=True,
                               )
    graph = pydotplus.graph_from_dot_data(dot_data)
    graph.write_pdf("ID3.pdf")

def train(model):
    """Fit *model* on data.csv/target.csv and print its test accuracy.

    Loads features and labels from CSV files in the working directory,
    holds out 10% of the rows for testing, fits the classifier in place,
    and prints the held-out accuracy.

    Parameters
    ----------
    model : DecisionTreeClassifier
        Unfitted (or refittable) classifier; fitted as a side effect.
    """
    data = pd.read_csv('data.csv')
    target = pd.read_csv('target.csv')
    X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.1)
    model.fit(X_train, y_train)
    y_predict = model.predict(X_test)
    # accuracy_score's documented signature is (y_true, y_pred); the
    # original call had them swapped (harmless for accuracy, which is
    # symmetric, but wrong if the metric is ever changed).
    print('ID3决策树准确率: %0.4lf' % accuracy_score(y_test, y_predict))


def tree_mdl(x_train, x_test, y_train, y_test, criterion, max_depth):
    """Fit one depth-limited decision tree; return (train_acc, test_acc).

    Parameters
    ----------
    x_train, x_test, y_train, y_test :
        Pre-split feature matrices and label vectors.
    criterion : str
        Split criterion, e.g. "gini" or "entropy".
    max_depth : int
        Maximum depth of the fitted tree.

    Returns
    -------
    tuple of (float, float)
        Accuracy on the training set and on the test set.
    """
    # Use a distinct local name: the original `tree` shadowed the
    # module-level `from sklearn import tree` import.
    clf = DecisionTreeClassifier(random_state=1, criterion=criterion, max_depth=max_depth)
    clf.fit(x_train, y_train)
    acu_train = clf.score(x_train, y_train)
    acu_test = clf.score(x_test, y_test)
    # Optional per-configuration PDF export, kept disabled. (The original
    # also built `dot_data` unconditionally and never used it.)
    # dot_data = export_graphviz(clf,
    #                            feature_names=['figure', 'type', 'color'],
    #                            class_names=['no', 'yes'],
    #                            filled=True, rounded=True)
    # pydotplus.graph_from_dot_data(dot_data).write_pdf(
    #     str(criterion) + "max" + str(max_depth) + ".pdf")
    return acu_train, acu_test

def run_tree():
    """Sweep tree depths 1-9 for both split criteria; plot and tabulate.

    For each criterion ("gini", "entropy") fits trees of increasing depth
    via `tree_mdl`, shows one accuracy-vs-depth plot per criterion, and
    returns every (criterion, depth, train-acc, test-acc) row as a
    DataFrame.
    """
    features = pd.read_csv('data.csv')
    labels = pd.read_csv('target.csv')
    x_tr, x_te, y_tr, y_te = train_test_split(features, labels, test_size=0.1)
    records = {
        "criterion": [],
        "max_depth": [],
        "acu_train": [],
        "acu_test": [],
    }
    for crit in ("gini", "entropy"):
        train_curve = []
        test_curve = []
        for depth in range(1, 10):
            tr_acc, te_acc = tree_mdl(x_tr, x_te, y_tr, y_te, crit, depth)
            train_curve.append(tr_acc)
            test_curve.append(te_acc)
            records["criterion"].append(crit)
            records["max_depth"].append(depth)
            records["acu_train"].append(tr_acc)
            records["acu_test"].append(te_acc)
        # One figure per criterion: accuracy as a function of depth.
        plt.plot(range(1, 10), train_curve, "o-", label="acu-train")
        plt.plot(range(1, 10), test_curve, "*-", label="acu-test")
        plt.xlabel("max_depth")
        plt.ylabel("accuracy")
        plt.title("Criterion = " + str(crit))
        plt.legend(["acu-train", "acu-test"])
        plt.show()
    return pd.DataFrame(records)


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Fit an entropy-based tree, report test accuracy, render it to PDF.
    clf = DecisionTreeClassifier(criterion="entropy", max_depth=13)
    train(clf)
    kesgihua(clf)
    # run_tree()  # uncomment to sweep depth/criterion combinations

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
