import pandas as pd
import numpy as np
import math
from collections import Counter
import random
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree as sk_tree
import matplotlib.pyplot as plt

# Dataset splitting
def train_test_split(X, y, test_size=0.8, seed=46):
    """Randomly split the parallel lists X / y into train and test subsets.

    Args:
        X: list of samples.
        y: list of labels, same length as X.
        test_size: fraction of samples that is SAMPLED into the returned
            train set (note: despite the name, the sampled indices become
            the training data — this matches the original behavior).
        seed: RNG seed; the default 46 reproduces the original split.

    Returns:
        (train_X, train_y, test_X, test_y)
    """
    random.seed(seed)
    idx = random.sample(range(len(X)), int(len(X) * test_size))
    # Set gives O(1) membership tests; the original scanned the index
    # list for every sample (O(n^2) overall).
    idx_set = set(idx)
    train_X = [X[i] for i in idx]
    train_y = [y[i] for i in idx]
    test_X = [X[i] for i in range(len(X)) if i not in idx_set]
    test_y = [y[i] for i in range(len(y)) if i not in idx_set]
    return train_X, train_y, test_X, test_y

# Decision-tree construction (ID3)
def decision_tree(X, y, feature_names):
    """Recursively build an ID3 tree from samples X with labels y.

    Returns either a leaf label, or a dict of the form
    {'best_feature': <name>, 'subtrees': {<value>: <subtree>}}.
    """
    # Pure node: every sample carries the same label.
    if len(set(y)) == 1:
        return y[0]
    # No features left to split on: fall back to the majority label.
    if not feature_names:
        return Counter(y).most_common(1)[0][0]
    # Split on the feature with the highest information gain.
    best_idx = choose_best_feature(X, y)
    node = {
        'best_feature': feature_names[best_idx],
        'subtrees': {}
    }
    # The chosen feature is consumed and not reused further down.
    remaining = feature_names[:best_idx] + feature_names[best_idx + 1:]
    for value in {row[best_idx] for row in X}:
        branch_X, branch_y = [], []
        for row, label in zip(X, y):
            if row[best_idx] == value:
                # Drop the consumed feature column from the branch data.
                branch_X.append(row[:best_idx] + row[best_idx + 1:])
                branch_y.append(label)
        node['subtrees'][value] = decision_tree(branch_X, branch_y, remaining)
    return node

# Select the best splitting feature
def choose_best_feature(X, y):
    """Return the index of the feature with the largest information gain.

    Ties are broken in favor of the lowest feature index (max() returns
    the first maximal element, matching the original '>' comparison).
    """
    return max(range(len(X[0])), key=lambda f: information_gain(X, y, f))

# Information gain
def information_gain(X, y, feature):
    """Information gain of splitting samples X (labels y) on `feature`.

    Computed as H(y) - sum_v P(feature = v) * H(y | feature = v).
    Labels are grouped by feature value in a single pass; the original
    rescanned all of X once per distinct value (O(n * k)).
    """
    base_entropy = entropy(y)
    # feature value -> labels of the samples carrying that value,
    # in first-occurrence order (same order the original summed in).
    groups = {}
    for row, label in zip(X, y):
        groups.setdefault(row[feature], []).append(label)
    total = len(X)
    conditional_entropy = 0.0
    for subset_y in groups.values():
        prob = len(subset_y) / total
        conditional_entropy += prob * entropy(subset_y)
    return base_entropy - conditional_entropy

# Shannon entropy
def entropy(y):
    """Return the base-2 Shannon entropy of the label sequence y.

    Yields 0.0 for a single-class input. Assumes y is non-empty
    (len(y) == 0 would divide by zero).
    """
    total = len(y)
    # Renamed accumulator: the original local `entropy` shadowed the
    # function itself, which is confusing and blocks recursion.
    result = 0.0
    for count in Counter(y).values():
        prob = count / total
        result -= prob * math.log2(prob)
    return result

# Print the decision tree as indented text
def print_tree(tree, depth):
    """Dump `tree` to stdout, one node per line, indented with tabs.

    A non-dict `tree` is a leaf label and is printed as "-> label";
    dict values are descended into two indent levels deeper.
    """
    if not isinstance(tree, dict):
        print("\t" * depth + "-> " + str(tree))
        return
    for node_key, node_value in tree.items():
        print("\t" * depth + str(node_key))
        if isinstance(node_value, dict):
            for branch, child in node_value.items():
                print("\t" * (depth + 1) + str(branch))
                print_tree(child, depth + 2)
        else:
            print("\t" * (depth + 1) + "-> " + str(node_value))

def sk_print_tree(data, feature_name, labels=None, out_path='decision_tree.png'):
    """Fit an sklearn DecisionTreeClassifier and save its plot to `out_path`.

    Args:
        data: 2D array-like of integer-encoded feature values.
        feature_name: list of feature names for the plot.
        labels: target labels; defaults to the originally hard-coded list.
        out_path: image file the tree plot is written to.
    """
    if labels is None:
        # Original hard-coded labels (6 ones, 11 zeros).
        # NOTE(review): the dataset built in main() has 8 positive samples,
        # not 6 — confirm whether this list was meant to match that data.
        labels = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ctf = DecisionTreeClassifier()
    ctf.fit(data, labels)
    sk_tree.plot_tree(ctf, feature_names=feature_name)
    plt.savefig(out_path)

# Prediction
def predict(tree, sample, feature_names=None):
    """Walk `tree` and return the predicted label for `sample`.

    Args:
        tree: leaf label, or {'best_feature': name, 'subtrees': {...}}.
        sample: full feature-value list (features are NOT removed while
            descending, unlike during training).
        feature_names: optional feature-name list aligned with `sample`.
            When given, the branch value is read at the split feature's
            position. When None, the original behavior is kept: take the
            first subtree key that appears ANYWHERE in the sample — only
            correct while feature vocabularies are disjoint across
            features, as they are in the watermelon dataset.

    Returns:
        The leaf label, or None when the sample's value has no branch.
    """
    if not isinstance(tree, dict):
        return tree
    subtrees = tree['subtrees']
    if feature_names is not None:
        # Positional lookup: robust even with overlapping vocabularies.
        value = sample[feature_names.index(tree['best_feature'])]
        if value in subtrees:
            return predict(subtrees[value], sample, feature_names)
        return None
    # Legacy path: membership scan over the whole sample.
    for feature_value in subtrees:
        if feature_value in sample:
            return predict(subtrees[feature_value], sample)
    return None

def main():
    """Train and evaluate an ID3 decision tree on the watermelon dataset.

    Steps: split the data, build the tree, print it as text, draw the
    sklearn tree for comparison, then predict the hold-out samples and
    report accuracy.
    """
    # Integer-encoded version of the 17-sample watermelon dataset,
    # used only for the sklearn visualization below.
    Integr_data = np.array([
        [0, 0, 0, 0, 0, 0],  
        [1, 0, 1, 0, 0, 0],  
        [1, 0, 0, 0, 0, 0],  
        [0, 0, 1, 0, 0, 0],  
        [2, 0, 0, 0, 0, 0],  
        [0, 1, 0, 0, 1, 1],  
        [1, 1, 0, 1, 1, 1],  
        [1, 1, 0, 0, 1, 0],  
        [1, 1, 1, 1, 1, 0],  
        [0, 2, 2, 0, 2, 1],  
        [2, 2, 2, 2, 2, 0],  
        [2, 0, 0, 2, 2, 1],  
        [0, 1, 0, 1, 0, 0],  
        [2, 1, 1, 1, 0, 0],  
        [1, 1, 0, 0, 1, 1],  
        [2, 0, 0, 2, 2, 0],  
        [0, 0, 1, 1, 1, 0]   
    ])
    # Raw dataset: six categorical features plus a final '是'/'否' label column.
    X = [
        ['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '是'],
        ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '是'],
        ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '是'],
        ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '是'],
        ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '是'],
        ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '是'],
        ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', '是'],
        ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', '是'],
        ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', '否'],
        ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', '否'],
        ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', '否'],
        ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', '否'],
        ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', '否'],
        ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', '否'],
        ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '否'],
        ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', '否'],
        ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', '否']
    ]
    # Binary labels: 1 for '是' (good melon), 0 otherwise.
    y = [1 if row[-1] == '是' else 0 for row in X]
    # Strip the label column so X holds features only.
    X = [row[:-1] for row in X]
    feature_names = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感']
    # Split the dataset. NOTE: test_size=0.8 actually selects the TRAIN
    # fraction here (the sampled indices become training data).
    train_X, train_y, test_X, test_y = train_test_split(X, y, test_size=0.8)

    # Build the ID3 decision tree on the training split.
    tree = decision_tree(train_X, train_y, feature_names)

    # Visualization 1: plain-text dump of the tree.
    # print(tree)
    print_tree(tree, 0)
    # Visualization 2: sklearn plot (SimHei font so Chinese labels render).
    plt.rcParams['font.sans-serif'] = 'SimHei' 
    plt.rcParams['axes.unicode_minus'] = False
    sk_print_tree(Integr_data, feature_names)

    # Predict each hold-out sample.
    y_pred = []
    for i in range(len(test_X)):
        prediction = predict(tree, test_X[i])
        y_pred.append(prediction)
        print("样本 {} 的预测结果是: {}".format(i, prediction))
    # Accuracy over the hold-out set (None predictions count as wrong).
    correct = sum(1 for i in range(len(test_y)) if test_y[i] == y_pred[i])
    accuracy = correct / len(test_y)
    print(y_pred)
    print("准确率:", accuracy)

if __name__ == '__main__':
    main()
