import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pickle


# Decision tree node: either an internal split node (feature_index/threshold
# plus two branches) or a leaf (value set, branches None).
class DecisionNode:
    def __init__(self, feature_index=None, threshold=None, value=None, true_branch=None, false_branch=None):
        self.feature_index = feature_index  # index of the feature used for the split
        self.threshold = threshold  # split threshold: feature <= threshold goes to true_branch
        self.value = value  # predicted class label for a leaf; None for internal nodes
        self.true_branch = true_branch  # subtree for samples with feature <= threshold
        self.false_branch = false_branch  # subtree for samples with feature > threshold


# A simple CART-style decision tree classifier with exhaustive threshold
# search, supporting Gini impurity or entropy as the split criterion.
class DecisionTreeClassifier:
    def __init__(self, max_depth=None, feature_selection="gini"):
        self.max_depth = max_depth  # maximum tree depth; None = grow until pure
        self.feature_selection = feature_selection  # split criterion: "gini" or "entropy"
        self.root = None  # root DecisionNode, populated by fit()

    def gini(self, y):
        """Return the Gini impurity of label array *y* (0.0 for a pure/empty set)."""
        if len(y) == 0:
            # Guard: the original divided by zero here for empty partitions.
            return 0.0
        _, counts = np.unique(y, return_counts=True)
        probabilities = counts / len(y)
        return 1 - np.sum(probabilities ** 2)

    def entropy(self, y):
        """Return the Shannon entropy (base 2) of label array *y* (0.0 if empty)."""
        if len(y) == 0:
            return 0.0
        _, counts = np.unique(y, return_counts=True)
        probabilities = counts / len(y)
        # counts from np.unique are always > 0, so log2 never sees zero.
        return -np.sum(probabilities * np.log2(probabilities))

    def _find_best_split(self, X, y, impurity):
        """Exhaustively search for the (feature, threshold) pair minimizing the
        weighted *impurity* of the two resulting partitions.

        Returns (feature_index, threshold); both are None when no threshold
        separates the data (e.g. every row is identical).
        """
        best_score = float('inf')
        best_feature_index = None
        best_threshold = None
        n_samples = len(y)

        for feature_index in range(X.shape[1]):
            for threshold in np.unique(X[:, feature_index]):
                mask = X[:, feature_index] <= threshold
                n_true = int(np.count_nonzero(mask))
                n_false = n_samples - n_true
                # Skip degenerate splits that leave one side empty (this
                # happens when threshold equals the column maximum).
                if n_true == 0 or n_false == 0:
                    continue
                # BUGFIX: the original weighted each side by len(mask), which
                # is the *total* sample count (a boolean mask has length n),
                # not the partition size.
                score = (n_true * impurity(y[mask]) + n_false * impurity(y[~mask])) / n_samples
                if score < best_score:
                    best_score = score
                    best_feature_index = feature_index
                    best_threshold = threshold

        return best_feature_index, best_threshold

    def find_best_split_gini(self, X, y):
        """Best (feature_index, threshold) under the Gini criterion."""
        return self._find_best_split(X, y, self.gini)

    def find_best_split_entropy(self, X, y):
        """Best (feature_index, threshold) under the entropy criterion."""
        return self._find_best_split(X, y, self.entropy)

    def build_tree(self, X, y, depth=0):
        """Recursively build the tree; return the root DecisionNode of the subtree.

        Stops at max_depth, at a pure node, or when no valid split exists,
        emitting a leaf holding the majority class.
        """
        if depth == self.max_depth or len(np.unique(y)) == 1:
            return DecisionNode(value=np.bincount(y).argmax())

        if self.feature_selection == "gini":
            feature_index, threshold = self.find_best_split_gini(X, y)
        elif self.feature_selection == "entropy":
            feature_index, threshold = self.find_best_split_entropy(X, y)
        else:
            raise ValueError(f"Invalid feature_selection: {self.feature_selection}")

        # BUGFIX: no usable split (e.g. duplicate rows with mixed labels) —
        # the original crashed indexing with feature_index=None; make a leaf.
        if feature_index is None:
            return DecisionNode(value=np.bincount(y).argmax())

        true_indices = X[:, feature_index] <= threshold
        false_indices = ~true_indices
        true_branch = self.build_tree(X[true_indices], y[true_indices], depth + 1)
        false_branch = self.build_tree(X[false_indices], y[false_indices], depth + 1)

        return DecisionNode(feature_index=feature_index, threshold=threshold,
                            true_branch=true_branch, false_branch=false_branch)

    def fit(self, X, y):
        """Train on feature matrix X (n_samples, n_features) and integer labels y."""
        self.root = self.build_tree(X, y)

    def save_model(self, filename):
        """Pickle this fitted classifier to *filename*."""
        with open(filename, 'wb') as file:
            pickle.dump(self, file)

    def predict_sample(self, x, node):
        """Predict the class of one sample *x* by walking the tree from *node*."""
        if node.value is not None:
            return node.value
        if x[node.feature_index] <= node.threshold:
            return self.predict_sample(x, node.true_branch)
        return self.predict_sample(x, node.false_branch)

    def predict(self, X):
        """Return an array of predicted class labels, one per row of X."""
        return np.array([self.predict_sample(x, self.root) for x in X])

    def score(self, X, y):
        """Return the classification accuracy on (X, y)."""
        return np.mean(self.predict(X) == y)


# Example usage: train on the breast-cancer dataset, persist the model with
# pickle, reload it, and report held-out accuracy.
data = load_breast_cancer()
X = data.data
y = data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build a tree using the Gini split criterion
# (pass feature_selection="entropy" for an information-gain tree instead).
tree_gini = DecisionTreeClassifier(max_depth=3, feature_selection="gini")
tree_gini.fit(X_train, y_train)

# Persist the trained model.
tree_gini.save_model('decision_tree_model.pkl')

# Reload it. NOTE: pickle.load is only safe on files you created yourself —
# never unpickle data from an untrusted source.
with open('decision_tree_model.pkl', 'rb') as file:
    loaded_model = pickle.load(file)

# Predict and score with the *loaded* model so the save/load round-trip is
# actually verified (the original predicted with loaded_model but then
# scored the in-memory tree_gini).
y_pred_gini = loaded_model.predict(X_test)
print(loaded_model.score(X_test, y_test))