import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split


# Information gain of a candidate split
def information_gain(y, y1, y2):
    """Return entropy(y) minus the size-weighted entropies of y1 and y2."""
    weight_left = len(y1) / float(len(y))
    weight_right = 1 - weight_left
    return entropy(y) - weight_left * entropy(y1) - weight_right * entropy(y2)


# Shannon entropy (base 2)
def entropy(y):
    """Return the base-2 entropy of the integer label sequence y."""
    probs = np.bincount(y) / len(y)
    # Drop zero-probability classes: 0 * log2(0) is treated as 0.
    nonzero = [p for p in probs if p > 0]
    return -np.sum([p * np.log2(p) for p in nonzero])


# Purity check
def is_all_same(y):
    """Return True iff every label in y is identical."""
    return np.unique(y).size == 1


# Majority vote over integer labels
def majority_vote(y):
    """Return the most frequent label in y (smallest label wins ties)."""
    counts = np.bincount(y)
    return counts.argmax()


# Binary split on one feature column
def split_dataset(X, y, feature, threshold):
    """Partition (X, y): rows with X[:, feature] < threshold go left,
    rows with X[:, feature] >= threshold go right."""
    column = X[:, feature]
    below = column < threshold
    at_or_above = column >= threshold
    return X[below], X[at_or_above], y[below], y[at_or_above]


# Exhaustive search for the best (feature, threshold) split
def find_best_split(X, y):
    """Try every unique value of every feature as a threshold and return
    the (feature, threshold) pair with the highest information gain,
    or (None, None) if no split yields positive gain."""
    best = (None, None)
    best_gain = 0
    n_features = X.shape[1]
    for feat in range(n_features):
        for thresh in np.unique(X[:, feat]):
            _, _, y_lo, y_hi = split_dataset(X, y, feat, thresh)
            # Skip degenerate splits where one side is empty.
            if len(y_lo) == 0 or len(y_hi) == 0:
                continue
            gain = information_gain(y, y_lo, y_hi)
            if gain > best_gain:
                best_gain = gain
                best = (feat, thresh)
    return best


# Build the decision tree recursively
def build_tree(X, y, depth=0, max_depth=None):
    """Recursively build an ID3-style decision tree.

    Parameters:
        X: ndarray of shape (n_samples, n_features).
        y: ndarray of non-negative integer class labels.
        depth: current recursion depth (internal, starts at 0).
        max_depth: stop splitting once this depth is reached (None = unlimited).

    Returns:
        A nested (feature, threshold, left_child, right_child) tuple for an
        internal node, or a bare class label for a leaf.
    """
    # Stop when the node is pure or the depth budget is spent.
    # (The original checked purity twice: is_all_same(y) and
    # len(np.unique(y)) == 1 are the same test — one check suffices.)
    if is_all_same(y) or (max_depth is not None and depth >= max_depth):
        return majority_vote(y)

    feature, threshold = find_best_split(X, y)

    # No split gives positive information gain -> make a leaf.
    if feature is None:
        return majority_vote(y)

    X_left, X_right, y_left, y_right = split_dataset(X, y, feature, threshold)

    left_child = build_tree(X_left, y_left, depth + 1, max_depth)
    right_child = build_tree(X_right, y_right, depth + 1, max_depth)

    return (feature, threshold, left_child, right_child)


# Predict the class of a single sample
def predict(tree, x):
    """Walk sample x down the tree; a non-tuple node is a leaf label."""
    node = tree
    while isinstance(node, tuple):
        feature, threshold, left_child, right_child = node
        node = left_child if x[feature] < threshold else right_child
    return node


# Load the iris dataset.
iris = load_iris()
X, y = iris.data, iris.target

# Hold out 20% of the samples for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1
)

# Fit a depth-limited decision tree on the training split.
tree = build_tree(X_train, y_train, max_depth=3)

# Predict every sample in both splits.
y_pred_train = [predict(tree, row) for row in X_train]
y_pred_test = [predict(tree, row) for row in X_test]

# Accuracy = fraction of correct predictions.
train_accuracy = np.mean(y_pred_train == y_train)
test_accuracy = np.mean(y_pred_test == y_test)

print(f"Training Accuracy: {train_accuracy:.3f}")
print(f"Test Accuracy: {test_accuracy:.3f}")
