import numpy as np
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from collections import Counter
import matplotlib.pyplot as plt

# Hand-entered watermelon dataset 2.0 (after Zhou Zhihua, "Machine Learning").
# Categorical features are integer-coded; the code for each value is noted inline.
watermelon_data = {
    # 0: pale-white, 1: green, 2: dark
    'color': [2, 1, 1, 0, 2, 0, 1, 1, 1, 0, 2, 2, 0, 2, 1, 2, 0],
    # 0: curled, 1: slightly curled, 2: stiff
    'root': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 0, 0, 1, 0, 0, 0],
    # 0: muffled, 1: dull, 2: crisp
    'sound': [0, 1, 0, 1, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 0, 1],
    # 0: clear, 1: slightly blurry, 2: blurry
    'texture': [0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 2, 2, 1, 0, 1, 0, 2],
    # 0: sunken, 1: slightly sunken, 2: flat
    'navel': [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 0, 1, 1, 1, 1],
    # 0: hard-smooth, 1: soft-sticky
    'touch': [0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0],
    'density': [0.697, 0.774, 0.634, 0.608, 0.556, 0.403, 0.481, 0.437,
                0.666, 0.243, 0.245, 0.343, 0.639, 0.657, 0.360, 0.593, 0.719],
    'sugar': [0.460, 0.376, 0.264, 0.318, 0.215, 0.237, 0.149, 0.211,
              0.091, 0.267, 0.057, 0.099, 0.161, 0.198, 0.370, 0.042, 0.103],
    # 1: good melon, 0: bad melon
    'label': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
}

# Features = everything except the label column; labels kept as a plain list.
y_watermelon = watermelon_data['label']
X_watermelon = pd.DataFrame(watermelon_data).drop(columns=['label'])

# Baseline: single decision tree evaluated with 5-fold cross-validation.
dt_watermelon = DecisionTreeClassifier(random_state=42)
scores = cross_val_score(dt_watermelon, X_watermelon, y_watermelon, cv=5)
print(f"西瓜数据集决策树准确率: {scores.mean():.4f} (±{scores.std():.4f})")

# Load the Wine dataset straight into (features, labels) arrays.
X, y = load_wine(return_X_y=True)

# Hold out 30% of the samples as the test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)


class MyRandomForest:
    """A from-scratch random forest classifier built on sklearn decision trees.

    Each tree is trained on a bootstrap sample of the rows and a random
    subset of the columns; prediction is a per-sample majority vote.
    """

    def __init__(self, n_trees=100, max_depth=None, max_features='sqrt', random_state=None):
        # n_trees: number of trees in the ensemble.
        # max_depth: depth limit forwarded to every DecisionTreeClassifier.
        # max_features: 'sqrt' draws sqrt(n_features) columns per tree;
        #               any other value uses all features.
        # random_state: seed for reproducible row/column sampling.
        self.n_trees = n_trees
        self.max_depth = max_depth
        self.max_features = max_features
        self.random_state = random_state
        self.trees = []  # list of (fitted tree, feature index array) pairs

    def fit(self, X, y):
        """Fit the ensemble on X of shape (n_samples, n_features) and labels y.

        Returns self (sklearn convention) so calls can be chained.
        """
        # Accept DataFrames/lists as well as arrays: the sampling below
        # relies on NumPy fancy indexing, which plain DataFrames don't support.
        X = np.asarray(X)
        y = np.asarray(y)

        # Private RandomState: same legacy sequence as np.random.seed +
        # np.random.choice, but without clobbering the global RNG state.
        rng = np.random.RandomState(self.random_state)

        # Reset so a second fit() call does not accumulate stale trees
        # from the previous fit (the original appended forever).
        self.trees = []

        n_samples, n_features = X.shape

        # Size of the random feature subset drawn for each tree.
        if self.max_features == 'sqrt':
            max_feats = int(np.sqrt(n_features))
        else:
            max_feats = n_features

        for _ in range(self.n_trees):
            # 1. Bootstrap sample of the rows (with replacement).
            sample_indices = rng.choice(n_samples, n_samples, replace=True)
            X_sample = X[sample_indices]
            y_sample = y[sample_indices]

            # 2. Random feature subset (without replacement).
            feat_indices = rng.choice(n_features, max_feats, replace=False)
            X_sample = X_sample[:, feat_indices]

            # 3. Fit one decision tree on the sampled view.
            tree = DecisionTreeClassifier(
                max_depth=self.max_depth,
                random_state=self.random_state
            )
            tree.fit(X_sample, y_sample)

            # Remember which columns this tree was trained on; predict()
            # must slice the same columns before querying the tree.
            self.trees.append((tree, feat_indices))
        return self

    def predict(self, X):
        """Predict class labels for X by majority vote across all trees."""
        X = np.asarray(X)
        all_preds = []
        for tree, feat_indices in self.trees:
            # Each tree only sees the feature subset it was trained on.
            all_preds.append(tree.predict(X[:, feat_indices]))

        # Majority vote per sample; Counter.most_common breaks ties in favor
        # of the first-encountered vote, matching the original behavior.
        all_preds = np.array(all_preds)
        final_pred = []
        for i in range(X.shape[0]):
            votes = all_preds[:, i]
            final_pred.append(Counter(votes).most_common(1)[0][0])

        return np.array(final_pred)
# 1. Single decision tree baseline.
dt = DecisionTreeClassifier(random_state=42).fit(X_train, y_train)
dt_acc = accuracy_score(y_test, dt.predict(X_test))

# 2. Reference implementation: scikit-learn's RandomForestClassifier.
rf_sklearn = RandomForestClassifier(
    n_estimators=100,
    max_depth=None,
    random_state=42
)
rf_sklearn.fit(X_train, y_train)
rf_sklearn_acc = accuracy_score(y_test, rf_sklearn.predict(X_test))

# 3. The hand-rolled random forest defined above.
my_rf = MyRandomForest(
    n_trees=100,
    max_depth=None,
    max_features='sqrt',
    random_state=42
)
my_rf.fit(X_train, y_train)
my_rf_acc = accuracy_score(y_test, my_rf.predict(X_test))

# Print the three test accuracies as a small aligned table.
separator = "=" * 50
print("\n" + separator)
print(f"{'模型':<20} {'测试准确率':<15}")
print(separator)
for model_name, acc in (
    ('决策树', dt_acc),
    ('Scikit-learn随机森林', rf_sklearn_acc),
    ('自定义随机森林', my_rf_acc),
):
    print(f"{model_name:<20} {acc:.4f}")
print(separator)

# Bar chart comparing the three models' test accuracies.
models = ['Decision Tree', 'Sklearn RF', 'My RF']
accuracies = [dt_acc, rf_sklearn_acc, my_rf_acc]

plt.figure(figsize=(10, 6))
bars = plt.bar(models, accuracies, color=['skyblue', 'lightgreen', 'salmon'])
plt.ylabel('Accuracy')
plt.title('Model Comparison on Wine Dataset')
plt.ylim(0.8, 1.0)

# Label each bar with its accuracy, centered just above the bar top.
for rect, acc in zip(bars, accuracies):
    x_center = rect.get_x() + rect.get_width() / 2.
    plt.text(x_center, rect.get_height(), f'{acc:.4f}',
             ha='center', va='bottom')

plt.tight_layout()
plt.show()