import numpy as np
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns

# Complete watermelon dataset (user-provided); six categorical features
# plus the label in the last column ('好瓜' = good melon / '坏瓜' = bad melon)
dataSet = [['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
           ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
           ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
           ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
           ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
           ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '好瓜'],
           ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', '好瓜'],
           ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', '好瓜'],
           ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'],
           ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', '坏瓜'],
           ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', '坏瓜'],
           ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', '坏瓜'],
           ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', '坏瓜'],
           ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', '坏瓜'],
           ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '坏瓜'],
           ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', '坏瓜'],
           ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'], ]

# Wrap the raw list-of-lists in a DataFrame with named columns
# (color, root, knock sound, texture, navel, touch, label)
columns = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感', '好瓜']
watermelon_df = pd.DataFrame(dataSet, columns=columns)

# Load sklearn's built-in wine dataset (numeric features, integer class labels)
wine_data = load_wine()
X_wine = wine_data.data
y_wine = wine_data.target
feature_names = wine_data.feature_names


def preprocess_data(df):
    """Encode every object-dtype (categorical) column as integer codes.

    Works on a copy so the caller's DataFrame is left untouched; the
    original mutated its argument in place, which surprises callers who
    keep a reference to the raw frame.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose object-dtype columns should be label-encoded.

    Returns
    -------
    pandas.DataFrame
        New frame where each object column is replaced by its category
        codes (0..k-1, assigned in sorted-category order); all other
        columns are unchanged.
    """
    encoded = df.copy()
    for col in encoded.select_dtypes(include='object').columns:
        # cat.codes numbers the categories in sorted order
        encoded[col] = encoded[col].astype('category').cat.codes
    return encoded


# Encode the watermelon categoricals, then separate features from the label
watermelon_df = preprocess_data(watermelon_df)
X_watermelon = watermelon_df.drop('好瓜', axis=1)
y_watermelon = watermelon_df['好瓜']

# 70/30 train/test split for both datasets; fixed seed for reproducibility
X_watermelon_train, X_watermelon_test, y_watermelon_train, y_watermelon_test = train_test_split(
    X_watermelon, y_watermelon, test_size=0.3, random_state=42)

X_wine_train, X_wine_test, y_wine_train, y_wine_test = train_test_split(
    X_wine, y_wine, test_size=0.3, random_state=42)


# Decision tree implementation (CART-style, Gini impurity criterion)
class DecisionTree:
    """Binary decision tree classifier over numeric feature matrices.

    Splits have the form ``x[feature] < threshold`` and are chosen to
    minimise the weighted Gini impurity. Labels must be non-negative
    integers (``np.bincount`` is used to pick leaf classes).
    """

    def __init__(self, max_depth=None):
        # max_depth=None -> grow until nodes are pure or unsplittable
        self.max_depth = max_depth
        self.tree = None

    def fit(self, X, y, depth=0):
        """Build the tree on (X, y); stores and returns the root node.

        A node is either a dict with keys 'feature', 'threshold', 'left',
        'right', or a bare integer class label for a leaf. The original
        recursed through fit() itself, repeatedly clobbering self.tree on
        the way up; the recursion now lives in the private _grow helper.
        """
        self.tree = self._grow(np.asarray(X), np.asarray(y), depth)
        return self.tree

    def _grow(self, X, y, depth):
        # Recursively build a subtree; returns a node dict or a leaf label.
        n_samples = X.shape[0]

        # Stop: depth limit reached, node is pure, or too few samples.
        if ((self.max_depth is not None and depth >= self.max_depth)
                or len(np.unique(y)) == 1 or n_samples < 2):
            return np.bincount(y).argmax()

        best_feature, best_threshold = self._best_split(X, y)
        if best_feature is None:
            # No split strictly improves this node's impurity -> leaf.
            return np.bincount(y).argmax()

        left_mask = X[:, best_feature] < best_threshold
        return {
            'feature': best_feature,
            'threshold': best_threshold,
            'left': self._grow(X[left_mask], y[left_mask], depth + 1),
            'right': self._grow(X[~left_mask], y[~left_mask], depth + 1),
        }

    def _best_split(self, X, y):
        """Return (feature_index, threshold) of the lowest-Gini split.

        Returns (None, None) when no candidate split strictly beats the
        parent node's own Gini impurity.
        """
        n_samples, n_features = X.shape
        if n_samples <= 1:
            return None, None

        parent_gini = self._gini(y)
        best_gini = 1.0
        best_feature = None
        best_threshold = None

        for feature_idx in range(n_features):
            feature_values = X[:, feature_idx]
            for threshold in np.unique(feature_values):
                left_mask = feature_values < threshold
                n_left = int(left_mask.sum())
                # BUG FIX: the original checked len(mask), which for a
                # boolean mask is always n_samples, so the empty-side
                # guard never fired; count the True entries instead and
                # skip degenerate splits outright.
                if n_left == 0 or n_left == n_samples:
                    continue
                gini = self._weighted_gini(y[left_mask], y[~left_mask])
                if gini < best_gini:
                    best_gini = gini
                    best_feature = feature_idx
                    best_threshold = threshold

        # Require a strict improvement over the unsplit node.
        if best_gini >= parent_gini:
            return None, None
        return best_feature, best_threshold

    def _gini(self, y):
        """Gini impurity 1 - sum(p_k^2) of the label vector y (0 if empty)."""
        if len(y) == 0:
            return 0
        _, counts = np.unique(y, return_counts=True)
        proportions = counts / len(y)
        return 1 - np.sum(proportions ** 2)

    def _weighted_gini(self, left_y, right_y):
        """Size-weighted average Gini impurity of the two child partitions."""
        n_left = len(left_y)
        n_right = len(right_y)
        n_total = n_left + n_right
        return ((n_left / n_total) * self._gini(left_y)
                + (n_right / n_total) * self._gini(right_y))

    def predict(self, X):
        """Predict a class label for every row of X (returns a float array)."""
        # Degenerate tree: a single leaf predicts the same label everywhere.
        if not isinstance(self.tree, dict):
            return np.array([self.tree] * len(X))

        predictions = np.zeros(len(X))
        for i, x in enumerate(X):
            node = self.tree
            # Walk down until a leaf (non-dict node) is reached.
            while isinstance(node, dict):
                if x[node['feature']] < node['threshold']:
                    node = node['left']
                else:
                    node = node['right']
            predictions[i] = node
        return predictions


# Random forest implementation: bagging + random feature subsets
class MyRandomForest:
    """Bootstrap-aggregated ensemble of DecisionTree classifiers.

    Each tree is trained on a bootstrap resample of the data restricted
    to a random feature subset; prediction is by majority vote.
    """

    def __init__(self, n_estimators=100, max_depth=None, max_features=None):
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        # None -> sqrt(n_features), resolved at fit time (not stored back)
        self.max_features = max_features
        self.trees = []

    def fit(self, X, y):
        """Train n_estimators trees on bootstrap samples of (X, y)."""
        n_samples, n_features = X.shape

        # Resolve the per-tree feature-subset size locally. The original
        # wrote the resolved value back into self.max_features, so a
        # second fit() on data of a different width reused a stale size.
        if self.max_features is None:
            n_sub = int(np.sqrt(n_features))
        else:
            n_sub = self.max_features
        # Clamp to a valid range for replace=False sampling.
        n_sub = max(1, min(n_sub, n_features))

        # Reset the ensemble so refitting does not accumulate trees; the
        # original kept appending, which broke predict()'s assumption of
        # exactly n_estimators fitted trees.
        self.trees = []

        for _ in range(self.n_estimators):
            # Bootstrap resample (with replacement)
            indices = np.random.choice(n_samples, size=n_samples, replace=True)
            X_bootstrap = X[indices]
            y_bootstrap = y[indices]

            # Random feature subset (without replacement)
            feature_indices = np.random.choice(n_features, size=n_sub, replace=False)

            tree = DecisionTree(max_depth=self.max_depth)
            tree.fit(X_bootstrap[:, feature_indices], y_bootstrap)

            # Keep the feature subset alongside the tree for prediction.
            self.trees.append((tree, feature_indices))

    def predict(self, X):
        """Predict by majority vote over all fitted trees."""
        # Size the vote matrix by the trees actually fitted, not the
        # configured n_estimators (robust even if fit was re-run).
        votes = np.zeros((len(X), len(self.trees)))

        for i, (tree, feature_indices) in enumerate(self.trees):
            votes[:, i] = tree.predict(X[:, feature_indices])

        # Per-row majority vote
        return np.apply_along_axis(
            lambda row: np.bincount(row.astype(int)).argmax(),
            axis=1,
            arr=votes
        )


# Train and evaluate all four models on one dataset
def evaluate_models(X_train, X_test, y_train, y_test, dataset_name):
    """Fit the sklearn and custom tree/forest models; print and return results.

    Parameters
    ----------
    X_train, X_test, y_train, y_test : array-like
        Feature/label splits (converted to numpy arrays internally).
    dataset_name : str
        Display name used in the printed report.

    Returns
    -------
    dict
        The FIRST four keys are the model identifiers, each mapping to
        {'accuracy', 'predictions', 'model'} — plot_results slices the
        first four keys, so this insertion order is part of the contract —
        followed by 'X_test' and 'y_test'.
    """
    X_train = np.asarray(X_train)
    X_test = np.asarray(X_test)
    y_train = np.asarray(y_train)
    y_test = np.asarray(y_test)

    # (result key, display label, model factory) — replaces the fourfold
    # copy-pasted fit/predict/score sequence of the original.
    specs = [
        ('sklearn_tree', 'sklearn决策树',
         lambda: DecisionTreeClassifier(max_depth=5, random_state=42)),
        ('my_tree', '自定义决策树',
         lambda: DecisionTree(max_depth=5)),
        ('sklearn_rf', 'sklearn随机森林',
         lambda: RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)),
        ('my_rf', '自定义随机森林',
         lambda: MyRandomForest(n_estimators=100, max_depth=5)),
    ]

    results = {}
    print(f"\n{dataset_name}数据集评估结果:")
    for key, label, build in specs:
        model = build()
        model.fit(X_train, y_train)
        predictions = model.predict(X_test)
        accuracy = accuracy_score(y_test, predictions)
        print(f"{label}准确率: {accuracy:.4f}")
        results[key] = {'accuracy': accuracy, 'predictions': predictions, 'model': model}

    # Keep the test split for downstream confusion-matrix plotting.
    results['X_test'] = X_test
    results['y_test'] = y_test
    return results


# Run the full evaluation on both datasets; the returned dicts are
# consumed by plot_results below
watermelon_results = evaluate_models(
    X_watermelon_train, X_watermelon_test, y_watermelon_train, y_watermelon_test, "西瓜"
)

wine_results = evaluate_models(
    X_wine_train, X_wine_test, y_wine_train, y_wine_test, "Wine"
)


# Result visualization
def plot_results(watermelon_results, wine_results):
    """Draw an accuracy bar chart plus per-model confusion-matrix heatmaps.

    Expects the dicts produced by evaluate_models: the first four keys
    are model identifiers, plus 'y_test' holding the true labels.
    """
    # Map result keys to their Chinese display names (the last entry is
    # the fallback, matching the original's chained conditional).
    key_to_name = {
        'sklearn_tree': 'sklearn决策树',
        'my_tree': '自定义决策树',
        'sklearn_rf': 'sklearn随机森林',
    }
    model_keys = list(watermelon_results.keys())[:4]

    # --- 1. Accuracy comparison bar chart ---------------------------------
    melon_scores = [watermelon_results[k]['accuracy'] for k in model_keys]
    wine_scores = [wine_results[k]['accuracy'] for k in model_keys]

    positions = np.arange(len(model_keys))
    bar_width = 0.35

    fig, ax = plt.subplots(figsize=(10, 6))
    ax.bar(positions - bar_width / 2, melon_scores, bar_width, label='西瓜数据集')
    ax.bar(positions + bar_width / 2, wine_scores, bar_width, label='Wine数据集')

    ax.set_ylabel('准确率')
    ax.set_title('不同模型在不同数据集上的准确率比较')
    ax.set_xticks(positions)
    ax.set_xticklabels(['sklearn决策树', '自定义决策树', 'sklearn随机森林', '自定义随机森林'])
    ax.legend()

    plt.tight_layout()
    plt.show()

    # --- 2. Confusion-matrix heatmaps, one 2x2 figure per dataset ---------
    for dataset_name, results in (('西瓜', watermelon_results), ('Wine', wine_results)):
        fig, axes = plt.subplots(2, 2, figsize=(12, 10))

        for panel, key in zip(axes.flatten(), model_keys):
            cm = confusion_matrix(results['y_test'], results[key]['predictions'])

            # Watermelon is binary with fixed names; wine labels are generated
            # from the number of distinct classes in the test labels.
            if dataset_name == '西瓜':
                tick_labels = ['坏瓜', '好瓜']
            else:
                tick_labels = [f'类别{i}' for i in range(len(np.unique(results['y_test'])))]

            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                        xticklabels=tick_labels, yticklabels=tick_labels,
                        ax=panel)

            model_name = key_to_name.get(key, '自定义随机森林')
            panel.set_title(f'{dataset_name}数据集 - {model_name}')
            panel.set_xlabel('预测标签')
            panel.set_ylabel('真实标签')

        plt.tight_layout()
        plt.show()


# Render the accuracy comparison and confusion-matrix figures
plot_results(watermelon_results, wine_results)
