import numpy as np
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import random
import copy
from collections import Counter

# Fix both RNG seeds (NumPy and the stdlib) so results are reproducible.
np.random.seed(42)
random.seed(42)

# Custom ID3-style decision tree for categorical features.
class MyDecisionTree:
    """ID3 decision tree over categorical (discrete) features.

    Internal nodes are dicts with keys:
      'feature_index'  -- column index into the *reduced* feature matrix at
                          that depth (used columns are removed as the tree
                          grows; see np.delete in _build_tree)
      'feature_name'   -- human-readable name of the split feature
      'majority_class' -- fallback label for unseen feature values
      'children'       -- mapping: feature value -> subtree
    Leaves are plain class labels.
    """

    def __init__(self, max_depth=None):
        # None means grow until pure / out of features.
        self.max_depth = max_depth

    def fit(self, X, y, feature_names=None):
        """Build the tree from categorical matrix X and label vector y."""
        if feature_names is None:
            feature_names = [str(i) for i in range(X.shape[1])]
        self.tree = self._build_tree(X, y, feature_names, depth=0)
        return self

    def predict(self, X):
        """Return an array of predicted labels, one per row of X."""
        return np.array([self._predict_sample(sample) for sample in X])

    def _predict_sample(self, sample):
        node = self.tree
        # BUG FIX: each subtree was trained on a matrix whose chosen column
        # had been removed (np.delete in _build_tree), so every stored
        # 'feature_index' refers to that reduced feature set.  We must drop
        # the consumed feature from the sample as we descend; indexing the
        # full sample reads the wrong columns below the root.
        remaining = list(sample)
        while isinstance(node, dict):
            feature_index = node['feature_index']
            value = remaining[feature_index]
            if value in node['children']:
                node = node['children'][value]
                # Mirror the training-time column deletion.
                del remaining[feature_index]
            else:
                # Unseen feature value: fall back to this node's majority class.
                return node['majority_class']
        return node

    def _build_tree(self, X, y, feature_names, depth):
        # All samples share one class -> pure leaf.
        if len(set(y)) == 1:
            return y[0]

        # Out of features, or reached max depth -> majority-class leaf.
        if len(feature_names) == 0 or (self.max_depth is not None and depth >= self.max_depth):
            return Counter(y).most_common(1)[0][0]

        # Pick the feature with the largest information gain.
        best_feature_index, best_info_gain = self._choose_best_feature(X, y)

        if best_info_gain < 0.001:  # gain too small to be worth a split
            return Counter(y).most_common(1)[0][0]

        best_feature_name = feature_names[best_feature_index]
        tree = {'feature_index': best_feature_index, 'feature_name': best_feature_name, 'children': {}}

        # Every value this feature takes in the current subset.
        feature_values = set(X[:, best_feature_index])

        # Feature-name list for the children (chosen feature is consumed).
        remaining_feature_names = feature_names[:best_feature_index] + feature_names[best_feature_index+1:]

        # Majority class at this node, used for unseen values at predict time.
        tree['majority_class'] = Counter(y).most_common(1)[0][0]

        for value in feature_values:
            subset_indices = np.where(X[:, best_feature_index] == value)[0]
            if len(subset_indices) == 0:
                # Defensive only: values are drawn from X, so this never triggers.
                tree['children'][value] = Counter(y).most_common(1)[0][0]
            else:
                X_subset = X[subset_indices]
                y_subset = y[subset_indices]

                # Remove the consumed feature column before recursing.
                X_subset = np.delete(X_subset, best_feature_index, axis=1)

                subtree = self._build_tree(X_subset, y_subset, remaining_feature_names, depth + 1)
                tree['children'][value] = subtree

        return tree

    def _choose_best_feature(self, X, y):
        """Return (index, gain) of the feature with maximal information gain."""
        base_entropy = self._calculate_entropy(y)
        num_features = X.shape[1]
        best_info_gain = -1
        best_feature_index = -1

        for feature_index in range(num_features):
            feature_values = X[:, feature_index]
            info_gain = self._calculate_info_gain(y, feature_values, base_entropy)
            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature_index = feature_index

        return best_feature_index, best_info_gain

    def _calculate_entropy(self, y):
        """Base-2 Shannon entropy of the label vector."""
        classes, counts = np.unique(y, return_counts=True)
        proportions = counts / len(y)
        # 1e-10 guards log2(0); np.unique never yields zero counts here.
        entropy = -np.sum(proportions * np.log2(proportions + 1e-10))
        return entropy

    def _calculate_info_gain(self, y, feature_values, base_entropy):
        """Information gain of splitting y on the given feature column."""
        feature_values_unique = np.unique(feature_values)
        new_entropy = 0

        for value in feature_values_unique:
            subset_indices = np.where(feature_values == value)[0]
            subset_y = y[subset_indices]
            proportion = len(subset_y) / len(y)
            new_entropy += proportion * self._calculate_entropy(subset_y)

        info_gain = base_entropy - new_entropy
        return info_gain

# Custom random forest: bagging over MyDecisionTree learners.
class MyRandomForest:
    """Bootstrap-aggregated ensemble of MyDecisionTree classifiers.

    Each tree is trained on a bootstrap sample of the rows and an optional
    random subset of the columns; prediction is a per-sample majority vote.
    """

    def __init__(self, n_estimators=10, max_depth=None, max_features=None):
        self.n_estimators = n_estimators  # number of trees in the ensemble
        self.max_depth = max_depth        # depth cap handed to each tree
        self.max_features = max_features  # columns per tree (None = all)
        self.trees = []                   # list of (tree, feature_indices)

    def fit(self, X, y, feature_names=None):
        """Train the ensemble on categorical matrix X and labels y."""
        if feature_names is None:
            feature_names = [str(i) for i in range(X.shape[1])]

        # BUG FIX: reset the ensemble, otherwise refitting the same instance
        # on a new dataset (as main() does for the wine data) keeps voting
        # with stale trees whose feature indices refer to the old dataset.
        self.trees = []

        for _ in range(self.n_estimators):
            # Bootstrap sample of the rows (with replacement).
            indices = np.random.choice(len(X), size=len(X), replace=True)
            X_bootstrap = X[indices]
            y_bootstrap = y[indices]

            # Random subset of the columns.
            if self.max_features is None:
                selected_feature_indices = np.arange(X.shape[1])
            else:
                n_features = min(self.max_features, X.shape[1])
                selected_feature_indices = np.random.choice(X.shape[1], size=n_features, replace=False)

            X_bootstrap_features = X_bootstrap[:, selected_feature_indices]
            selected_feature_names = [feature_names[i] for i in selected_feature_indices]

            tree = MyDecisionTree(max_depth=self.max_depth)
            tree.fit(X_bootstrap_features, y_bootstrap, selected_feature_names)

            # Remember which columns this tree saw, for prediction time.
            self.trees.append((tree, selected_feature_indices))

        return self

    def predict(self, X):
        """Majority vote over all trees' predictions for each row of X."""
        predictions = []
        for tree, feature_indices in self.trees:
            predictions.append(tree.predict(X[:, feature_indices]))

        # Transpose to (n_samples, n_trees), then vote per sample.
        votes = np.array(predictions).T
        return np.array([Counter(row).most_common(1)[0][0] for row in votes])

# Turn the raw watermelon rows into shuffled train/test feature/label arrays.
def process_watermelon_data(dataSet):
    """Split the watermelon rows 70/30 into shuffled train/test arrays.

    Each row is a list of categorical feature strings with the class label
    in the last position.  Returns (X_train, X_test, y_train, y_test,
    feature_names).
    """
    X = np.array([record[:-1] for record in dataSet])
    y = np.array([record[-1] for record in dataSet])

    # Random order of the rows, then a 70/30 cut.
    order = np.random.permutation(len(X))
    cut = int(0.7 * len(X))
    train_idx, test_idx = order[:cut], order[cut:]

    feature_names = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感']

    return X[train_idx], X[test_idx], y[train_idx], y[test_idx], feature_names

# Load and discretize the sklearn wine dataset.
def process_wine_data():
    """Load the wine data, binarize each feature at its median, split 70/30.

    Returns (X_train, X_test, y_train, y_test, feature_names), where the
    feature matrices contain the strings 'low' / 'high'.
    """
    wine = load_wine()
    X = wine.data
    y = wine.target
    feature_names = wine.feature_names

    # Discretize every continuous feature at its column median so the
    # categorical MyDecisionTree can consume it.
    # BUG FIX: the previous np.zeros_like(X, dtype=str) buffer had dtype
    # '<U1', silently truncating 'low'/'high' to 'l'/'h'.  A single
    # np.where over the whole matrix (medians broadcast per column) keeps
    # the intended labels and vectorizes the loop.
    medians = np.median(X, axis=0)
    X_discrete = np.where(X <= medians, 'low', 'high')

    X_train, X_test, y_train, y_test = train_test_split(
        X_discrete, y, test_size=0.3, random_state=42
    )

    return X_train, X_test, y_train, y_test, feature_names

# One-hot encode the categorical matrices so sklearn estimators accept them.
def encode_watermelon_data(X_train, X_test):
    """Fit a OneHotEncoder on X_train and transform both splits.

    Unknown categories in X_test are encoded as all-zeros rather than
    raising, via handle_unknown='ignore'.
    """
    enc = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
    return enc.fit_transform(X_train), enc.transform(X_test)

# Fit one model and measure its accuracy on both splits.
def evaluate_model(model, X_train, X_test, y_train, y_test, feature_names=None, is_sklearn_model=False):
    """Train `model` and return (train_accuracy, test_accuracy).

    sklearn estimators receive one-hot encoded inputs; the custom models
    consume the raw categorical matrices (and take feature_names in fit).
    """
    if is_sklearn_model:
        # sklearn estimators need numeric input -> one-hot encode first.
        enc_train, enc_test = encode_watermelon_data(X_train, X_test)
        model.fit(enc_train, y_train)
        pred_train = model.predict(enc_train)
        pred_test = model.predict(enc_test)
    else:
        # Custom models work directly on the categorical strings.
        model.fit(X_train, y_train, feature_names)
        pred_train = model.predict(X_train)
        pred_test = model.predict(X_test)

    return (accuracy_score(y_train, pred_train),
            accuracy_score(y_test, pred_test))

# Main driver: run all four models on both datasets and plot the results.
def main():
    """Compare custom and sklearn tree/forest models on two datasets.

    Evaluates four classifiers on the watermelon toy dataset and the wine
    dataset, prints train/test accuracies, and saves a bar-chart comparison
    to 'model_comparison.png'.
    """
    # The classic watermelon toy dataset; the last column is the label.
    dataSet = [['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
               ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
               ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
               ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
               ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
               ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '好瓜'],
               ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', '好瓜'],
               ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', '好瓜'],
               ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'],
               ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', '坏瓜'],
               ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', '坏瓜'],
               ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', '坏瓜'],
               ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', '坏瓜'],
               ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', '坏瓜'],
               ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '坏瓜'],
               ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', '坏瓜'],
               ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'], ]
    
    # Evaluate on the watermelon dataset.
    print("=== 西瓜数据集评估 ===")
    X_train_wm, X_test_wm, y_train_wm, y_test_wm, feature_names_wm = process_watermelon_data(dataSet)
    
    # Models under comparison: name -> (instance, needs sklearn-style encoding).
    models = {
        "自定义决策树": (MyDecisionTree(max_depth=5), False),
        "sklearn决策树": (DecisionTreeClassifier(max_depth=5, random_state=42), True),
        "自定义随机森林": (MyRandomForest(n_estimators=10, max_depth=5, max_features=2), False),
        "sklearn随机森林": (RandomForestClassifier(n_estimators=10, max_depth=5, max_features=2, random_state=42), True)
    }
    
    results_wm = {}
    for name, (model, is_sklearn) in models.items():
        train_acc, test_acc = evaluate_model(
            model, X_train_wm, X_test_wm, y_train_wm, y_test_wm, feature_names_wm, is_sklearn
        )
        results_wm[name] = (train_acc, test_acc)
        print(f"{name} - 训练集准确率: {train_acc:.4f}, 测试集准确率: {test_acc:.4f}")
    
    # Evaluate on the wine dataset.
    # NOTE(review): the same model instances from the watermelon run are
    # refit here, so each model's fit() must fully reset its state.
    print("\n=== Wine数据集评估 ===")
    X_train_wine, X_test_wine, y_train_wine, y_test_wine, feature_names_wine = process_wine_data()
    
    results_wine = {}
    for name, (model, is_sklearn) in models.items():
        if name.startswith("sklearn"):
            # sklearn models consume the discretized matrix directly
            # (already string-valued; no one-hot encoding applied here).
            model.fit(X_train_wine, y_train_wine)
            y_train_pred = model.predict(X_train_wine)
            y_test_pred = model.predict(X_test_wine)
        else:
            # Custom models take feature names as a third fit argument.
            model.fit(X_train_wine, y_train_wine, feature_names_wine)
            y_train_pred = model.predict(X_train_wine)
            y_test_pred = model.predict(X_test_wine)
        
        train_accuracy = accuracy_score(y_train_wine, y_train_pred)
        test_accuracy = accuracy_score(y_test_wine, y_test_pred)
        results_wine[name] = (train_accuracy, test_accuracy)
        print(f"{name} - 训练集准确率: {train_accuracy:.4f}, 测试集准确率: {test_accuracy:.4f}")
    
    # Visualize the results as grouped bar charts.
    plt.figure(figsize=(12, 10))
    
    # Watermelon results (top subplot).
    plt.subplot(2, 1, 1)
    model_names = list(results_wm.keys())
    train_accs = [results_wm[name][0] for name in model_names]
    test_accs = [results_wm[name][1] for name in model_names]
    
    x = np.arange(len(model_names))
    width = 0.35
    
    plt.bar(x - width/2, train_accs, width, label='训练集准确率')
    plt.bar(x + width/2, test_accs, width, label='测试集准确率')
    
    plt.ylabel('准确率')
    plt.title('西瓜数据集模型性能对比')
    plt.xticks(x, model_names, rotation=45)
    plt.legend()
    plt.ylim(0, 1.1)
    
    # Wine results (bottom subplot; reuses `width` from above).
    plt.subplot(2, 1, 2)
    model_names = list(results_wine.keys())
    train_accs = [results_wine[name][0] for name in model_names]
    test_accs = [results_wine[name][1] for name in model_names]
    
    x = np.arange(len(model_names))
    
    plt.bar(x - width/2, train_accs, width, label='训练集准确率')
    plt.bar(x + width/2, test_accs, width, label='测试集准确率')
    
    plt.ylabel('准确率')
    plt.title('Wine数据集模型性能对比')
    plt.xticks(x, model_names, rotation=45)
    plt.legend()
    plt.ylim(0, 1.1)
    
    plt.tight_layout()
    plt.savefig('model_comparison.png')
    plt.show()

if __name__ == "__main__":
    main()