import numpy as np
import pandas as pd
from collections import Counter
import math
from math import log2
import os

# 数据加载函数
def load_wine_data():
    """Load the Wine data set from the local file ``wine.data``.

    Returns
    -------
    pandas.DataFrame | None
        A DataFrame with named columns (``Class`` plus 13 attributes) on
        success; ``None`` when the file is missing or cannot be parsed.
        Status messages are printed to stdout in either case.
    """
    column_names = [
        'Class', 'Alcohol', 'Malic_acid', 'Ash', 'Alcalinity_of_ash',
        'Magnesium', 'Total_phenols', 'Flavanoids', 'Nonflavanoid_phenols',
        'Proanthocyanins', 'Color_intensity', 'Hue', 'OD280/OD315', 'Proline'
    ]

    # Guard clause: bail out early when the file is absent.
    if not os.path.exists('wine.data'):
        print("wine.data文件不存在，请确保文件在当前目录")
        return None

    try:
        frame = pd.read_csv('wine.data', header=None, names=column_names)
    except Exception as e:
        print(f"本地文件加载失败: {e}")
        return None

    print("成功从本地文件加载Wine数据集！")
    return frame

# 数据预处理和标准化
def normalize_data(X):
    """Z-score standardise each column of ``X``: (x - mean) / std.

    Zero-variance columns would divide by zero; for those the divisor is
    replaced by 1, which maps every entry of such a column to 0.
    """
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    safe_sigma = np.where(sigma == 0, 1, sigma)
    return (X - mu) / safe_sigma

# ==================== KNN算法实现 ====================
class KNN:
    """k-nearest-neighbours classifier using Euclidean distance.

    The classifier is "lazy": ``fit`` only stores the training data and
    all work happens at prediction time.

    Parameters
    ----------
    k : int
        Number of neighbours whose majority vote decides the prediction.
    """
    def __init__(self, k=3):
        self.k = k
        self.X_train = None  # (n_samples, n_features) training matrix
        self.y_train = None  # (n_samples,) training labels

    def euclidean_distance(self, x1, x2):
        """Return the Euclidean distance between two feature vectors."""
        # np.sum runs the reduction in C; the original builtin sum()
        # iterated over the array element-by-element in Python.
        return math.sqrt(np.sum((x1 - x2) ** 2))

    def fit(self, X, y):
        """Store the training data (no model is actually built).

        ``np.asarray`` lets callers pass plain lists as well as ndarrays
        without copying data that is already an ndarray.
        """
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)

    def predict(self, X):
        """Predict a label for every row of ``X``; returns an ndarray."""
        return np.array([self._predict(x) for x in np.asarray(X)])

    def _predict(self, x):
        """Predict the label of a single sample ``x``.

        Distances to all training rows are computed in one vectorised
        expression instead of a Python-level loop over rows.
        """
        diffs = self.X_train - x
        distances = np.sqrt(np.sum(diffs * diffs, axis=1))
        # Indices of the k closest training samples.
        k_indices = np.argsort(distances)[:self.k]
        k_nearest_labels = [self.y_train[i] for i in k_indices]
        # Majority vote; on a tie, Counter returns the label first seen
        # among the neighbours (insertion order is preserved).
        return Counter(k_nearest_labels).most_common(1)[0][0]

    def accuracy(self, y_true, y_pred):
        """Return the fraction of predictions matching the true labels."""
        return np.sum(y_true == y_pred) / len(y_true)

# ==================== ID3决策树算法实现 ====================
class Node:
    """A single decision-tree node.

    A leaf stores the predicted class in ``value``; an internal node
    stores the split (``feature`` index plus numeric ``threshold``) and
    its two children.
    """
    def __init__(self, feature=None, threshold=None, value=None, left=None, right=None):
        self.feature = feature      # index of the splitting feature
        self.threshold = threshold  # split threshold (continuous features)
        self.value = value          # class label for leaves, else None
        self.left = left            # subtree for feature <= threshold
        self.right = right          # subtree for feature > threshold

class ID3DecisionTree:
    """Decision-tree classifier for continuous features.

    Splits are binary (``feature <= threshold``) and chosen by maximising
    information gain — the ID3 criterion extended to continuous
    attributes via candidate thresholds at midpoints of adjacent values.

    Parameters
    ----------
    max_depth : int | None
        Maximum tree depth; ``None`` means unlimited.
    min_samples_split : int
        Minimum number of samples required to attempt a split.
    """
    def __init__(self, max_depth=None, min_samples_split=2):
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.root = None
        self.feature_names = None

    def _entropy(self, y):
        """Shannon entropy (in bits) of the label vector ``y``.

        ``y`` must hold non-negative integers (np.bincount requirement).
        Returns 0 for an empty vector.
        """
        if len(y) == 0:
            return 0
        class_counts = np.bincount(y)
        probabilities = class_counts / len(y)
        # Skip zero probabilities: lim p->0 of p*log2(p) is 0.
        return -np.sum([p * log2(p) for p in probabilities if p > 0])

    def _information_gain(self, X, y, feature_idx):
        """Best information gain achievable by splitting on one feature.

        Returns ``(gain, threshold)``. ``threshold`` is ``None`` when the
        feature is constant or no candidate split yields positive gain.
        """
        parent_entropy = self._entropy(y)
        feature_values = X[:, feature_idx]
        unique_values = np.unique(feature_values)

        if len(unique_values) <= 1:
            # Constant feature: nothing to split on.
            return 0, None

        best_gain = 0
        best_threshold = None
        n_total = len(y)  # loop invariant, hoisted

        # Candidate thresholds are midpoints between consecutive values.
        for i in range(len(unique_values) - 1):
            threshold = (unique_values[i] + unique_values[i + 1]) / 2

            left_mask = feature_values <= threshold
            right_mask = ~left_mask
            n_left = int(np.sum(left_mask))
            n_right = n_total - n_left

            if n_left == 0 or n_right == 0:
                continue

            # Weighted average entropy of the two children.
            child_entropy = ((n_left / n_total) * self._entropy(y[left_mask]) +
                             (n_right / n_total) * self._entropy(y[right_mask]))

            gain = parent_entropy - child_entropy
            if gain > best_gain:
                best_gain = gain
                best_threshold = threshold

        return best_gain, best_threshold

    def _best_split(self, X, y):
        """Return ``(feature_idx, threshold)`` of the best split.

        Returns ``(None, None)`` when no feature yields a usable split.

        BUG FIX: the original initialised ``best_gain = -1`` and so could
        accept a feature whose gain was 0 and whose threshold was None
        (constant feature); the caller then evaluated
        ``X[:, f] <= None`` and crashed with TypeError. Candidates with a
        None threshold are now skipped, so such nodes become leaves.
        """
        best_gain = -1
        best_feature = None
        best_threshold = None

        for feature_idx in range(X.shape[1]):
            gain, threshold = self._information_gain(X, y, feature_idx)
            # A None threshold means the feature produced no valid split.
            if threshold is not None and gain > best_gain:
                best_gain = gain
                best_feature = feature_idx
                best_threshold = threshold

        return best_feature, best_threshold

    def _build_tree(self, X, y, depth=0):
        """Recursively build the tree; returns the subtree's root Node."""
        n_samples, n_features = X.shape
        n_classes = len(np.unique(y))

        # Stop when the node is pure, the depth limit is reached, or the
        # node has too few samples to split.
        if (n_classes == 1 or
            (self.max_depth is not None and depth >= self.max_depth) or
            n_samples < self.min_samples_split):
            return Node(value=self._most_common_label(y))

        best_feature, best_threshold = self._best_split(X, y)

        # No usable split found: emit a majority-class leaf.
        if best_feature is None:
            return Node(value=self._most_common_label(y))

        left_indices = X[:, best_feature] <= best_threshold
        right_indices = X[:, best_feature] > best_threshold

        # Both sides are guaranteed non-empty by _information_gain, so
        # each recursive call strictly shrinks the sample set.
        left_subtree = self._build_tree(X[left_indices], y[left_indices], depth + 1)
        right_subtree = self._build_tree(X[right_indices], y[right_indices], depth + 1)

        return Node(feature=best_feature, threshold=best_threshold,
                    left=left_subtree, right=right_subtree)

    def _most_common_label(self, y):
        """Return the majority class of ``y`` (0 for an empty vector)."""
        if len(y) == 0:
            return 0
        return np.bincount(y).argmax()

    def fit(self, X, y, feature_names=None):
        """Build the tree from training data.

        ``feature_names`` is optional and only used by ``print_tree``.
        """
        self.feature_names = feature_names
        self.root = self._build_tree(X, y)

    def predict(self, X):
        """Predict a class label for every row of ``X``."""
        return np.array([self._traverse_tree(x, self.root) for x in X])

    def _traverse_tree(self, x, node):
        """Walk from ``node`` down to a leaf and return its label."""
        if node.value is not None:
            return node.value
        if x[node.feature] <= node.threshold:
            return self._traverse_tree(x, node.left)
        return self._traverse_tree(x, node.right)

    def print_tree(self, node=None, depth=0, feature_names=None, max_depth=3):
        """Pretty-print the tree to stdout, truncated at ``max_depth``.

        ``node`` defaults to the root and ``feature_names`` to the names
        given to ``fit``; deeper subtrees are abbreviated with "...".
        """
        if node is None:
            node = self.root
        if feature_names is None:
            feature_names = self.feature_names

        # Abbreviate anything below the requested print depth.
        if depth > max_depth:
            print("  " * depth + "...")
            return

        indent = "  " * depth

        if node.value is not None:
            print(f"{indent}类别: {node.value}")
        else:
            feature_name = feature_names[node.feature] if feature_names else f"特征{node.feature}"
            print(f"{indent}{feature_name} <= {node.threshold:.3f}")
            self.print_tree(node.left, depth + 1, feature_names, max_depth)
            print(f"{indent}{feature_name} > {node.threshold:.3f}")
            self.print_tree(node.right, depth + 1, feature_names, max_depth)

# 手动实现训练测试分割（避免依赖sklearn）
def train_test_split(X, y, test_size=0.3, random_state=None):
    """Randomly split ``(X, y)`` into train and test partitions.

    ``test_size`` is the fraction assigned to the test set (truncated to
    an integer count). Passing ``random_state`` seeds NumPy's global RNG
    so the split is reproducible.

    Returns ``(X_train, X_test, y_train, y_test)``.
    """
    if random_state is not None:
        np.random.seed(random_state)

    total = len(X)
    # One shuffled index array drives both partitions.
    shuffled = np.random.permutation(total)
    cut = int(total * test_size)

    test_idx = shuffled[:cut]
    train_idx = shuffled[cut:]

    return X[train_idx], X[test_idx], y[train_idx], y[test_idx]

# ==================== 测试函数 ====================
def test_knn() -> None:
    """Run the KNN experiment on the Wine data set.

    Loads wine.data, prints data-set diagnostics, standardises the
    features, evaluates KNN for k in {1, 3, 5, 7, 9} on a 70/30 split,
    then reports the best k and per-sample results for 10 test rows.
    All console output is intentionally in Chinese; returns nothing.
    """
    print("=" * 60)
    print("KNN算法实现")
    print("=" * 60)
    
    # Load the data; abort early if the file could not be read.
    data = load_wine_data()
    if data is None:
        print("数据加载失败，请检查wine.data文件")
        return
    
    print("\nWine数据集基本信息:")
    print(f"数据集形状: {data.shape}")
    print(f"记录数: {data.shape[0]}, 属性数: {data.shape[1]-1}")
    print(f"类别分布:\n{data['Class'].value_counts().sort_index()}")
    
    print("\n数据内容（前5行）:")
    print(data.head())
    
    print("\n属性描述:")
    # Human-readable descriptions of the 13 physico-chemical attributes.
    attributes = [
        "1. Alcohol - 酒精",
        "2. Malic acid - 苹果酸", 
        "3. Ash - 灰分",
        "4. Alcalinity of ash - 灰分的碱度",
        "5. Magnesium - 镁",
        "6. Total phenols - 总酚",
        "7. Flavanoids - 类黄酮",
        "8. Nonflavanoid phenols - 非类黄酮酚",
        "9. Proanthocyanins - 原花青素",
        "10. Color intensity - 颜色强度",
        "11. Hue - 色调",
        "12. OD280/OD315 - 稀释葡萄酒的OD280/OD315",
        "13. Proline - 脯氨酸"
    ]
    for attr in attributes:
        print(f"  {attr}")
    
    # Separate features from the class label.
    X = data.drop('Class', axis=1).values
    y = data['Class'].values
    
    # Standardise: KNN is distance-based, so feature scaling matters.
    X = normalize_data(X)
    
    # 70/30 train/test split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    
    print(f"\n数据划分:")
    print(f"训练集大小: {X_train.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")
    
    # Evaluate several odd k values and keep the best-performing one.
    k_values = [1, 3, 5, 7, 9]
    best_accuracy = 0
    best_k = 0
    
    print("\nKNN算法实验结果:")
    print("k值\t准确率")
    print("-" * 20)
    
    for k in k_values:
        knn = KNN(k=k)
        knn.fit(X_train, y_train)
        predictions = knn.predict(X_test)
        accuracy = knn.accuracy(y_test, predictions)
        
        print(f"{k}\t{accuracy:.4f}")
        
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_k = k
    
    print(f"\n最佳k值: {best_k}")
    print(f"最高准确率: {best_accuracy:.4f}")
    
    # Re-fit with the best k and show the first 10 test predictions.
    print(f"\n详细预测结果（前10个测试样本）:")
    knn_best = KNN(k=best_k)
    knn_best.fit(X_train, y_train)
    sample_predictions = knn_best.predict(X_test[:10])
    
    correct_count = 0
    for i in range(min(10, len(X_test))):
        is_correct = y_test[i] == sample_predictions[i]
        if is_correct:
            correct_count += 1
        status = "✓" if is_correct else "✗"
        print(f"样本{i+1:2d}: 真实类别={y_test[i]}, 预测类别={sample_predictions[i]} {status}")
    
    print(f"\n前10个样本正确率: {correct_count}/10 = {correct_count/10:.2f}")

def test_id3() -> None:
    """Run the ID3 decision-tree experiment on the Wine data set.

    Loads wine.data, trains a depth-limited tree on a 70/30 split, then
    prints the test accuracy, the top of the tree, and per-sample
    results for 10 test rows. Note the features are NOT standardised
    here, unlike in test_knn — presumably because threshold-based trees
    are insensitive to monotone feature scaling. Returns nothing.
    """
    print("\n" + "=" * 60)
    print("ID3决策树算法实现")
    print("=" * 60)
    
    # Load the data; abort early if the file could not be read.
    data = load_wine_data()
    if data is None:
        print("数据加载失败，请检查wine.data文件")
        return
    
    # Separate features from the class label; keep names for print_tree.
    X = data.drop('Class', axis=1).values
    y = data['Class'].values
    feature_names = data.columns[1:].tolist()
    
    # Same split parameters as the KNN experiment, for comparability.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    
    print("ID3决策树算法:")
    print(f"训练集大小: {X_train.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")
    
    # Train a regularised tree (depth / split-size limits curb overfitting).
    print("\n训练决策树中...")
    tree = ID3DecisionTree(max_depth=5, min_samples_split=5)
    tree.fit(X_train, y_train, feature_names)
    
    # Evaluate on the held-out test set.
    predictions = tree.predict(X_test)
    accuracy = np.sum(y_test == predictions) / len(y_test)
    
    print(f"\nID3算法实验结果:")
    print(f"测试集准确率: {accuracy:.4f}")
    
    # Show only the first three levels of the learned tree.
    print("\n决策树结构（前3层）:")
    print("-" * 40)
    tree.print_tree(max_depth=3)
    
    # Per-sample breakdown for the first 10 test samples.
    print(f"\n详细预测结果（前10个测试样本）:")
    correct_count = 0
    for i in range(min(10, len(X_test))):
        is_correct = y_test[i] == predictions[i]
        if is_correct:
            correct_count += 1
        status = "✓" if is_correct else "✗"
        print(f"样本{i+1:2d}: 真实类别={y_test[i]}, 预测类别={predictions[i]} {status}")
    
    print(f"\n前10个样本正确率: {correct_count}/10 = {correct_count/10:.2f}")

# ==================== 主程序 ====================
if __name__ == "__main__":
    # Entry point: run both classifier demos against the local wine.data
    # file (each demo re-loads the file independently).
    print("机器学习算法实现 - Wine数据集分类")
    print("包含KNN和ID3决策树算法")
    print("确保wine.data文件在当前目录\n")
    
    # KNN experiment (sweeps several k values).
    test_knn()
    
    # ID3 decision-tree experiment.
    test_id3()
    
    print("\n" + "=" * 60)
    print("算法测试完成！")
    print("=" * 60)