import numpy as np
import pandas as pd
from collections import Counter
import math

class KNN:
    """k-nearest-neighbors classifier using Euclidean distance and majority vote.

    The model is "lazy": fit() only stores the training data; all work
    happens at prediction time.
    """

    def __init__(self, k=3):
        self.k = k            # number of neighbors consulted per prediction
        self.X_train = None   # training feature matrix, set by fit()
        self.y_train = None   # training label vector, set by fit()

    def euclidean_distance(self, x1, x2):
        """Return the Euclidean distance between two feature vectors."""
        return math.sqrt(sum((x1 - x2) ** 2))

    def fit(self, X, y):
        """Store the training data (no actual optimization is performed).

        Inputs are coerced with np.asarray so plain Python lists work too:
        _predict relies on array broadcasting and integer-array indexing.
        """
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)

    def predict(self, X):
        """Return an ndarray with the predicted class label for each row of X."""
        if self.X_train is None or self.y_train is None:
            raise RuntimeError("fit() must be called before predict()")
        return np.array([self._predict(x) for x in np.asarray(X)])

    def _predict(self, x):
        """Predict one sample's label by majority vote of the k nearest neighbors."""
        # Vectorized distances to every training row (one NumPy pass instead
        # of a Python-level loop over rows).
        distances = np.linalg.norm(self.X_train - x, axis=1)

        # Indices of the k closest training samples.
        k_indices = np.argsort(distances)[:self.k]

        # Majority vote among their labels; ties resolve to the label seen
        # first in distance order, matching Counter.most_common semantics.
        most_common = Counter(self.y_train[k_indices].tolist()).most_common(1)
        return most_common[0][0]

    def accuracy(self, y_true, y_pred):
        """Return the fraction of predictions that match the true labels."""
        return np.sum(y_true == y_pred) / len(y_true)

# Data preprocessing / standardization
def normalize_data(X):
    """Z-score standardize each column of X: (x - mean) / std.

    A zero-variance (constant) column would cause division by zero and fill
    the output with NaN; its std is replaced with 1 so such columns map to
    all zeros instead. Non-constant columns are unaffected by the guard.
    """
    X = np.asarray(X, dtype=float)
    mean = np.mean(X, axis=0)
    std = np.std(X, axis=0)
    std = np.where(std == 0, 1.0, std)  # guard against constant features
    return (X - mean) / std

def load_wine_data():
    """Load the UCI Wine dataset as a pandas DataFrame.

    Tries the UCI repository first; on failure, falls back to a local
    ``wine.data`` file in the current directory (the behavior the original
    comment promised but never implemented). Returns None when neither
    source is available.
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
    column_names = [
        'Class', 'Alcohol', 'Malic_acid', 'Ash', 'Alcalinity_of_ash',
        'Magnesium', 'Total_phenols', 'Flavanoids', 'Nonflavanoid_phenols',
        'Proanthocyanins', 'Color_intensity', 'Hue', 'OD280/OD315', 'Proline'
    ]

    # Catch Exception rather than using a bare except, which would also
    # swallow KeyboardInterrupt/SystemExit.
    try:
        return pd.read_csv(url, header=None, names=column_names)
    except Exception:
        try:
            # Fall back to a local copy of the dataset.
            return pd.read_csv("wine.data", header=None, names=column_names)
        except Exception:
            print("在线下载失败，请确保文件wine.data在当前目录")
            return None

# Demo: run the KNN classifier on the Wine dataset
def test_knn():
    """Evaluate KNN on the Wine dataset for several k values and report accuracy."""
    # Load data; bail out quietly if neither the download nor a local copy worked.
    data = load_wine_data()
    if data is None:
        return

    print("Wine数据集基本信息:")
    print(f"数据集形状: {data.shape}")
    print(f"类别分布:\n{data['Class'].value_counts().sort_index()}")
    print("\n前5行数据:")
    print(data.head())

    # Split features and labels.
    X = data.drop('Class', axis=1).values
    y = data['Class'].values

    # Split BEFORE normalizing: computing mean/std on the full dataset leaks
    # test-set statistics into the training data (data leakage). Statistics
    # are taken from the training split only and applied to both splits.
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    std[std == 0] = 1.0  # guard constant features against division by zero
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std

    print(f"\n训练集大小: {X_train.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")

    # Try several k values and keep the best-performing fitted model.
    k_values = [1, 3, 5, 7, 9]
    best_accuracy = 0
    best_k = 0
    best_model = None

    print("\nKNN算法结果:")
    print("k值\t准确率")
    print("-" * 15)

    for k in k_values:
        knn = KNN(k=k)
        knn.fit(X_train, y_train)
        predictions = knn.predict(X_test)
        accuracy = knn.accuracy(y_test, predictions)

        print(f"{k}\t{accuracy:.4f}")

        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_k = k
            best_model = knn  # keep the fitted model instead of refitting later

    print(f"\n最佳k值: {best_k}, 准确率: {best_accuracy:.4f}")

    # Show a few predictions from the best model (no redundant re-training).
    print("\n前10个测试样本的真实类别 vs 预测类别:")
    sample_predictions = best_model.predict(X_test[:10])

    for i in range(10):
        print(f"样本{i+1}: 真实={y_test[i]}, 预测={sample_predictions[i]}, {'正确' if y_test[i] == sample_predictions[i] else '错误'}")

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    test_knn()