import numpy as np

class KNN:
    """k-nearest-neighbors classifier for the Iris dataset.

    Training data is held in memory; prediction is a brute-force distance
    scan over all training samples followed by a majority vote.
    """

    # Mapping from Iris class names to integer labels.
    # Hoisted to a class constant so it is built once, not once per file line.
    LABEL_MAP = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}

    def __init__(self, k=3):
        """
        :param k: number of nearest neighbors used in the majority vote
        """
        self.k = k
        self.X_train = None  # training features, set by load_iris()
        self.y_train = None  # training labels, set by load_iris()
        self.X_test = None   # test-set features
        self.y_test = None   # test-set labels

    def load_iris(self, file_path="iris.data", test_size=0.2, random_state=42):
        """
        Load the Iris data file and randomly split it into train/test sets.

        :param file_path: path to the comma-separated iris data file
        :param test_size: fraction of samples held out for testing (default 20%)
        :param random_state: random seed so the shuffle/split is reproducible
        :return: (X_train, X_test, y_train, y_test)
        :raises KeyError: if a line contains an unknown class name
        """
        data = []
        with open(file_path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:  # skip blank lines
                    continue
                parts = line.split(',')
                features = [float(v) for v in parts[:4]]  # 4 numeric features
                data.append(features + [self.LABEL_MAP[parts[4]]])

        # Shuffle all samples with a fixed seed for reproducible results.
        data = np.array(data)
        np.random.seed(random_state)
        np.random.shuffle(data)

        X = data[:, :4]             # first 4 columns: features
        y = data[:, 4].astype(int)  # 5th column: integer class label

        # First test_num shuffled rows become the test set, the rest train.
        test_num = int(len(X) * test_size)
        self.X_train, self.X_test = X[test_num:], X[:test_num]
        self.y_train, self.y_test = y[test_num:], y[:test_num]

        return self.X_train, self.X_test, self.y_train, self.y_test

    def euclidean_distance(self, x1, x2):
        """Return the Euclidean (L2) distance between two feature vectors."""
        return np.sqrt(np.sum((x1 - x2) ** 2))

    def predict(self, X_test):
        """
        Predict a label for each row of X_test by majority vote among the
        k nearest training samples. Ties break toward the smallest label,
        because np.unique returns labels in sorted order.

        :param X_test: iterable of feature vectors, shape (n_samples, 4)
        :return: np.ndarray of predicted integer labels
        :raises ValueError: if called before training data has been loaded
        """
        # Guard: without this, iterating a None X_train raises a cryptic TypeError.
        if self.X_train is None or self.y_train is None:
            raise ValueError("No training data: call load_iris() first")

        y_pred = []
        for test_sample in X_test:
            # Brute-force distances to every training sample.
            distances = [self.euclidean_distance(test_sample, train_sample)
                         for train_sample in self.X_train]
            # Indices of the k nearest neighbors.
            k_indices = np.argsort(distances)[:self.k]
            # Majority vote over the neighbors' labels.
            k_labels = [self.y_train[i] for i in k_indices]
            unique_labels, counts = np.unique(k_labels, return_counts=True)
            y_pred.append(unique_labels[np.argmax(counts)])
        return np.array(y_pred)

    def evaluate(self, y_pred):
        """Return accuracy of y_pred against the stored test labels y_test."""
        accuracy = np.sum(y_pred == self.y_test) / len(self.y_test)
        return accuracy

# Demo: train the classifier on Iris and report per-sample results.
if __name__ == "__main__":
    # Model with k=5 neighbors.
    model = KNN(k=5)
    # 20% of samples held out for testing; seed 42 keeps the split reproducible.
    model.load_iris(test_size=0.2, random_state=42)
    # Classify the held-out samples and score them.
    predictions = model.predict(model.X_test)
    accuracy = model.evaluate(predictions)

    # Reverse mapping: integer label -> original Iris class name.
    label_map_rev = {
        0: 'Iris-setosa',
        1: 'Iris-versicolor',
        2: 'Iris-virginica'
    }

    # Convert numeric labels back to readable class names.
    truth_names = [label_map_rev[lbl] for lbl in model.y_test]
    guess_names = [label_map_rev[lbl] for lbl in predictions]

    # Show how many samples of each class ended up in the test set.
    print("===== 测试集类别分布 =====")
    for lbl, name in label_map_rev.items():
        count = np.sum(model.y_test == lbl)
        print(f"{name}: {count}个样本")
    print()

    # Per-sample classification report.
    print("===== 测试集分类结果 =====")
    for idx, (truth, guess) in enumerate(zip(truth_names, guess_names), start=1):
        outcome = "正确" if truth == guess else "错误"
        print(f"样本{idx}:")
        print(f"  真实类别: {truth}")
        print(f"  预测类别: {guess}")
        print(f"  结果: {outcome}\n")

    # Overall accuracy summary.
    print(f"===== 模型评估 =====")
    print(f"KNN准确率: {accuracy:.2f} (共{len(model.y_test)}个测试样本)")