from collections import Counter
import random
from urllib.request import urlretrieve

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Global matplotlib font configuration so Chinese labels render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei supplies CJK glyphs
plt.rcParams['axes.unicode_minus'] = False     # keep the minus sign renderable with a CJK font

class KNNClassifier:
    """k-nearest-neighbours classifier using Euclidean distance and majority vote."""

    def __init__(self, k=3):
        """Initialise the classifier.

        Args:
            k: number of neighbours consulted per prediction (default 3).
        """
        self.k = k
        self.X_train = None  # training features, populated by fit()
        self.y_train = None  # training labels, populated by fit()

    def fit(self, X, y):
        """Memorise the training data (KNN is a lazy learner — no real training).

        Inputs are coerced to numpy arrays so predict() can vectorise over rows.
        """
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)

    def euclidean_distance(self, x1, x2):
        """Return the Euclidean (L2) distance between two feature vectors."""
        return np.sqrt(np.sum((x1 - x2) ** 2))

    def predict(self, X):
        """Predict a label for every row of X.

        Args:
            X: 2-D array-like of shape (n_samples, n_features).

        Returns:
            1-D numpy array of predicted labels, one per row of X.
        """
        predictions = []
        for x in np.asarray(X):
            # Distances to ALL training rows in one vectorised call —
            # identical values to calling euclidean_distance() per row,
            # but computed in C by numpy instead of a Python loop.
            distances = np.linalg.norm(self.X_train - x, axis=1)

            # Indices of the k closest training samples. argsort is stable,
            # so equal distances keep training order.
            k_indices = np.argsort(distances)[:self.k]
            k_nearest_labels = [self.y_train[i] for i in k_indices]

            # Majority vote among the k neighbours.
            predictions.append(self._most_common(k_nearest_labels))

        return np.array(predictions)

    def _most_common(self, lst):
        """Return the most frequent element of lst; ties go to the earliest seen.

        Counter.most_common sorts stably by count, so on a tie the
        first-inserted label wins — same behaviour as a hand-rolled
        dict scanned with max(count, key=count.get).
        """
        return Counter(lst).most_common(1)[0][0]

    def accuracy(self, y_true, y_pred):
        """Return the fraction of predictions equal to the true labels."""
        return np.sum(y_true == y_pred) / len(y_true)

# Load the Iris data set
def load_iris_data(filename="iris.data"):
    """Download the UCI Iris data set and return features and labels.

    Args:
        filename: local path the raw CSV is written to (default "iris.data").

    Returns:
        X: (n_samples, 4) array of sepal/petal measurements.
        y: (n_samples,) integer array of class codes (0/1/2).

    Raises:
        ValueError: if the downloaded file contains an unrecognised class name.
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
    urlretrieve(url, filename)

    # The raw file has no header row; name the columns explicitly.
    columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class']
    df = pd.read_csv(filename, header=None, names=columns)

    # Map textual species names onto integer codes.
    class_mapping = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    df['class'] = df['class'].map(class_mapping)

    # map() silently turns unknown labels into NaN, which would make y a
    # float array — fail loudly instead.
    if df['class'].isna().any():
        raise ValueError("unrecognised class label in downloaded Iris data")

    # Split off features (first four columns) and labels (last column).
    X = df.iloc[:, :-1].values
    y = df.iloc[:, -1].values

    return X, y

# Main entry point
def main():
    """End-to-end KNN experiment: load Iris, sweep k, plot, report results."""
    # Load the data.
    X, y = load_iris_data()

    # Basic data-set summary.
    print("Iris数据集基本情况：")
    print(f"记录数：{X.shape[0]}")
    print(f"属性数：{X.shape[1]}")
    print("数据内容：4个特征（花萼长度、花萼宽度、花瓣长度、花瓣宽度）和1个类别标签")
    print(f"类别分布：{np.bincount(y)}")

    # Random 70/30 train/test split.
    indices = list(range(len(X)))
    random.shuffle(indices)
    split_idx = int(0.7 * len(X))
    train_indices, test_indices = indices[:split_idx], indices[split_idx:]

    X_train, X_test = X[train_indices], X[test_indices]
    y_train, y_test = y[train_indices], y[test_indices]

    # Sweep k and cache each k's test-set predictions, so the best model
    # does not need to be refit and re-predicted afterwards (the original
    # code repeated that identical work for best_k).
    k_values = range(1, 16)
    accuracies = []
    predictions_by_k = {}

    for k in k_values:
        knn = KNNClassifier(k=k)
        knn.fit(X_train, y_train)
        y_pred = knn.predict(X_test)
        predictions_by_k[k] = y_pred
        acc = knn.accuracy(y_test, y_pred)
        accuracies.append(acc)
        print(f"k={k}时，准确率：{acc:.4f}")

    # Plot accuracy as a function of k.
    plt.figure(figsize=(10, 6))
    plt.plot(k_values, accuracies, marker='o')
    plt.title('K值与准确率关系')
    plt.xlabel('k值')
    plt.ylabel('准确率')
    plt.xticks(k_values)
    plt.grid(True)
    plt.show()

    # Best k: argmax breaks ties toward the smallest k.
    best_k = k_values[np.argmax(accuracies)]
    print(f"\n最佳k值为：{best_k}")

    # Reuse the predictions already computed during the sweep.
    y_pred = predictions_by_k[best_k]

    # Show a sample of true vs. predicted labels.
    print("\n部分预测结果对比：")
    print("真实标签 | 预测标签")
    print("-" * 20)
    for true, pred in zip(y_test[:10], y_pred[:10]):
        print(f"    {true}    |    {pred}")

# Run the experiment only when executed as a script, not when imported.
if __name__ == "__main__":
    main()