import random
import math
from urllib.request import urlopen  # 标准库，用于从官网加载数据
from collections import Counter

# 1. Data loading (fetch iris.data straight from the UCI site; no ucimlrepo dependency)
def load_iris_data():
    """Download the Iris dataset from the UCI repository and parse it.

    Returns:
        (X, y): X is a list of 4-element float feature vectors
        [sepal length, sepal width, petal length, petal width]; y is the
        aligned list of class-label strings (Iris-setosa / Iris-versicolor /
        Iris-virginica). Both lists are empty when the download fails.
    """
    # Direct download URL for the raw iris.data file.
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
    try:
        with urlopen(url) as f:
            # Decode once at the I/O boundary; strip() drops trailing blank lines.
            data = f.read().decode('utf-8').strip().splitlines()
    except Exception as e:
        print(f"网络加载失败：{e}，请检查网络连接后重试")
        return [], []

    X = []  # features
    y = []  # labels
    for line in data:
        parts = line.strip().split(',')
        # Skip blank or malformed rows: the raw file may contain empty or
        # truncated lines, which would otherwise raise IndexError below.
        if len(parts) != 5:
            continue
        try:
            features = [float(v) for v in parts[:4]]
        except ValueError:
            continue  # non-numeric junk row — ignore rather than crash
        X.append(features)
        y.append(parts[4])
    return X, y

# 2. Preprocessing (train/test split + feature normalization)
def train_test_split(X, y, test_size=0.2, random_state=42):
    """Split the parallel lists X/y into train and test subsets.

    Args:
        X: list of feature vectors.
        y: list of labels aligned with X.
        test_size: fraction of samples assigned to the test set.
        random_state: seed so the split is reproducible.

    Returns:
        (X_train, X_test, y_train, y_test)
    """
    random.seed(random_state)
    n_samples = len(X)
    # Store the sampled indices in a set: membership tests in the loop below
    # are O(1) instead of O(n), avoiding an accidental O(n^2) split.
    test_indices = set(random.sample(range(n_samples), int(n_samples * test_size)))

    X_train, X_test = [], []
    y_train, y_test = [], []
    for i in range(n_samples):
        if i in test_indices:
            X_test.append(X[i])
            y_test.append(y[i])
        else:
            X_train.append(X[i])
            y_train.append(y[i])
    return X_train, X_test, y_train, y_test

def normalize_features(X, min_vals=None, max_vals=None):
    """Min-max scale every feature of X to the [0, 1] range.

    Removes the effect of differing units/magnitudes on KNN distances.

    Args:
        X: list of equal-length numeric feature vectors.
        min_vals: optional per-feature minima to scale with; defaults to the
            minima computed from X itself.
        max_vals: optional per-feature maxima; defaults likewise.
            Pass the training set's statistics here to scale a test set
            consistently with the training set.

    Returns:
        A new list of scaled samples. A constant feature (max == min) is
        mapped to 0.0 to avoid division by zero.
    """
    if not X:
        return []
    n_features = len(X[0])
    # Fall back to statistics of X itself (the original behaviour).
    if min_vals is None:
        min_vals = [min(sample[i] for sample in X) for i in range(n_features)]
    if max_vals is None:
        max_vals = [max(sample[i] for sample in X) for i in range(n_features)]

    X_normalized = []
    for sample in X:
        normalized_sample = []
        for i in range(n_features):
            denominator = max_vals[i] - min_vals[i]
            if denominator == 0:
                normalized_sample.append(0.0)  # constant feature: avoid /0
            else:
                normalized_sample.append((sample[i] - min_vals[i]) / denominator)
        X_normalized.append(normalized_sample)
    return X_normalized

# 3. Core KNN implementation (pure Python, no third-party libraries)
class KNNClassifier:
    """k-nearest-neighbour classifier: Euclidean distance + majority vote."""

    def __init__(self, k=3):
        self.k = k            # number of neighbours consulted per prediction
        self.X_train = None   # memorised training features
        self.y_train = None   # memorised training labels

    def _euclidean_distance(self, x1, x2):
        """Euclidean distance between two feature vectors (similarity measure)."""
        return math.sqrt(sum((a - b) ** 2 for a, b in zip(x1, x2)))

    def fit(self, X, y):
        """KNN is a lazy learner: 'training' just memorises the data."""
        self.X_train = X
        self.y_train = y

    def _predict_single(self, x):
        """Classify one sample by majority vote among its k nearest neighbours."""
        # Pair each training sample's distance with its label.
        pairs = [(self._euclidean_distance(x, xt), yt)
                 for xt, yt in zip(self.X_train, self.y_train)]
        # Ascending tuple sort: primarily by distance (ties fall back to the
        # label, matching the plain tuple ordering).
        nearest = sorted(pairs)[:self.k]
        votes = [label for _, label in nearest]
        # Majority vote: most frequent label among the k neighbours wins.
        return Counter(votes).most_common(1)[0][0]

    def predict(self, X):
        """Predict a label for every sample in X."""
        return [self._predict_single(sample) for sample in X]

    def accuracy(self, X, y):
        """Fraction of samples in X whose prediction equals the label in y."""
        if not y:
            return 0.0
        hits = sum(pred == truth for pred, truth in zip(self.predict(X), y))
        return hits / len(y)

# 4. Full pipeline: load data -> preprocess -> fit -> evaluate
if __name__ == "__main__":
    # Step 1: load the data
    X, y = load_iris_data()
    if not X:  # abort with a nonzero exit code if the download failed
        raise SystemExit(1)
    print(f"✅ 成功加载Iris数据集：{len(X)}个样本，{len(X[0])}个特征")

    # Step 2: split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    print(f"📊 数据划分完成：训练集{len(X_train)}个样本，测试集{len(X_test)}个样本")

    # Step 3: min-max scaling.
    # BUGFIX: the test set must be scaled with the TRAINING set's min/max.
    # Normalising each split with its own statistics (the previous behaviour)
    # puts train and test features on different scales — distorting KNN
    # distances — and leaks test-set information into preprocessing.
    n_feat = len(X_train[0])
    min_vals = [min(s[i] for s in X_train) for i in range(n_feat)]
    max_vals = [max(s[i] for s in X_train) for i in range(n_feat)]

    def _scale(samples):
        """Apply the training-set min/max to a list of samples."""
        scaled = []
        for s in samples:
            row = []
            for i in range(n_feat):
                rng = max_vals[i] - min_vals[i]
                # Constant feature: map to 0.0 to avoid division by zero.
                row.append(0.0 if rng == 0 else (s[i] - min_vals[i]) / rng)
            scaled.append(row)
        return scaled

    X_train_norm = _scale(X_train)
    X_test_norm = _scale(X_test)

    # Step 4: fit the KNN model (k=3 works well on Iris)
    knn = KNNClassifier(k=3)
    knn.fit(X_train_norm, y_train)
    print(f"🔧 KNN模型训练完成（k={knn.k}）")

    # Step 5: evaluate on both splits
    train_acc = knn.accuracy(X_train_norm, y_train)
    test_acc = knn.accuracy(X_test_norm, y_test)

    # Report results
    print("\n===== KNN分类器性能结果 =====")
    print(f"训练集准确率：{train_acc:.4f}")
    print(f"测试集准确率：{test_acc:.4f}")
