import csv
import math
import random
from collections import Counter


def _parse_iris_row(row):
    """Parse one CSV row into ``(features, label)``; return None for invalid rows.

    A valid row has exactly 5 fields: 4 numeric features and a class label.
    Rows with the wrong field count or non-numeric features are skipped
    rather than aborting the whole load.
    """
    if len(row) != 5:
        return None
    try:
        features = [float(value) for value in row[:4]]
    except ValueError:
        # Malformed numeric field — skip this row.
        return None
    return features, row[4]


def load_iris_data(filename='iris.data'):
    """Load the Iris dataset, falling back to a download from the UCI site.

    Args:
        filename: Path to a local copy of ``iris.data`` (CSV, no header).

    Returns:
        List of ``(features, label)`` tuples where ``features`` is a
        ``list[float]`` of length 4 and ``label`` is a string.
    """
    data = []
    try:
        # Try the local file first; pin the encoding so parsing does not
        # depend on the platform's default codec.
        with open(filename, 'r', encoding='utf-8') as file:
            for row in csv.reader(file):
                record = _parse_iris_row(row)
                if record is not None:
                    data.append(record)
    except FileNotFoundError:
        # Local file missing — download from the UCI repository instead.
        print("本地文件未找到，尝试从UCI网站下载...")
        import urllib.request
        url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
        with urllib.request.urlopen(url) as response:
            content = response.read().decode('utf-8')
            for line in content.split('\n'):
                record = _parse_iris_row(line.split(','))
                if record is not None:
                    data.append(record)

    print(f"成功加载Iris数据集，共{len(data)}个样本")
    return data


def split_data(data, test_ratio=0.3):
    """Randomly split samples into a training set and a test set.

    Args:
        data: List of samples (any type).
        test_ratio: Fraction of samples reserved for the test set.

    Returns:
        ``(train_set, test_set)`` tuple of new lists.
    """
    # Shuffle a copy — the original shuffled the caller's list in place,
    # which is a surprising side effect for a "split" function.
    shuffled = list(data)
    random.shuffle(shuffled)
    split_index = int(len(shuffled) * (1 - test_ratio))
    train_set = shuffled[:split_index]
    test_set = shuffled[split_index:]
    print(f"数据集拆分完成：训练集{len(train_set)}个样本，测试集{len(test_set)}个样本")
    return train_set, test_set


def euclidean_distance(instance1, instance2):
    """Return the Euclidean distance between two equal-length feature vectors."""
    # zip pairs coordinates directly instead of indexing with range(len(...)).
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(instance1, instance2)))


def get_neighbors(train_set, test_instance, k):
    """Return the k training samples nearest to ``test_instance``.

    Args:
        train_set: List of ``(features, label)`` training samples.
        test_instance: ``(features, label)`` sample to classify.
        k: Number of neighbors. If k exceeds the training-set size, all
           training samples are returned (the original raised IndexError).

    Returns:
        Up to k ``(features, label)`` tuples, ordered nearest-first.
    """
    distances = []
    for train_instance in train_set:
        dist = euclidean_distance(test_instance[0], train_instance[0])
        distances.append((train_instance, dist))

    # Sort ascending by distance.
    distances.sort(key=lambda pair: pair[1])

    # Slicing (unlike index-by-index access) tolerates k > len(distances).
    return [instance for instance, _ in distances[:k]]


def predict_class(neighbors):
    """Predict a class by majority vote over the neighbors' labels."""
    # Tally labels; ties go to the label encountered first among the neighbors.
    tally = Counter(label for _, label in neighbors)
    (winner, _), = tally.most_common(1)
    return winner


def knn_classify(train_set, test_set, k):
    """Classify every test sample via k-nearest-neighbor majority voting.

    Returns a list of ``(test_instance, predicted_class)`` pairs, one per
    test sample, in the test set's order.
    """
    return [
        (sample, predict_class(get_neighbors(train_set, sample, k)))
        for sample in test_set
    ]


def calculate_accuracy(predictions):
    """Return the classification accuracy as a percentage.

    Args:
        predictions: List of ``(test_instance, predicted_class)`` pairs,
            where ``test_instance`` is ``(features, actual_class)``.

    Returns:
        Percentage of correct predictions in [0, 100]; 0.0 for an empty
        list (the original divided by zero here).
    """
    if not predictions:
        return 0.0
    correct = sum(
        1 for test_instance, predicted in predictions if test_instance[1] == predicted
    )
    return correct / len(predictions) * 100


def main():
    """Run the KNN demo: load Iris data, split it, and evaluate several k."""
    # Load the dataset and split it into train/test sets.
    data = load_iris_data()
    train_set, test_set = split_data(data)

    # Evaluate several k values, keeping the k=5 predictions so they are
    # not recomputed for the sample display below (the original re-ran
    # knn_classify a second time for the same k).
    sample_source = None
    for k in [3, 5, 7, 9]:
        predictions = knn_classify(train_set, test_set, k)
        accuracy = calculate_accuracy(predictions)
        print(f"当k={k}时，分类准确率为: {accuracy:.2f}%")
        if k == 5:
            sample_source = predictions

    # Show a handful of individual k=5 predictions.
    print("\n部分预测结果示例：")
    for i, (test_instance, predicted_class) in enumerate(sample_source[:5]):
        actual_class = test_instance[1]
        print(f"样本{i + 1}: 特征={[round(x, 2) for x in test_instance[0]]}, "
              f"实际类别={actual_class}, 预测类别={predicted_class}, "
              f"{'正确' if actual_class == predicted_class else '错误'}")


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
