import math
import random
from collections import Counter


# 加载Iris数据集
def load_iris_data():
    dataset = []
    try:
        with open('iris.data', 'r') as file:
            lines = file.readlines()
            for line in lines:
                line = line.strip()
                if line:
                    parts = line.split(',')
                    features = [float(feature) for feature in parts[:-1]]
                    label = parts[-1]
                    dataset.append((features, label))
    except FileNotFoundError:
        print("找不到 iris.data 文件，请检查文件路径是否正确。")
    return dataset


# Euclidean distance between two points
def euclidean_distance(p1, p2):
    """Return the Euclidean (L2) distance between points p1 and p2.

    Coordinates are paired positionally; extra trailing coordinates in
    the longer sequence are ignored (``zip`` truncates).
    """
    total = 0.0
    for a, b in zip(p1, p2):
        diff = a - b
        total += diff * diff
    return math.sqrt(total)


# KNN classification
def knn_classify(train_data, test_point, k):
    """Classify ``test_point`` by majority vote among its k nearest neighbors.

    train_data: iterable of (features, label) pairs.
    test_point: feature sequence to classify.
    k: number of neighbors that vote.

    Sorting keys on the distance only. The original sorted whole
    (distance, label) tuples, which falls back to comparing labels on
    distance ties — that raises TypeError for non-orderable label types
    and imposed an arbitrary alphabetical tie order. Vote ties resolve
    to the label first inserted into the Counter (documented behavior
    of Counter.most_common).
    """
    neighbors = sorted(
        ((euclidean_distance(features, test_point), label)
         for features, label in train_data),
        key=lambda pair: pair[0],
    )[:k]
    votes = Counter(label for _, label in neighbors)
    return votes.most_common(1)[0][0]


# Split data into training and test sets
def train_test_split(data, test_size=0.2):
    """Randomly partition ``data`` into (train, test) lists.

    data: sequence of samples; it is NOT modified (the original version
          shuffled the caller's list in place — a surprising side effect).
    test_size: fraction of samples reserved for the test split.

    Returns (train, test) where train holds int(len * (1 - test_size))
    samples and test holds the remainder.
    """
    shuffled = list(data)  # copy so the caller's list stays intact
    random.shuffle(shuffled)
    cut = int(len(shuffled) * (1 - test_size))
    return shuffled[:cut], shuffled[cut:]


if __name__ == "__main__":
    # Load the dataset and split it into train/test portions.
    iris_data = load_iris_data()
    train_data, test_data = train_test_split(iris_data)

    # Number of nearest neighbors to consider.
    k = 3

    correct_count = 0
    total_count = len(test_data)

    # Guard against an empty test set (e.g. iris.data missing, in which
    # case load_iris_data returns []); the division below would
    # otherwise raise ZeroDivisionError.
    if total_count == 0:
        print("没有可用的测试数据，无法评估准确率。")
    else:
        for feature, label in test_data:
            prediction = knn_classify(train_data, feature, k)
            if prediction == label:
                correct_count += 1

        accuracy = correct_count / total_count
        print(f"KNN分类准确率: {accuracy * 100:.2f}%")