import random
import copy
import math
import csv

# Load data
def load_data(file_path):
    """Read a CSV dataset where each row is feature columns followed by a label.

    Args:
        file_path: Path to a CSV file; every non-empty row must have numeric
            feature columns and a trailing string label.

    Returns:
        (data, labels): a list of float feature vectors and a parallel list
        of label strings. Blank rows (e.g. a trailing newline) are skipped.
    """
    data = []
    labels = []
    # newline='' is required by the csv module so it handles line endings
    # itself; utf-8 makes the decoding explicit instead of locale-dependent.
    with open(file_path, 'r', newline='', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            if len(row) > 0:
                features = [float(x) for x in row[:-1]]
                label = row[-1]
                data.append(features)
                labels.append(label)
    return data, labels


# Min-max normalization
def normalize_data(data):
    """Scale every feature column of `data` into the range [0, 1].

    Args:
        data: Non-empty list of equal-length numeric feature vectors.

    Returns:
        A new list of vectors where each value v in column i is mapped to
        (v - min_i) / (max_i - min_i). A constant column (max == min) maps
        to 0.0 instead of raising ZeroDivisionError.
    """
    n_features = len(data[0])
    min_vals = [min(row[i] for row in data) for i in range(n_features)]
    max_vals = [max(row[i] for row in data) for i in range(n_features)]
    # Precompute per-column ranges; a zero range marks a constant column.
    ranges = [mx - mn for mn, mx in zip(min_vals, max_vals)]
    normalized_data = []
    for row in data:
        normalized_data.append([
            (value - mn) / rng if rng else 0.0
            for value, mn, rng in zip(row, min_vals, ranges)
        ])
    return normalized_data


# Compute the Euclidean distance
def euclidean_distance(instance1, instance2):
    """Return the Euclidean (L2) distance between two equal-length vectors."""
    squared_sum = sum((a - b) ** 2 for a, b in zip(instance1, instance2))
    return math.sqrt(squared_sum)


# Compute the Manhattan distance (an alternative, switchable distance metric)
def manhattan_distance(instance1, instance2):
    """Return the Manhattan (L1) distance between two equal-length vectors."""
    return sum(abs(a - b) for a, b in zip(instance1, instance2))


# Find the K nearest neighbors
def get_neighbors(train_data, train_labels, test_instance, k, distance_func=euclidean_distance):
    """Return the k training (features, label) pairs closest to `test_instance`.

    `distance_func` selects the metric (defaults to Euclidean). The sort is
    stable, so equidistant samples keep their training-set order.
    """
    scored = sorted(
        (
            (sample, label, distance_func(sample, test_instance))
            for sample, label in zip(train_data, train_labels)
        ),
        key=lambda entry: entry[2],
    )
    # Index explicitly so k larger than the training set still raises,
    # matching the original contract.
    return [(scored[i][0], scored[i][1]) for i in range(k)]


# Predict the class by majority vote
def predict_class(neighbors):
    """Return the most common label among `neighbors` (features, label) pairs.

    Ties are broken in favor of the label seen first, matching a stable
    descending sort over insertion-ordered vote counts.
    """
    tally = {}
    for _, label in neighbors:
        tally[label] = tally.get(label, 0) + 1
    # max scans keys in insertion order, so the first maximal label wins.
    return max(tally, key=tally.get)


# Randomly split the data into training and test sets
def split_data_random(data, labels, test_size=0.2):
    """Shuffle the (sample, label) pairs and split them into train/test sets.

    Args:
        data: Feature vectors.
        labels: Labels parallel to `data`.
        test_size: Fraction of samples reserved for the test set.

    Returns:
        (train_data, train_labels, test_data, test_labels) as lists.
    """
    paired = list(zip(data, labels))
    random.shuffle(paired)
    cutoff = int(len(paired) * (1 - test_size))
    train_data, train_labels = zip(*paired[:cutoff])
    test_data, test_labels = zip(*paired[cutoff:])
    return list(train_data), list(train_labels), list(test_data), list(test_labels)


# Manually implemented simple K-fold cross-validation to find a suitable K value
def cross_validation(data, labels, k_values, k_folds=5, distance_func=euclidean_distance):
    """Pick the k in `k_values` with the best mean k-fold CV accuracy.

    Args:
        data: Feature vectors (assumed pre-shuffled; folds are contiguous slices).
        labels: Labels parallel to `data`.
        k_values: Iterable of candidate neighbor counts.
        k_folds: Number of cross-validation folds.
        distance_func: Distance metric passed through to get_neighbors.

    Returns:
        The best-scoring k. The first candidate wins ties, and some k is
        always returned for a non-empty `k_values` (never None).
    """
    best_k = None
    # Start below any real accuracy so the first evaluated k always
    # registers; the original started at 0 and could return None when
    # every fold scored 0, crashing callers that pass best_k onward.
    best_accuracy = -1.0
    for k in k_values:
        total_accuracy = 0.0
        evaluated_folds = 0
        fold_size = len(data) // k_folds
        for fold in range(k_folds):
            start_index = fold * fold_size
            # The last fold absorbs the remainder when len(data) is not
            # divisible by k_folds.
            end_index = (fold + 1) * fold_size if fold < k_folds - 1 else len(data)
            # Current fold is the test set; everything else is training data.
            test_data = data[start_index:end_index]
            test_labels = labels[start_index:end_index]
            if not test_data:
                # Happens when len(data) < k_folds (fold_size == 0);
                # skipping avoids a ZeroDivisionError below.
                continue
            train_data = data[:start_index] + data[end_index:]
            train_labels = labels[:start_index] + labels[end_index:]

            correct_predictions = 0
            for instance, expected in zip(test_data, test_labels):
                neighbors = get_neighbors(train_data, train_labels, instance, k, distance_func)
                if predict_class(neighbors) == expected:
                    correct_predictions += 1

            total_accuracy += correct_predictions / len(test_data)
            evaluated_folds += 1

        average_accuracy = total_accuracy / evaluated_folds if evaluated_folds else 0.0
        if average_accuracy > best_accuracy:
            best_accuracy = average_accuracy
            best_k = k
    return best_k


if __name__ == "__main__":
    # Load the raw iris measurements and min-max scale them to [0, 1].
    data_path = "iris/iris.data"
    data, labels = load_data(data_path)
    normalized_data = normalize_data(data)

    # Search candidate K values via cross-validation.
    k_values = range(1, 11)
    best_k = cross_validation(normalized_data, labels, k_values)

    # Final hold-out evaluation using the selected K.
    train_data, train_labels, test_data, test_labels = split_data_random(normalized_data, labels)
    correct_predictions = sum(
        1
        for sample, expected in zip(test_data, test_labels)
        if predict_class(get_neighbors(train_data, train_labels, sample, best_k)) == expected
    )

    accuracy = correct_predictions / len(test_data)
    print(f"最佳的k值为: {best_k}，对应的准确率为: {accuracy}")