import numpy as np

# Load the dataset
def load_dataset(filename):
    """Read a comma-separated data file and split it into features and labels.

    The first four columns are parsed as float features; the fifth column
    is kept as the string class label.
    """
    raw = np.loadtxt(filename, delimiter=',', dtype=str)
    X = raw[:, :4].astype(float)  # numeric feature columns
    y = raw[:, 4]                 # class-name column, left as strings
    return X, y

# Euclidean distance between two samples
def euclidean_distance(sample1, sample2):
    """Return the L2 (Euclidean) distance between two feature vectors."""
    return np.linalg.norm(sample1 - sample2)

# KNN algorithm
def knn(features, labels, query, k=3):
    """Classify `query` by majority vote among its k nearest neighbors.

    Parameters
    ----------
    features : ndarray of shape (n_samples, n_features)
        Training samples.
    labels : sequence of length n_samples
        Class label of each training sample.
    query : array-like of shape (n_features,)
        The sample to classify.
    k : int, default 3
        Number of neighbors that vote.

    Returns
    -------
    The predicted label.

    Notes
    -----
    Distances are computed in one vectorized NumPy pass instead of a
    per-sample Python loop. The vote is deterministic: the original
    ``max(set(top_k_labels), ...)`` broke ties by arbitrary set iteration
    order (which varies under string-hash randomization); here ties
    resolve to the label of the nearest tied neighbor.
    """
    features = np.asarray(features)
    # All n distances at once: ||features - query|| row-wise.
    distances = np.sqrt(np.sum((features - np.asarray(query)) ** 2, axis=1))
    # Stable sort keeps equal-distance neighbors in dataset order.
    nearest = np.argsort(distances, kind="stable")[:k]
    top_k_labels = [labels[i] for i in nearest]
    # max over the ordered list: on a count tie, the first (closest) wins.
    return max(top_k_labels, key=top_k_labels.count)

# Hand-rolled feature scaling (standardization)
def standard_scaler(features):
    """Standardize each feature column to zero mean and unit variance.

    A constant column has zero standard deviation, so the plain
    ``(x - mean) / std`` would emit NaN; such columns are divided by 1
    instead, producing all zeros.
    """
    mean = np.mean(features, axis=0)
    std = np.std(features, axis=0)
    # Guard against division by zero on constant columns.
    safe_std = np.where(std == 0, 1.0, std)
    return (features - mean) / safe_std

# Hand-rolled train/test split
def train_test_split(features, labels, test_size=0.2, random_state=42):
    """Shuffle the samples and carve off a test partition.

    Returns (train_features, test_features, train_labels, test_labels).
    The shuffle is reproducible for a fixed `random_state`.
    """
    np.random.seed(random_state)  # fix the shuffle for reproducibility
    shuffled = np.random.permutation(len(features))
    n_test = int(len(features) * test_size)
    test_idx, train_idx = shuffled[:n_test], shuffled[n_test:]
    return (features[train_idx], features[test_idx],
            labels[train_idx], labels[test_idx])

# Accuracy metric
def calculate_accuracy(predictions, true_labels):
    """Return the fraction of predictions matching the true labels.

    Returns 0.0 for empty input instead of raising ZeroDivisionError.
    """
    if len(predictions) == 0:
        return 0.0
    # zip pairs each prediction with its ground truth; True counts as 1.
    correct_count = sum(p == t for p, t in zip(predictions, true_labels))
    return correct_count / len(predictions)

# Entry point
def main():
    """Load the iris data, scale, split, classify the test set, report accuracy."""
    filename = 'iris.data'  # make sure this path points at the data file
    features, labels = load_dataset(filename)

    # Standardize the features, then hold back a test partition.
    features = standard_scaler(features)
    split = train_test_split(features, labels, test_size=0.2, random_state=42)
    train_features, test_features, train_labels, test_labels = split

    # Classify every test sample with a 3-nearest-neighbor vote.
    predictions = [knn(train_features, train_labels, query, k=3)
                   for query in test_features]

    # Report the outcome.
    accuracy = calculate_accuracy(predictions, test_labels)
    print("Test samples:")
    print(test_features)
    print("Predictions:")
    print(predictions)
    print("Accuracy:", accuracy)

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()