import math
import random
from collections import Counter


class KNNClassifier:
    """K-nearest-neighbors classifier.

    Samples are ``(feature_list, label)`` tuples. KNN is a lazy learner:
    ``fit`` only stores the training data and all work happens at
    prediction time.
    """

    def __init__(self, k=5):
        """Initialize the classifier.

        Args:
            k: number of nearest neighbors that vote on the predicted label.
        """
        self.k = k
        # List of (features, label) tuples; populated by fit().
        self.train_data = None

    def euclidean_distance(self, sample1, sample2):
        """Return the Euclidean distance between the feature parts of two samples.

        Each sample is (features, label); only index 0 (the features) is compared.
        """
        # math.dist computes sqrt(sum((a - b) ** 2)) in C — same result as
        # the manual loop, but faster and clearer.
        return math.dist(sample1[0], sample2[0])

    def fit(self, train_data):
        """Store the training data (lazy learning) and return self for chaining."""
        self.train_data = train_data
        return self

    def predict_sample(self, sample):
        """Predict the class label for a single (features, label) sample.

        Returns:
            The majority label among the k nearest training samples.

        Raises:
            ValueError: if fit() has not been called (or train_data is empty).
                ValueError subclasses Exception, so existing callers that
                caught Exception still work.
        """
        if not self.train_data:
            raise ValueError("模型尚未训练，请先调用fit方法")

        # Sort training samples by distance to `sample` and keep the k nearest.
        neighbors = sorted(
            self.train_data,
            key=lambda train_sample: self.euclidean_distance(sample, train_sample),
        )[:self.k]

        # Majority vote over the neighbors' labels (index 1 of each sample).
        vote = Counter(label for _, label in neighbors)
        return vote.most_common(1)[0][0]

    def predict(self, test_data):
        """Predict class labels for every sample in test_data."""
        return [self.predict_sample(sample) for sample in test_data]

    def evaluate(self, test_data):
        """Return prediction accuracy on test_data as a percentage (0-100)."""
        predictions = self.predict(test_data)
        correct = sum(
            1 for predicted, (_, actual) in zip(predictions, test_data)
            if predicted == actual
        )
        return correct / len(test_data) * 100


# 数据集加载和处理函数
def load_wine_data(filename='wine.data'):
    """加载Wine数据集"""
    data = []
    try:
        # 尝试从本地文件加载
        with open(filename, 'r') as file:
            for line in file:
                line = line.strip()
                if line:
                    parts = line.split(',')
                    # 第一列是类别，后面是特征
                    label = int(parts[0])
                    features = [float(x) for x in parts[1:]]
                    data.append((features, label))
    except FileNotFoundError:
        # 如果本地文件不存在，从UCI网站下载
        print("本地文件未找到，尝试从UCI网站下载...")
        import urllib.request
        url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
        with urllib.request.urlopen(url) as response:
            content = response.read().decode('utf-8')
            lines = content.split('\n')
            for line in lines:
                line = line.strip()
                if line:
                    parts = line.split(',')
                    label = int(parts[0])
                    features = [float(x) for x in parts[1:]]
                    data.append((features, label))

    print(f"成功加载Wine数据集，共{len(data)}个样本")
    return data


def split_data(data, test_ratio=0.3, random_state=None):
    """Randomly split data into a training set and a test set.

    Fixes two defects in the original: the seed check used truthiness, so
    ``random_state=0`` was silently ignored; and ``random.shuffle`` was
    applied to the caller's list in place, mutating it as a side effect.

    Args:
        data: list of (features, label) samples.
        test_ratio: fraction of samples assigned to the test set.
        random_state: optional seed for reproducible splits (0 is valid).

    Returns:
        (train_set, test_set) tuple of lists; `data` itself is untouched.
    """
    if random_state is not None:
        random.seed(random_state)
    # Shuffle a copy so the caller's list is not mutated.
    shuffled = list(data)
    random.shuffle(shuffled)
    split_index = int(len(shuffled) * (1 - test_ratio))
    train_set = shuffled[:split_index]
    test_set = shuffled[split_index:]
    print(f"数据集拆分完成：训练集{len(train_set)}个样本，测试集{len(test_set)}个样本")
    return train_set, test_set


def normalize_features(dataset):
    """Min-max normalize every feature column to the range [0, 1].

    Fixes the variable shadowing in the original (the per-sample loop
    reused the name of the all-features list) and handles an empty
    dataset instead of raising IndexError.

    Args:
        dataset: list of (features, label) samples with equal-length
            feature lists.

    Returns:
        New list of (normalized_features, label) tuples; constant feature
        columns (max == min) map to 0.0 to avoid division by zero.
    """
    if not dataset:
        return []

    # Transpose the feature matrix so each entry is one feature column,
    # then take min/max per column in a single pass each.
    columns = list(zip(*(sample[0] for sample in dataset)))
    min_vals = [min(column) for column in columns]
    max_vals = [max(column) for column in columns]

    normalized_dataset = []
    for sample_features, label in dataset:
        normalized = []
        for value, lo, hi in zip(sample_features, min_vals, max_vals):
            # A constant column carries no information; map it to 0.0
            # rather than dividing by zero.
            normalized.append(0.0 if hi == lo else (value - lo) / (hi - lo))
        normalized_dataset.append((normalized, label))

    return normalized_dataset


# 主函数
def main():
    """Run the Wine-classification experiment end to end: load, normalize,
    split, sweep k values, then show a few sample predictions."""
    # Column names of the Wine dataset, in file order (kept for reference).
    feature_names = [
        "Alcohol", "Malic_acid", "Ash", "Alcalinity_of_ash", "Magnesium",
        "Total_phenols", "Flavanoids", "Nonflavanoid_phenols", "Proanthocyanins",
        "Color_intensity", "Hue", "OD280/OD315_of_diluted_wines", "Proline"
    ]

    # Load, then min-max normalize — KNN is sensitive to feature scale.
    data = load_wine_data()
    data = normalize_features(data)

    # Reproducible 70/30 train/test split.
    train_set, test_set = split_data(data, test_ratio=0.3, random_state=42)

    # Sweep several odd k values and report accuracy on both sets.
    print("\n不同k值下的模型性能：")
    for k in (3, 5, 7, 9, 11):
        model = KNNClassifier(k=k).fit(train_set)
        train_accuracy = model.evaluate(train_set)
        test_accuracy = model.evaluate(test_set)
        print(f"k={k}: 训练集准确率={train_accuracy:.2f}%, 测试集准确率={test_accuracy:.2f}%")

    # Re-train with the chosen k and show the first few test predictions.
    best_k = 5
    print(f"\n使用最佳k值k={best_k}的部分预测结果：")
    model = KNNClassifier(k=best_k).fit(train_set)
    predictions = model.predict(test_set)

    for index, ((features, actual), predicted) in enumerate(zip(test_set, predictions)):
        if index >= 5:
            break
        # Truncate to the first 3 features so the line stays readable.
        display_features = [round(f, 4) for f in features[:3]]
        print(f"样本{index + 1}: 部分特征={display_features}, 实际类别={actual}, 预测类别={predicted}, "
              f"{'正确' if actual == predicted else '错误'}")


# Run the experiment only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
