import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from collections import Counter

# ==================== Data loading & preprocessing ====================
# Wine Quality dataset (red wine); the UCI CSV is semicolon-separated.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
df = pd.read_csv(url, sep=';')

# Binarize the quality score: 1 = high quality (>= 6), 0 = low quality.
df['label'] = df['quality'].ge(6).astype(int)

# Feature columns are everything except the two target columns.
features = df.columns.drop(['quality', 'label'])
# Stack features and label into one numeric array; the label is the last column.
data = df[features].join(df['label']).to_numpy()

# ==================== Code below needs no modification (names adapted only) ====================
X, y = data[:, :-1], data[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

class KNN:
    """Brute-force k-nearest-neighbors classifier.

    Stores the training set and classifies each query point by a majority
    vote among its ``n_neighbors`` nearest training samples under the
    Minkowski ``p``-norm.
    """

    def __init__(self, X_train, y_train, n_neighbors=3, p=2):
        self.n = n_neighbors          # number of neighbors that vote
        self.p = p                    # order of the Minkowski distance
        self.X_train = X_train        # (n_samples, n_features) array
        self.y_train = y_train        # (n_samples,) labels

    def predict(self, X):
        """Return the majority label among the n nearest neighbors of X.

        The original implementation maintained a running "replace the
        farthest candidate" list, costing O(n_samples * k) Python-level
        work per query and crashing when n_neighbors exceeded the
        training-set size. Here all distances are computed in one
        vectorized call and the k smallest are selected with
        ``np.argpartition`` (O(n_samples) selection).
        """
        # Distance from X to every training sample along the feature axis.
        dists = np.linalg.norm(self.X_train - X, ord=self.p, axis=1)
        # Clamp k so a small training set does not raise an IndexError.
        k = min(self.n, len(self.X_train))
        nearest = np.argpartition(dists, k - 1)[:k]
        votes = Counter(self.y_train[i] for i in nearest)
        return votes.most_common(1)[0][0]

    def score(self, X_test, y_test):
        """Return classification accuracy on (X_test, y_test)."""
        correct = sum(self.predict(x) == label for x, label in zip(X_test, y_test))
        return correct / len(X_test)

# Evaluate the baseline (unweighted) KNN on the held-out test split.
knn_clf = KNN(X_train, y_train)
test_score = knn_clf.score(X_test, y_test)
print('Test Score:', test_score)


# Grid-search over the neighbor count for the plain KNN classifier.
def find_best_k(X_train, y_train, X_test, y_test, max_k=10):
    """Try k = 1..max_k, print each accuracy, and return the best k.

    Ties are broken in favor of the smallest k (first maximum found).
    """
    scores = {}
    for k in range(1, max_k + 1):
        acc = KNN(X_train, y_train, n_neighbors=k).score(X_test, y_test)
        print(f"k={k}, accuracy={acc:.2f}")
        scores[k] = acc
    # max over insertion-ordered dict returns the first k with maximal score,
    # matching the original strict-greater update rule.
    return max(scores, key=scores.get)

# Report the best neighbor count for the plain classifier.
print("\n寻找最佳k值:")
best_k = find_best_k(X_train, y_train, X_test, y_test)
print("最佳k值为: " + str(best_k))

# Weighted-KNN variant: each neighbor votes with weight 1/distance.
class WeightedKNN(KNN):
    """KNN where the k nearest neighbors cast distance-weighted votes."""

    def predict(self, X):
        """Return the label with the largest total 1/distance weight
        among the n nearest training samples.

        BUG FIX: the original implementation stopped after seeding the
        candidate list with the FIRST n training samples and never scanned
        the rest of the training set, so the "neighbors" were not the
        nearest points at all. The replace-the-farthest scan (same scheme
        as KNN.predict in the original code) is restored below.
        """
        # Seed the candidate list with the first n training points.
        knn_list = []
        for i in range(self.n):
            dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
            knn_list.append((dist, self.y_train[i]))

        # Scan the remaining samples, replacing the current farthest
        # candidate whenever a closer sample is found.
        for i in range(self.n, len(self.X_train)):
            max_index = knn_list.index(max(knn_list, key=lambda x: x[0]))
            dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
            if knn_list[max_index][0] > dist:
                knn_list[max_index] = (dist, self.y_train[i])

        # Weighted vote: closer neighbors contribute more.
        weight_sum = {}
        for dist, label in knn_list:
            if dist == 0:  # exact match with a training point: trust its label
                return label
            weight_sum[label] = weight_sum.get(label, 0.0) + 1.0 / dist

        # Label with the largest accumulated weight wins.
        return max(weight_sum.items(), key=lambda x: x[1])[0]


# Evaluate the weighted classifier using the best k found above.
print("\n测试加权KNN:")
weighted_clf = WeightedKNN(X_train, y_train, n_neighbors=best_k)
weighted_test_score = weighted_clf.score(X_test, y_test)
print("加权KNN (k={}) 测试准确率: {:.4f}".format(best_k, weighted_test_score))


# Grid-search over the neighbor count for the weighted classifier.
def find_best_weighted_k(X_train, y_train, X_test, y_test, max_k=10):
    """Try k = 1..max_k for WeightedKNN, print each accuracy, and
    return (best_k, best_score). Ties favor the smallest k."""
    results = []
    for k in range(1, max_k + 1):
        acc = WeightedKNN(X_train, y_train, n_neighbors=k).score(X_test, y_test)
        print(f"加权KNN k={k}, 准确率={acc:.4f}")
        results.append((k, acc))
    # max with key returns the first maximal entry, matching the original
    # strict-greater update rule.
    best_k, best_score = max(results, key=lambda pair: pair[1])
    return best_k, best_score


# Search for and report the best parameters of the weighted classifier.
print("\n寻找加权KNN最佳k值:")
best_weighted_k, best_weighted_score = find_best_weighted_k(X_train, y_train, X_test, y_test)
print("加权KNN最佳k值: {}, 最佳准确率: {:.4f}".format(best_weighted_k, best_weighted_score))