import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# Configure matplotlib for Chinese text: SimHei supplies the CJK glyphs used
# by the plot titles/labels below; disabling unicode_minus keeps the minus
# sign renderable with that font (it lacks the Unicode minus glyph).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# 自定义KNN算法类
class MyKNN:
    """K-nearest-neighbors classifier implemented from scratch on numpy.

    Training merely memorizes the data (KNN is a lazy learner); prediction
    takes a majority vote among the k nearest training samples under the
    metric chosen at construction time.
    """

    def __init__(self, k=5, distance_metric='euclidean'):
        # k: number of neighbors that vote on the predicted label.
        # distance_metric: 'euclidean' | 'manhattan' | 'chebyshev' | 'minkowski'
        # (validated lazily in calculate_distance).
        self.k = k
        self.distance_metric = distance_metric
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Store the training samples and labels; returns self for chaining."""
        self.X_train = X
        self.y_train = y
        return self

    def euclidean_distance(self, x1, x2):
        """L2 distance: square root of the sum of squared differences."""
        return np.sqrt(np.sum((x1 - x2) ** 2))

    def manhattan_distance(self, x1, x2):
        """L1 distance: sum of absolute coordinate differences."""
        return np.sum(np.abs(x1 - x2))

    def chebyshev_distance(self, x1, x2):
        """L-infinity distance: largest absolute coordinate difference."""
        return np.max(np.abs(x1 - x2))

    def minkowski_distance(self, x1, x2, p=3):
        """Lp distance: p-th root of the sum of |diff|**p (p=3 by default)."""
        return np.power(np.sum(np.power(np.abs(x1 - x2), p)), 1/p)

    def calculate_distance(self, x1, x2):
        """Dispatch to the metric selected in __init__.

        Raises:
            ValueError: if self.distance_metric is not a supported name.
        """
        if self.distance_metric == 'euclidean':
            return self.euclidean_distance(x1, x2)
        elif self.distance_metric == 'manhattan':
            return self.manhattan_distance(x1, x2)
        elif self.distance_metric == 'chebyshev':
            return self.chebyshev_distance(x1, x2)
        elif self.distance_metric == 'minkowski':
            return self.minkowski_distance(x1, x2)
        else:
            raise ValueError(f"不支持的距离度量方式: {self.distance_metric}")

    def predict(self, X):
        """Predict a label for every row of X; returns a numpy array."""
        return np.array([self._predict(x) for x in X])

    def _predict(self, x):
        """Majority vote among the k training samples nearest to x."""
        # Distance from x to every training sample.
        distances = [self.calculate_distance(x, x_train) for x_train in self.X_train]

        # Indices of the k smallest distances.
        k_indices = np.argsort(distances)[:self.k]

        # Labels of those k neighbors.
        k_nearest_labels = [self.y_train[i] for i in k_indices]

        # np.unique returns sorted labels, so on a tie the smallest label
        # wins — the same tie-break as np.bincount(...).argmax() — but this
        # also works for negative or non-consecutive integer labels, where
        # bincount would crash or allocate a huge count array.
        labels, counts = np.unique(k_nearest_labels, return_counts=True)
        return labels[np.argmax(counts)]

# 加载数据集
def load_data(path=r'C:\Users\31351\Desktop\20221202663-exp1.赖\实验5\src\iris.csv'):
    """Load the iris CSV and return features and labels.

    Args:
        path: location of the CSV file. Defaults to the original
            hard-coded path so existing callers keep working.

    Returns:
        X: (n, 4) float array — the four measurement columns.
        y: (n,) int array — the class label column.
    """
    # Read without a header; columns are addressed positionally below.
    data = pd.read_csv(path, header=None)

    # Drop a leading metadata row whose first cell is the sample count.
    # NOTE(review): this assumes the legacy iris.data header format where
    # the first line starts with 150 — confirm against the actual file.
    if data.iloc[0, 0] == 150:
        data = data.iloc[1:].reset_index(drop=True)

    # First four columns are features, fifth is the integer class label.
    X = data.iloc[:, :4].values.astype(float)
    y = data.iloc[:, 4].values.astype(int)

    return X, y

# 主函数
def main():
    """Run the full experiment: load the data, sweep K, compare distance
    metrics at the best K, benchmark against scikit-learn, and plot results.

    The original version accidentally ran the distance-metric sweep and the
    sklearn comparison twice; each now runs exactly once.
    """
    # 加载数据
    X, y = load_data()

    # 打印数据集信息
    print("数据集形状:", X.shape)
    print("类别分布:", np.bincount(y))

    # 划分训练集和测试集 (70/30 split, fixed seed for reproducibility)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

    # 标准化数据 — the scaler is fit on the training split only, then
    # applied to the test split, to avoid information leakage.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    print("训练集形状:", X_train.shape)
    print("测试集形状:", X_test.shape)

    # 测试不同的K值
    k_values = range(1, 21)
    accuracies = []

    print("\n测试不同的K值:")
    for k in k_values:
        knn = MyKNN(k=k)
        knn.fit(X_train_scaled, y_train)
        y_pred = knn.predict(X_test_scaled)
        accuracy = accuracy_score(y_test, y_pred)
        accuracies.append(accuracy)
        print(f"K = {k}, 准确率 = {accuracy:.4f}")

    # 找出最优的K值 (argmax → smallest K on ties)
    best_k = k_values[np.argmax(accuracies)]
    best_accuracy = max(accuracies)
    print(f"\n最优K值为 {best_k}，准确率为 {best_accuracy:.4f}")

    # 使用最优K值测试不同的距离度量 — single pass; accuracies are kept
    # for the bar chart below.
    distance_metrics = ['euclidean', 'manhattan', 'chebyshev', 'minkowski']
    metric_accuracies = []
    print("\n不同距离度量的准确率:")
    for metric in distance_metrics:
        knn = MyKNN(k=best_k, distance_metric=metric)
        knn.fit(X_train_scaled, y_train)
        y_pred = knn.predict(X_test_scaled)
        accuracy = accuracy_score(y_test, y_pred)
        metric_accuracies.append(accuracy)
        print(f"{metric}: {accuracy:.4f}")

    # 与sklearn的KNN进行对比
    print("\nScikit-learn KNN的准确率:")
    sklearn_knn = KNeighborsClassifier(n_neighbors=best_k)
    sklearn_knn.fit(X_train_scaled, y_train)
    sklearn_pred = sklearn_knn.predict(X_test_scaled)
    sklearn_accuracy = accuracy_score(y_test, sklearn_pred)
    print(f"准确率: {sklearn_accuracy:.4f}")

    # 比较自定义KNN和sklearn KNN的准确率
    print("\n比较:")
    print(f"自定义KNN (欧式距离): {best_accuracy:.4f}")
    print(f"Sklearn KNN: {sklearn_accuracy:.4f}")

    # 可视化不同K值的准确率
    plt.figure(figsize=(10, 6))
    plt.plot(k_values, accuracies, marker='o', linestyle='-')
    plt.title('不同K值的准确率')
    plt.xlabel('K值')
    plt.ylabel('准确率')
    plt.grid(True)

    # 可视化不同距离度量的准确率
    plt.figure(figsize=(10, 6))
    plt.bar(distance_metrics, metric_accuracies)
    plt.title(f'不同距离度量的准确率 (K={best_k})')
    plt.xlabel('距离度量')
    plt.ylabel('准确率')
    plt.ylim(0.8, 1.0)  # 设置y轴范围，更好地显示差异
    plt.grid(True, axis='y')

    plt.show()

# Standard script entry point: run the experiment only when executed
# directly, not when this file is imported as a module.
if __name__ == "__main__":
    main()