import numpy as np
from sklearn.model_selection import KFold

def distance(a, b, metric='euclidean'):
    """Return the distance between points ``a`` and ``b``.

    Parameters:
        a, b: numpy arrays of identical shape.
        metric: 'euclidean' (L2) or 'manhattan' (L1).

    Raises:
        ValueError: for any unrecognized metric name.
    """
    diff = a - b
    if metric == 'manhattan':
        return np.sum(np.abs(diff))
    if metric == 'euclidean':
        return np.sqrt(np.sum(diff ** 2))
    raise ValueError(f"不支持的距离度量: {metric}。请使用 'euclidean' 或 'manhattan'。")

class KNN:
    """k-nearest-neighbors classifier with uniform or distance-based voting.

    This is a lazy learner: ``fit`` only memorizes the training data, and
    all work happens at prediction time via the module-level ``distance``
    helper.

    Attributes:
        k: number of neighbors consulted per prediction.
        label_num: number of classes; inferred in ``fit`` when not given.
            Labels are assumed to be integers in ``[0, label_num)`` —
            NOTE(review): confirm against callers.
        metric: distance metric name forwarded to ``distance``.
        weights: 'uniform' or 'distance' neighbor weighting.
        x_train, y_train: training data stored by ``fit``.
        best_k: best k found by ``find_best_k``, or None if never run.
    """

    def __init__(self, k=3, label_num=None, metric='euclidean', weights='uniform'):
        """Store hyper-parameters; no training happens here."""
        self.k = k
        self.label_num = label_num
        self.metric = metric
        self.weights = weights
        self.x_train = None
        self.y_train = None
        self.best_k = None  # best k found via cross-validation, if any

    def fit(self, x_train, y_train):
        """Memorize the training set.

        If ``label_num`` was not supplied, infer it as the number of
        distinct labels in ``y_train``.
        """
        self.x_train = x_train
        self.y_train = y_train
        if self.label_num is None:
            self.label_num = len(np.unique(y_train))
            print(f"自动检测到类别数量: {self.label_num}")

    def _get_weights(self, distances):
        """Return a voting weight per neighbor distance (internal helper).

        'uniform' gives every neighbor weight 1; 'distance' gives weight
        1/d (with a small epsilon so a zero distance does not divide by 0).

        Raises:
            ValueError: for any unrecognized weighting scheme.
        """
        if self.weights == 'uniform':
            return np.ones_like(distances)
        elif self.weights == 'distance':
            # Epsilon avoids division by zero for exact-match neighbors.
            return 1.0 / (distances + 1e-10)
        else:
            raise ValueError(f"不支持的权重方式: {self.weights}。请使用 'uniform' 或 'distance'。")

    def get_knn_info(self, x):
        """Return (indices, distances) of the k training points nearest to x.

        If k exceeds the training-set size, all points are returned.
        """
        dis = np.array([distance(a, x, self.metric) for a in self.x_train])
        sorted_indices = np.argsort(dis)
        knn_indices = sorted_indices[:self.k]
        knn_distances = dis[knn_indices]
        return knn_indices, knn_distances

    def get_label(self, x):
        """Predict the class label for a single sample x by weighted vote."""
        knn_indices, knn_distances = self.get_knn_info(x)

        weights = self._get_weights(knn_distances)

        # Accumulate each neighbor's weight into its class bucket; the
        # heaviest bucket wins (ties resolve to the lowest label index).
        label_statistic = np.zeros(shape=[self.label_num])
        for i, index in enumerate(knn_indices):
            label = int(self.y_train[index])
            label_statistic[label] += weights[i]

        return np.argmax(label_statistic)

    def predict(self, x_test):
        """Predict labels for every sample in x_test.

        Returns:
            An int array of predicted labels, one per test sample.

        Raises:
            ValueError: if called before ``fit``.
        """
        if self.x_train is None:
            raise ValueError("模型尚未训练，请先调用 fit() 方法。")

        predicted_test_labels = np.zeros(shape=[len(x_test)], dtype=int)
        for i, x in enumerate(x_test):
            predicted_test_labels[i] = self.get_label(x)
        return predicted_test_labels

    def score(self, x_test, y_test):
        """Return the classification accuracy on (x_test, y_test)."""
        y_pred = self.predict(x_test)
        return np.mean(y_pred == y_test)

    def find_best_k(self, k_range, x_val, y_val, cv=5):
        """Pick the best k via cv-fold cross-validation on (x_val, y_val).

        Parameters:
            k_range: iterable of candidate k values, e.g. range(1, 31)
            x_val: feature array used for cross-validation
            y_val: label array used for cross-validation
            cv: number of folds

        Returns:
            (best_k, best_score); also sets ``self.best_k`` and ``self.k``.

        Raises:
            ValueError: if k_range is empty.
        """
        k_candidates = list(k_range)
        if not k_candidates:
            # Otherwise self.k would silently become None below.
            raise ValueError("k_range must not be empty")

        # Infer label_num from the FULL label set up front: an individual
        # fold may be missing some classes, which would make the vote
        # array in get_label too small.
        if self.label_num is None:
            self.label_num = len(np.unique(y_val))

        # fit() below overwrites the training set with each fold;
        # remember the caller's data so we can restore it afterwards.
        saved_x, saved_y = self.x_train, self.y_train

        kf = KFold(n_splits=cv, shuffle=True, random_state=42)
        best_k = None
        best_score = 0

        print(f"开始交叉验证寻找最佳k值 (范围: {k_candidates})...")

        for k in k_candidates:
            self.k = k
            fold_scores = []
            for train_idx, val_idx in kf.split(x_val):
                self.fit(x_val[train_idx], y_val[train_idx])
                fold_scores.append(self.score(x_val[val_idx], y_val[val_idx]))

            mean_score = np.mean(fold_scores)
            print(f"k={k}, 平均准确率: {mean_score:.4f}")

            if mean_score > best_score:
                best_score = mean_score
                best_k = k

        # Restore the caller's training data and adopt the best k.
        self.x_train, self.y_train = saved_x, saved_y
        self.best_k = best_k
        self.k = best_k
        print(f"\n找到的最佳k值为: {best_k} (准确率: {best_score:.4f})")
        return best_k, best_score