import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns

class KNN:
    """k-nearest-neighbors classifier implemented from scratch.

    A lazy learner: ``fit`` only stores the training data; all work happens
    at prediction time.

    Parameters
    ----------
    k : int
        Number of neighbors consulted for each prediction.
    distance_metric : str
        One of 'euclidean', 'manhattan', 'minkowski' (Minkowski uses p=3).
    weights : str
        'uniform' for plain majority voting, 'distance' for
        inverse-distance-weighted voting.
    """

    def __init__(self, k=3, distance_metric='euclidean', weights='uniform'):
        self.k = k
        self.distance_metric = distance_metric
        self.weights = weights
        self.X_train = None
        self.y_train = None

    def _calculate_distance(self, x1, x2):
        """Return the distance between two feature vectors under the configured metric.

        Raises
        ------
        ValueError
            If ``self.distance_metric`` is not a supported metric name.
        """
        if self.distance_metric == 'euclidean':
            return np.sqrt(np.sum((x1 - x2) ** 2))
        elif self.distance_metric == 'manhattan':
            return np.sum(np.abs(x1 - x2))
        elif self.distance_metric == 'minkowski':
            # Minkowski with fixed p=3, as in the original implementation.
            return np.sum(np.abs(x1 - x2) ** 3) ** (1 / 3)
        else:
            raise ValueError("不支持的距離度量方法")

    def _get_neighbors(self, x):
        """Return ``[(train_index, distance), ...]`` for the k nearest training samples.

        Uses a stable argsort so that distance ties are broken by
        training-set order (matching the stable Python ``list.sort`` the
        original code relied on).
        """
        dists = np.array([self._calculate_distance(x, sample)
                          for sample in self.X_train])
        nearest = np.argsort(dists, kind='stable')[:self.k]
        return [(int(i), float(dists[i])) for i in nearest]

    def _predict_single(self, x):
        """Predict the class label of one sample via (weighted) neighbor vote.

        With 'distance' weights, any neighbors at distance exactly 0
        dominate the vote: the label is decided by majority among those
        exact matches only.  This replaces the previous ``inf`` weights,
        whose ties between different zero-distance labels were resolved
        arbitrarily.

        Raises
        ------
        ValueError
            If ``self.weights`` is neither 'uniform' nor 'distance'
            (previously this case silently returned ``None``).
        """
        neighbors = self._get_neighbors(x)

        if self.weights == 'uniform':
            # Simple majority vote among the k nearest labels.
            labels = [self.y_train[idx] for idx, _ in neighbors]
            return Counter(labels).most_common(1)[0][0]

        if self.weights == 'distance':
            # Exact matches (distance 0) get all the voting power.
            exact = [self.y_train[idx] for idx, dist in neighbors if dist == 0]
            if exact:
                return Counter(exact).most_common(1)[0][0]

            # Otherwise accumulate inverse-distance weights per label.
            votes = {}
            for idx, dist in neighbors:
                label = self.y_train[idx]
                votes[label] = votes.get(label, 0.0) + 1.0 / dist
            return max(votes.items(), key=lambda item: item[1])[0]

        raise ValueError(f"不支持的权重类型: {self.weights}")

    def fit(self, X, y):
        """Store the training data (kNN defers all computation to predict time).

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training features.
        y : array-like of shape (n_samples,)
            Training labels.

        Returns
        -------
        self : KNN
            The fitted estimator, enabling ``KNN(...).fit(X, y)`` chaining.
        """
        self.X_train = np.array(X)
        self.y_train = np.array(y)
        return self

    def predict(self, X):
        """Predict a class label for each row of ``X``.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        numpy.ndarray of shape (n_samples,)
            Predicted labels.
        """
        X = np.array(X)
        return np.array([self._predict_single(x) for x in X])

    def predict_proba(self, X):
        """Return per-class probability estimates, shape (n_samples, n_classes).

        Columns follow ``np.unique(self.y_train)`` order.  With 'distance'
        weights, a query that coincides with training points shares all
        probability mass among the zero-distance neighbors — this fixes the
        NaN rows the naive inverse-distance scheme produced (inf / inf).
        """
        X = np.array(X)
        classes = np.unique(self.y_train)
        probabilities = np.zeros((X.shape[0], len(classes)))

        for i, x in enumerate(X):
            neighbors = self._get_neighbors(x)

            if self.weights == 'uniform':
                labels = [self.y_train[idx] for idx, _ in neighbors]
                # Divide by the actual neighbor count, not self.k, so each
                # row sums to 1 even when k exceeds the training-set size.
                for j, cls in enumerate(classes):
                    probabilities[i, j] = labels.count(cls) / len(labels)

            elif self.weights == 'distance':
                # Zero-distance neighbors take all the probability mass.
                exact = [self.y_train[idx] for idx, dist in neighbors if dist == 0]
                if exact:
                    for j, cls in enumerate(classes):
                        probabilities[i, j] = exact.count(cls) / len(exact)
                else:
                    class_weights = {cls: 0.0 for cls in classes}
                    total_weight = 0.0
                    for idx, dist in neighbors:
                        weight = 1.0 / dist
                        class_weights[self.y_train[idx]] += weight
                        total_weight += weight
                    for j, cls in enumerate(classes):
                        probabilities[i, j] = class_weights[cls] / total_weight

        return probabilities

# Testing and visualization helpers
def test_knn():
    """Train the custom kNN on synthetic data, report metrics, and plot results."""
    # Synthetic three-class problem in two informative dimensions.
    features, labels = make_classification(n_samples=300, n_features=2, n_redundant=0,
                                           n_informative=2, n_clusters_per_class=1,
                                           n_classes=3, random_state=42)

    X_tr, X_te, y_tr, y_te = train_test_split(features, labels, test_size=0.3,
                                              random_state=42)

    # Distance-weighted kNN with five neighbors.
    model = KNN(k=5, weights='distance')
    model.fit(X_tr, y_tr)

    predictions = model.predict(X_te)
    model.predict_proba(X_te)  # probabilities computed as in the original flow

    acc = accuracy_score(y_te, predictions)
    print(f"测试集准确率: {acc:.4f}")
    print("\n分类报告:")
    print(classification_report(y_te, predictions))

    plt.figure(figsize=(18, 5))

    # Panel 1: training data colored by true class.
    plt.subplot(1, 3, 1)
    handle = plt.scatter(X_tr[:, 0], X_tr[:, 1], c=y_tr, cmap='viridis',
                         s=50, alpha=0.7, edgecolors='k')
    plt.colorbar(handle)
    plt.title('训练数据分布')
    plt.xlabel('特征 1')
    plt.ylabel('特征 2')

    # Panel 2: test points colored by predicted class.
    plt.subplot(1, 3, 2)
    handle = plt.scatter(X_te[:, 0], X_te[:, 1], c=predictions, cmap='viridis',
                         s=50, alpha=0.7, edgecolors='k')
    plt.colorbar(handle)
    plt.title(f'测试集预测结果 (准确率: {acc:.3f})')
    plt.xlabel('特征 1')
    plt.ylabel('特征 2')

    # Panel 3: confusion matrix heatmap.
    plt.subplot(1, 3, 3)
    sns.heatmap(confusion_matrix(y_te, predictions), annot=True, fmt='d', cmap='Blues')
    plt.title('混淆矩阵')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')

    plt.tight_layout()
    plt.show()

    return model, X_te, y_te, predictions

def find_best_k(X_train, X_test, y_train, y_test, k_range=range(1, 16)):
    """Scan candidate k values, plot accuracy vs. k, and report the best one."""
    # One freshly fitted uniform-weight model per candidate k.
    accuracies = [
        accuracy_score(y_test, KNN(k=k).fit(X_train, y_train).predict(X_test))
        for k in k_range
    ]

    # Accuracy-vs-k curve.
    plt.figure(figsize=(10, 6))
    plt.plot(k_range, accuracies, 'bo-', linewidth=2, markersize=8)
    plt.xlabel('k值')
    plt.ylabel('准确率')
    plt.title('k值选择对准确率的影响')
    plt.grid(True, alpha=0.3)

    winner = k_range[np.argmax(accuracies)]
    top_score = max(accuracies)

    plt.axvline(x=winner, color='red', linestyle='--',
                label=f'最佳k值: {winner}, 准确率: {top_score:.3f}')
    plt.legend()
    plt.show()

    print(f"最佳k值: {winner}, 对应准确率: {top_score:.4f}")

    return winner, top_score

def demo_different_metrics():
    """Compare euclidean vs. manhattan kNN decision boundaries and accuracies."""
    # Two-class synthetic dataset in two dimensions.
    samples, targets = make_classification(n_samples=200, n_features=2, n_redundant=0,
                                           n_informative=2, n_classes=2, random_state=42)
    X_tr, X_te, y_tr, y_te = train_test_split(samples, targets, test_size=0.3,
                                              random_state=42)

    results = {}
    plt.figure(figsize=(15, 5))

    for position, metric in enumerate(('euclidean', 'manhattan'), start=1):
        clf = KNN(k=5, distance_metric=metric).fit(X_tr, y_tr)
        score = accuracy_score(y_te, clf.predict(X_te))
        results[metric] = score

        plt.subplot(1, 3, position)

        # Dense grid spanning the data range, used to shade decision regions.
        x_lo, x_hi = samples[:, 0].min() - 1, samples[:, 0].max() + 1
        y_lo, y_hi = samples[:, 1].min() - 1, samples[:, 1].max() + 1
        grid_x, grid_y = np.meshgrid(np.arange(x_lo, x_hi, 0.1),
                                     np.arange(y_lo, y_hi, 0.1))

        regions = clf.predict(np.c_[grid_x.ravel(), grid_y.ravel()])
        regions = regions.reshape(grid_x.shape)

        plt.contourf(grid_x, grid_y, regions, alpha=0.4, cmap='viridis')
        plt.scatter(X_te[:, 0], X_te[:, 1], c=y_te,
                    cmap='viridis', s=50, edgecolors='k')
        plt.title(f'{metric}距离 (准确率: {score:.3f})')
        plt.xlabel('特征 1')
        plt.ylabel('特征 2')

    # Third panel: bar chart comparing the two accuracies.
    plt.subplot(1, 3, 3)
    names = list(results.keys())
    values = list(results.values())
    bars = plt.bar(names, values, color=['skyblue', 'lightcoral'])
    plt.title('不同距离度量的准确率比较')
    plt.ylabel('准确率')

    # Annotate each bar with its numeric accuracy.
    for bar, value in zip(bars, values):
        plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.01,
                 f'{value:.3f}', ha='center', va='bottom')

    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    print("=== kNN算法实现与测试 ===\n")
    
    # 基本测试
    print("1. 基本kNN测试:")
    knn_model, X_test, y_test, y_pred = test_knn()
    
    # 寻找最佳k值
    print("\n2. 寻找最佳k值:")
    X, y = make_classification(n_samples=300, n_features=2, n_classes=3, 
                              random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, 
                                                        random_state=42)
    find_best_k(X_train, X_test, y_train, y_test)
    
    # 比较不同距离度量
    print("\n3. 不同距离度量比较:")
    demo_different_metrics()
    
    # 演示概率预测
    print("\n4. 概率预测示例:")
    sample_proba = knn_model.predict_proba(X_test[:3])
    print(f"前3个测试样本的类别概率:\n{sample_proba}")