import math
import operator
from collections import Counter

def euclidean_distance(point1, point2):
    """Return the Euclidean (L2) distance between two equal-length points.

    Raises:
        ValueError: if the two points have different dimensionality.
    """
    if len(point1) != len(point2):
        raise ValueError("点的维度不一致")

    # Sum of squared per-coordinate differences, then the square root.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))

def manhattan_distance(point1, point2):
    """Return the Manhattan (L1) distance between two equal-length points.

    Raises:
        ValueError: if the two points have different dimensionality.
    """
    if len(point1) != len(point2):
        raise ValueError("点的维度不一致")

    # Sum of absolute per-coordinate differences.
    return sum(abs(a - b) for a, b in zip(point1, point2))

def cosine_similarity(point1, point2):
    """Return the cosine similarity of two vectors (a similarity, not a distance).

    Returns 0 when either vector has zero norm, since the angle is undefined.

    Raises:
        ValueError: if the two points have different dimensionality.
    """
    if len(point1) != len(point2):
        raise ValueError("点的维度不一致")

    dot = sum(a * b for a, b in zip(point1, point2))
    norm1 = math.sqrt(sum(a * a for a in point1))
    norm2 = math.sqrt(sum(b * b for b in point2))

    # A zero vector has no direction; treat similarity as 0 rather than divide.
    if norm1 == 0 or norm2 == 0:
        return 0

    return dot / (norm1 * norm2)

class KNN:
    """K-nearest-neighbors classifier (lazy learner: just stores the data).

    Attributes:
        k: number of neighbors consulted per prediction.
        distance_metric: 'euclidean', 'manhattan' or 'cosine'.
        weights: 'uniform' (majority vote) or 'distance' (inverse-distance vote).
    """

    def __init__(self, k=3, distance_metric='euclidean', weights='uniform'):
        """Initialize the classifier.

        Args:
            k: number of neighbors.
            distance_metric: one of 'euclidean', 'manhattan', 'cosine'.
            weights: 'uniform' or 'distance'.
        """
        self.k = k
        self.distance_metric = distance_metric
        self.weights = weights
        self.X_train = None  # stored training feature vectors
        self.y_train = None  # stored training labels

    def fit(self, X, y):
        """Store the training data (KNN has no real training phase).

        Args:
            X: training features, a list of feature vectors.
            y: training labels, same length as X.

        Raises:
            ValueError: if X and y differ in length.
        """
        if len(X) != len(y):
            raise ValueError("特征和标签的数量必须一致")

        self.X_train = X
        self.y_train = y
        print(f"KNN训练完成，存储了 {len(X)} 个训练样本")

    def _calculate_distance(self, point1, point2):
        """Compute the distance between two points using the configured metric.

        Raises:
            ValueError: if distance_metric is not a supported value.
        """
        if self.distance_metric == 'euclidean':
            return euclidean_distance(point1, point2)
        if self.distance_metric == 'manhattan':
            return manhattan_distance(point1, point2)
        if self.distance_metric == 'cosine':
            # Cosine similarity converted to a distance: 1 - similarity.
            return 1 - cosine_similarity(point1, point2)
        # Fixed: the message previously used traditional characters ("距離"),
        # inconsistent with the simplified Chinese used elsewhere in the file.
        raise ValueError("不支持的距离度量方法")

    def predict_single(self, x):
        """Predict the class label of one sample.

        Args:
            x: a single feature vector.

        Returns:
            The predicted class label.

        Raises:
            ValueError: if fit() was never called, or weights is unsupported.
        """
        if self.X_train is None or self.y_train is None:
            raise ValueError("模型尚未训练")

        # Distance from x to every stored training sample, paired with labels.
        distances = [
            (self._calculate_distance(x, train_point), self.y_train[i])
            for i, train_point in enumerate(self.X_train)
        ]

        # Stable sort by distance, then keep the k closest.
        distances.sort(key=operator.itemgetter(0))
        k_nearest = distances[:self.k]

        if self.weights == 'uniform':
            # Unweighted majority vote among the k nearest labels.
            k_labels = [label for _, label in k_nearest]
            return Counter(k_labels).most_common(1)[0][0]

        if self.weights == 'distance':
            # Inverse-distance weighted vote; the epsilon guards against
            # division by zero when x coincides with a training point.
            class_weights = {}
            for dist, label in k_nearest:
                class_weights[label] = class_weights.get(label, 0) + 1 / (dist + 1e-8)
            return max(class_weights.items(), key=operator.itemgetter(1))[0]

        raise ValueError("不支持的权重类型")

    def predict(self, X):
        """Predict class labels for a list of samples.

        Args:
            X: test features, a list of feature vectors.

        Returns:
            A list of predicted labels, one per input sample.
        """
        predictions = []
        for i, x in enumerate(X):
            # Progress indicator, only for larger batches.
            if i % 100 == 0 and len(X) > 100:
                print(f"预测进度: {i}/{len(X)}")
            predictions.append(self.predict_single(x))

        return predictions

    def score(self, X, y):
        """Return the classifier's accuracy on a labeled test set.

        Args:
            X: test features.
            y: true labels.

        Returns:
            Accuracy in [0, 1].

        Raises:
            ValueError: if y is empty (previously this crashed with an
                accidental ZeroDivisionError).
        """
        if not y:
            raise ValueError("标签列表不能为空")

        predictions = self.predict(X)
        correct = sum(1 for pred, true in zip(predictions, y) if pred == true)
        return correct / len(y)

def normalize_data(X):
    """Min-max normalize each feature (column) of X into [0, 1].

    Args:
        X: a list of equal-length feature vectors.

    Returns:
        A list of lists with the same shape as X. Constant features map to
        0.5 for every sample. An empty X is returned unchanged.

    Note: previously this returned a list of *tuples* (a raw ``zip`` result),
    which was inconsistent with the list-of-lists input; it now returns lists.
    """
    if not X:
        return X

    # Work column-wise: transpose rows into per-feature tuples.
    columns = list(zip(*X))
    scaled_columns = []

    for column in columns:
        lo = min(column)
        hi = max(column)

        if hi == lo:
            # A constant feature has no spread to scale by; map to the midpoint.
            scaled_columns.append([0.5] * len(column))
        else:
            scaled_columns.append([(v - lo) / (hi - lo) for v in column])

    # Transpose back to per-sample rows, as lists.
    return [list(row) for row in zip(*scaled_columns)]

def train_test_split(X, y, test_size=0.2, random_state=None):
    """Shuffle and split (X, y) into training and test subsets.

    Args:
        X: feature rows.
        y: labels, same length as X.
        test_size: fraction of samples placed in the test set
            (count is truncated with int()).
        random_state: optional seed for a reproducible shuffle.

    Returns:
        (X_train, X_test, y_train, y_test)

    Raises:
        ValueError: if X and y differ in length.

    Note: the original imported ``random`` twice (once conditionally, once
    unconditionally); consolidated into a single local import. Behavior is
    unchanged: seeding the module RNG then shuffling.
    """
    import random

    if len(X) != len(y):
        raise ValueError("特征和标签的数量必须一致")

    if random_state is not None:
        random.seed(random_state)

    # Shuffle indices rather than the data, so X and y stay aligned.
    indices = list(range(len(X)))
    random.shuffle(indices)

    test_count = int(len(X) * test_size)
    test_indices = indices[:test_count]
    train_indices = indices[test_count:]

    X_train = [X[i] for i in train_indices]
    X_test = [X[i] for i in test_indices]
    y_train = [y[i] for i in train_indices]
    y_test = [y[i] for i in test_indices]

    return X_train, X_test, y_train, y_test

def load_iris_dataset():
    """Return a tiny hand-written iris-style demo dataset as (features, labels).

    Each feature row is [sepal length, sepal width, petal length, petal width].
    Labels: 0 = setosa, 1 = versicolor, 2 = virginica; three samples per class.
    In a real application the data would be loaded from a file instead.
    """
    samples_by_class = [
        # setosa
        [[5.1, 3.5, 1.4, 0.2], [4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]],
        # versicolor
        [[7.0, 3.2, 4.7, 1.4], [6.4, 3.2, 4.5, 1.5], [6.9, 3.1, 4.9, 1.5]],
        # virginica
        [[6.3, 3.3, 6.0, 2.5], [5.8, 2.7, 5.1, 1.9], [7.1, 3.0, 5.9, 2.1]],
    ]

    X = []
    y = []
    for label, rows in enumerate(samples_by_class):
        X.extend(rows)
        y.extend([label] * len(rows))

    return X, y

def test_knn():
    """Smoke-test the KNN classifier end-to-end on the bundled mini dataset.

    Loads the demo data, normalizes it, splits train/test, evaluates several
    values of k with uniform voting, then evaluates distance-weighted voting.
    Returns the last uniform-vote classifier (k=5).
    """
    banner = "=" * 50
    print(banner)
    print("K近邻算法测试")
    print(banner)

    # Load and describe the dataset.
    features, labels = load_iris_dataset()
    print(f"数据集大小: {len(features)} 个样本")
    print(f"特征维度: {len(features[0])}")
    print(f"类别分布: {Counter(labels)}")

    # Min-max scale the features.
    scaled = normalize_data(features)
    print("数据归一化完成")

    # Hold out 30% of the samples for testing, with a fixed seed.
    X_train, X_test, y_train, y_test = train_test_split(
        scaled, labels, test_size=0.3, random_state=42
    )
    print(f"训练集: {len(X_train)} 个样本")
    print(f"测试集: {len(X_test)} 个样本")

    # Evaluate uniform-vote KNN for several neighborhood sizes.
    for k in (1, 3, 5):
        print(f"\n--- 测试 k={k} ---")

        knn = KNN(k=k, distance_metric='euclidean', weights='uniform')
        knn.fit(X_train, y_train)

        predictions = knn.predict(X_test)
        accuracy = knn.score(X_test, y_test)

        print(f"预测结果: {predictions}")
        print(f"真实标签: {y_test}")
        print(f"准确率: {accuracy:.4f} ({sum(1 for p, t in zip(predictions, y_test) if p == t)}/{len(y_test)})")

    # Evaluate inverse-distance-weighted voting at k=3.
    print(f"\n--- 测试距离加权 (k=3) ---")
    knn_weighted = KNN(k=3, distance_metric='euclidean', weights='distance')
    knn_weighted.fit(X_train, y_train)
    accuracy_weighted = knn_weighted.score(X_test, y_test)
    print(f"距离加权准确率: {accuracy_weighted:.4f}")

    return knn

def demo_prediction():
    """Demonstrate single-sample prediction on a toy 2-class, 2-D dataset.

    Trains a k=3 KNN on two well-separated point clusters, predicts a few
    query points, and prints each query's k nearest neighbors with distances.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("KNN预测演示")
    print(banner)

    # Two clusters of five 2-D points each: class 0 near the origin,
    # class 1 further out.
    X_train = [
        [1, 2], [1, 4], [2, 1], [2, 3], [3, 2],
        [5, 6], [5, 8], [6, 5], [6, 7], [7, 6],
    ]
    y_train = [0] * 5 + [1] * 5

    knn = KNN(k=3)
    knn.fit(X_train, y_train)

    # One query inside each cluster plus one near the boundary.
    test_points = [
        [1.5, 2.5],  # expected class 0
        [5.5, 6.5],  # expected class 1
        [3, 5],      # boundary point
    ]

    for point in test_points:
        prediction = knn.predict_single(point)
        print(f"测试点 {point} 的预测类别: {prediction}")

        # Recompute the distances to show the k closest training samples.
        neighbors = [
            (euclidean_distance(point, sample), label, sample)
            for sample, label in zip(X_train, y_train)
        ]
        neighbors.sort(key=operator.itemgetter(0))

        print(f"  最近的 {knn.k} 个邻居:")
        for dist, label, neighbor in neighbors[:knn.k]:
            print(f"    邻居 {neighbor}, 距离: {dist:.4f}, 类别: {label}")
if __name__ == "__main__":
    # Run the end-to-end accuracy test, then the neighbor-inspection demo.
    knn_model = test_knn()
    demo_prediction()