import math
import operator
import random
from collections import Counter

def euclidean_distance(point1, point2):
    """Return the straight-line (L2) distance between two points.

    Raises:
        ValueError: if the points have different numbers of dimensions.
    """
    if len(point1) != len(point2):
        raise ValueError("Points must have the same dimensions")

    return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))

def manhattan_distance(point1, point2):
    """Return the city-block (L1) distance between two points.

    Raises:
        ValueError: if the points have different numbers of dimensions.
    """
    if len(point1) != len(point2):
        raise ValueError("Points must have the same dimensions")

    return sum(abs(a - b) for a, b in zip(point1, point2))

def cosine_similarity(point1, point2):
    """Return the cosine of the angle between two vectors.

    Returns 0 when either vector has zero magnitude (the angle is
    undefined in that case).

    Raises:
        ValueError: if the vectors have different numbers of dimensions.
    """
    if len(point1) != len(point2):
        raise ValueError("Points must have the same dimensions")

    dot = 0
    sq_sum1 = 0
    sq_sum2 = 0
    for a, b in zip(point1, point2):
        dot += a * b
        sq_sum1 += a ** 2
        sq_sum2 += b ** 2

    magnitude1 = math.sqrt(sq_sum1)
    magnitude2 = math.sqrt(sq_sum2)

    # A zero vector has no direction; define its similarity as 0.
    if magnitude1 == 0 or magnitude2 == 0:
        return 0

    return dot / (magnitude1 * magnitude2)

class KNN:
    """k-nearest-neighbors classifier.

    Stores the training set verbatim at fit() time and classifies a
    query point by majority vote ('uniform') or inverse-distance vote
    ('distance') among its k nearest training points.

    NOTE(review): the original class defined predict, predict_single and
    predict_proba_single twice; Python silently keeps only the last
    definition, and the first predict_proba_single was actually a copy
    of predict_single returning a bare label. The shadowed duplicates
    have been removed and the shared neighbor search factored into
    _k_nearest().
    """

    def __init__(self, k=3, distance_metric='euclidean', weights='uniform'):
        """
        Args:
            k: number of neighbors consulted per prediction.
            distance_metric: 'euclidean', 'manhattan' or 'cosine'.
            weights: 'uniform' (each neighbor votes equally) or
                'distance' (each neighbor votes with weight 1/distance).
        """
        self.k = k
        self.distance_metric = distance_metric
        self.weights = weights
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Memorize the training samples X and their labels y.

        Returns self so calls can be chained.

        Raises:
            ValueError: if X and y differ in length.
        """
        if len(X) != len(y):
            raise ValueError("X and y must have the same length")

        self.X_train = X
        self.y_train = y
        return self

    def _calculate_distance(self, point1, point2):
        """Distance between two points under the configured metric."""
        if self.distance_metric == 'euclidean':
            return euclidean_distance(point1, point2)
        elif self.distance_metric == 'manhattan':
            return manhattan_distance(point1, point2)
        elif self.distance_metric == 'cosine':
            # Turn cosine similarity into a distance: 1 - similarity.
            return 1 - cosine_similarity(point1, point2)
        else:
            raise ValueError("Unsupported distance metric")

    def _k_nearest(self, x):
        """Return the k nearest (distance, label) pairs for sample x.

        Raises:
            ValueError: if called before fit().
        """
        if self.X_train is None or self.y_train is None:
            raise ValueError("Model must be fitted before prediction")

        distances = [
            (self._calculate_distance(x, train_point), self.y_train[i])
            for i, train_point in enumerate(self.X_train)
        ]
        distances.sort(key=operator.itemgetter(0))
        return distances[:self.k]

    def predict_single(self, x):
        """Predict the class label for a single sample x."""
        k_neighbors = self._k_nearest(x)

        if self.weights == 'uniform':
            # Simple majority vote among the k neighbors.
            neighbor_labels = [label for _, label in k_neighbors]
            return Counter(neighbor_labels).most_common(1)[0][0]

        elif self.weights == 'distance':
            # Closer neighbors vote with larger weight (1/distance); an
            # exact match (distance 0) gets infinite weight and dominates.
            weight_by_label = {}
            for dist, label in k_neighbors:
                weight = float('inf') if dist == 0 else 1 / dist
                weight_by_label[label] = weight_by_label.get(label, 0) + weight
            return max(weight_by_label.items(), key=operator.itemgetter(1))[0]

        else:
            raise ValueError("Unsupported weight type")

    def predict(self, X):
        """Predict a class label for every sample in X."""
        return [self.predict_single(x) for x in X]

    def predict_proba_single(self, x):
        """Return {label: probability} for a single sample x.

        Probabilities are the normalized vote weights of the k nearest
        neighbors and sum to 1.
        """
        k_neighbors = self._k_nearest(x)

        if self.weights != 'uniform':
            # BUG FIX: with distance weights, a zero-distance neighbor
            # previously produced inf weights and inf/inf = NaN
            # probabilities. Exact matches now take all the probability
            # mass, split evenly among them.
            exact_labels = [label for dist, label in k_neighbors if dist == 0]
            if exact_labels:
                counts = Counter(exact_labels)
                return {label: n / len(exact_labels) for label, n in counts.items()}

        class_weights = {}
        total_weight = 0

        for dist, label in k_neighbors:
            # Uniform: every neighbor counts 1. Distance: weight 1/dist
            # (dist > 0 here because zero distances were handled above).
            weight = 1 if self.weights == 'uniform' else 1 / dist
            class_weights[label] = class_weights.get(label, 0) + weight
            total_weight += weight

        return {label: w / total_weight for label, w in class_weights.items()}

    def predict_proba(self, X):
        """Return a probability dict for every sample in X."""
        return [self.predict_proba_single(x) for x in X]

    def score(self, X, y):
        """Return the mean accuracy of predictions on X against labels y."""
        predictions = self.predict(X)
        return sum(pred == true for pred, true in zip(predictions, y)) / len(y)

def normalize_data(X):
    """Min-max scale every feature (column) of X into [0, 1].

    A constant feature (zero range) is pinned to 0.5 for every sample.
    Returns a list of tuples (one per input row), or X unchanged when
    it is empty.
    """
    if not X:
        return X

    # Work column-by-column: transpose rows into feature columns.
    scaled_columns = []
    for column in zip(*X):
        lo = min(column)
        hi = max(column)
        span = hi - lo

        if span == 0:
            # Every value identical: map the whole feature to the midpoint.
            scaled_columns.append([0.5] * len(column))
        else:
            scaled_columns.append([(value - lo) / span for value in column])

    # Transpose back into rows.
    return list(zip(*scaled_columns))

def train_test_split(X, y, test_size=0.2, random_state=None):
    """Shuffle and split (X, y) into training and test portions.

    Args:
        X, y: parallel sequences of samples and labels.
        test_size: fraction of samples placed in the test set
            (truncated to an integer count).
        random_state: optional seed making the shuffle reproducible.

    Returns:
        (X_train, X_test, y_train, y_test)

    Raises:
        ValueError: if X and y differ in length.
    """
    if len(X) != len(y):
        raise ValueError("X and y must have the same length")

    if random_state is not None:
        random.seed(random_state)

    # Shuffle index positions rather than the data itself so X and y
    # stay paired.
    indices = list(range(len(X)))
    random.shuffle(indices)

    n_test = int(len(X) * test_size)
    test_idx = indices[:n_test]
    train_idx = indices[n_test:]

    def take(seq, idx):
        # Gather the elements of seq at the given index positions.
        return [seq[i] for i in idx]

    return take(X, train_idx), take(X, test_idx), take(y, train_idx), take(y, test_idx)
# Test-data helpers
def generate_sample_data(n_samples=100, n_features=2, n_classes=3):
    """Generate a labelled Gaussian-blob dataset for testing.

    Each class gets a random center near (5 * class_idx, ...) and its
    points are drawn from unit-variance Gaussians around that center.

    BUG FIX: the original emitted n_classes * (n_samples // n_classes)
    points, silently dropping the remainder (e.g. 99 points for the
    default n_samples=100). The remainder is now spread over the first
    classes so exactly n_samples points are returned.

    Returns:
        (X, y): list of n_samples points and their integer class labels.
    """
    X = []
    y = []

    base_count, remainder = divmod(n_samples, n_classes)

    for class_idx in range(n_classes):
        # Random center for this class, well separated from the others.
        center = [class_idx * 5 + random.uniform(-1, 1) for _ in range(n_features)]

        # First `remainder` classes absorb one extra point each.
        count = base_count + (1 if class_idx < remainder else 0)
        for _ in range(count):
            # Scatter points around the class center.
            point = [coord + random.gauss(0, 1) for coord in center]
            X.append(point)
            y.append(class_idx)

    return X, y

# Demo usage
if __name__ == "__main__":
    import random

    print("生成测试数据...")
    X, y = generate_sample_data(n_samples=150, n_features=2, n_classes=3)

    print(f"数据点数量: {len(X)}")
    print(f"特征数量: {len(X[0])}")
    print(f"类别数量: {len(set(y))}")

    # Min-max scale the features before computing distances.
    print("\n数据归一化...")
    X_normalized = normalize_data(X)

    # Hold out 20% of the data for evaluation.
    print("分割训练测试集...")
    X_train, X_test, y_train, y_test = train_test_split(
        X_normalized, y, test_size=0.2, random_state=42
    )

    print(f"训练集大小: {len(X_train)}")
    print(f"测试集大小: {len(X_test)}")

    # Compare accuracy across several neighborhood sizes.
    for k in [1, 3, 5, 7]:
        print(f"\n=== k={k} ===")

        model = KNN(k=k, distance_metric='euclidean', weights='uniform')
        model.fit(X_train, y_train)

        accuracy = model.score(X_test, y_test)
        print(f"准确率: {accuracy:.4f}")

        # Show predictions for the first few held-out samples.
        print("前5个测试样本的预测:")
        for sample, true_label in zip(X_test[:5], y_test[:5]):
            pred = model.predict_single(sample)
            proba = model.predict_proba_single(sample)
            print(f"  真实: {true_label}, 预测: {pred}, 概率: {proba}")

    # Compare the three supported distance metrics at a fixed k.
    print(f"\n=== 不同距离度量比较 (k=3) ===")
    for metric in ['euclidean', 'manhattan', 'cosine']:
        model = KNN(k=3, distance_metric=metric, weights='uniform')
        model.fit(X_train, y_train)
        accuracy = model.score(X_test, y_test)
        print(f"{metric}: {accuracy:.4f}")