import math
import random
from collections import defaultdict

def euclidean_distance(point1, point2):
    """
    Compute the Euclidean (L2) distance between two points.

    Parameters:
        point1: sequence of numbers.
        point2: sequence of numbers, same length as point1.

    Returns:
        float: the Euclidean distance between the two points.

    Raises:
        ValueError: if the two points have different dimensions.
    """
    if len(point1) != len(point2):
        raise ValueError("点的维度不一致")

    # Pair up coordinates with zip instead of indexing by range(len(...)),
    # and let sum() fold the squared differences in one pass.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))

def assign_cluster(x, centroids):
    """
    Return the index of the centroid nearest to data point x.

    Parameters:
        x: the data point (sequence of numbers).
        centroids: list of centroid coordinate sequences.

    Returns:
        int: index of the closest centroid (first one wins on ties);
        0 when centroids is empty, matching the original behavior.
    """
    # min() keeps the first index achieving the smallest distance, which
    # reproduces the strict `<` tie-breaking of the manual scan it replaces.
    # `default=0` preserves the original's return value for an empty list.
    return min(
        range(len(centroids)),
        key=lambda i: euclidean_distance(x, centroids[i]),
        default=0,
    )

def initialize_centroids(data, k):
    """
    Choose k distinct points from data at random as starting centroids.

    Parameters:
        data: list of data points.
        k: number of centroids to pick (must not exceed len(data)).

    Returns:
        list: k data points selected without replacement.
    """
    # random.sample draws k unique indices, so no point is picked twice.
    picked = random.sample(range(len(data)), k)
    return [data[position] for position in picked]

def update_centroids(clusters, data, k, dimensions):
    """
    Recompute every centroid as the mean of its cluster's points.

    Parameters:
        clusters: mapping of cluster id -> list of data-point indices.
        data: list of data points.
        k: total number of clusters; ids 0..k-1 are processed in order.
        dimensions: number of coordinates per point to average.

    Returns:
        list: k new centroid coordinate lists.
    """
    new_centroids = []

    for cid in range(k):
        member_ids = clusters[cid] if cid in clusters else []

        # An empty cluster gets re-seeded with a random data point so
        # the algorithm always keeps exactly k centroids alive.
        if not member_ids:
            new_centroids.append(data[random.randint(0, len(data) - 1)])
            continue

        members = [data[i] for i in member_ids]
        count = len(members)
        mean = [
            sum(point[dim] for point in members) / count
            for dim in range(dimensions)
        ]
        new_centroids.append(mean)

    return new_centroids

def calculate_centroid_change(old_centroids, new_centroids):
    """
    Return the total distance the centroids moved between iterations.

    Parameters:
        old_centroids: centroid positions before the update.
        new_centroids: centroid positions after the update.

    Returns:
        float: sum of the Euclidean distances between paired centroids.
    """
    # Pair old/new centroids positionally and fold the distances with sum().
    return sum(
        euclidean_distance(before, after)
        for before, after in zip(old_centroids, new_centroids)
    )

def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
    """
    K-means clustering implemented from scratch.

    Parameters:
        data: list of data points, each a list/tuple of numbers.
        k: number of clusters (1 <= k <= len(data)).
        epsilon: convergence threshold; iteration stops once the total
            centroid movement falls below this value.
        max_iterations: hard cap on the number of iterations.

    Returns:
        dict with keys:
            'clusters': {cluster_id: [point indices]}
            'centroids': final centroid coordinates
            'labels': cluster id assigned to each data point
            'sse': sum of squared errors of the final clustering
            'iterations': number of iterations actually performed
            'centroid_changes': centroid movement recorded per iteration

    Raises:
        ValueError: if data is empty or k is out of range.
    """

    if len(data) == 0:
        raise ValueError("数据不能为空")

    if k <= 0 or k > len(data):
        raise ValueError("聚类数量k必须在1到数据点数量之间")

    # Dimensionality is taken from the first point; euclidean_distance
    # will raise if later points disagree.
    dimensions = len(data[0])

    # 1. Initialize centroids from randomly chosen data points.
    centroids = initialize_centroids(data, k)

    # Safe defaults in case max_iterations == 0 (the original code would
    # hit a NameError on `clusters`/`labels` in that edge case).
    clusters = defaultdict(list)
    labels = []
    centroid_changes = []
    iterations_run = 0

    print(f"开始K-means聚类，数据点数量: {len(data)}, 聚类数: {k}, 数据维度: {dimensions}")

    for iteration in range(max_iterations):
        iterations_run = iteration + 1

        # 2. Assign every data point to its nearest centroid.
        clusters = defaultdict(list)  # cluster_id -> list of point indices
        labels = []                   # cluster id per data point
        for i, point in enumerate(data):
            cluster_id = assign_cluster(point, centroids)
            clusters[cluster_id].append(i)
            labels.append(cluster_id)

        # 3. Recompute centroids as the means of their clusters.
        new_centroids = update_centroids(clusters, data, k, dimensions)

        # 4. Convergence check: total centroid movement below epsilon.
        change = calculate_centroid_change(centroids, new_centroids)
        centroid_changes.append(change)

        print(f"迭代 {iteration + 1}: 中心点变化量 = {change:.6f}")

        # Adopt the refined centroids BEFORE the convergence break, so the
        # returned centroids are the means of the final assignment (the
        # original returned the stale pre-update centroids on convergence).
        centroids = new_centroids

        if change < epsilon:
            print(f"聚类已收敛，迭代次数: {iteration + 1}")
            break
    else:
        # for/else: only reached when the loop ran out without converging.
        print(f"达到最大迭代次数: {max_iterations}")

    # Cluster tightness: sum of squared distances to the assigned centroid.
    sse = 0
    for cluster_id, point_indices in clusters.items():
        centroid = centroids[cluster_id]
        for point_idx in point_indices:
            point = data[point_idx]
            sse += euclidean_distance(point, centroid) ** 2

    print(f"最终SSE (误差平方和): {sse:.4f}")

    return {
        'clusters': dict(clusters),
        'centroids': centroids,
        'labels': labels,
        'sse': sse,
        # Fixed off-by-one: the original reported max_iterations + 1 when
        # the iteration cap was reached.
        'iterations': iterations_run,
        'centroid_changes': centroid_changes
    }

# Test function
def test_kmeans():
    """
    Smoke test: cluster three well-separated 2-D Gaussian blobs.

    Returns:
        dict: the result dictionary produced by Kmeans.
    """
    # Fixed seed so the generated data (and hence the run) is reproducible.
    random.seed(42)

    # Three tight blobs of 30 points each, centered at (1,1), (5,5), (9,1).
    # The center list order matches the original generation order exactly.
    data = []
    for cx, cy in [(1, 1), (5, 5), (9, 1)]:
        for _ in range(30):
            data.append([random.gauss(cx, 0.3), random.gauss(cy, 0.3)])

    print("测试数据生成完成，开始聚类...")
    print("-" * 50)

    # Run the clustering with the default-style parameters.
    result = Kmeans(data, k=3, epsilon=1e-4, max_iterations=100)

    print("-" * 50)
    print("聚类结果:")
    for cluster_id, point_indices in result['clusters'].items():
        centroid = result['centroids'][cluster_id]
        rounded = [round(c, 2) for c in centroid]
        print(f"簇 {cluster_id}: {len(point_indices)} 个点, 中心点: {rounded}")

    return result



if __name__ == "__main__":
    # Run the demo: generate sample blobs and cluster them.
    result = test_kmeans()
    