# sort_for_DBSCAN
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sort_API import parse_page_data


def get_eps_k_distance(x_centers, k):
    """
    Estimate DBSCAN's ``eps`` parameter from a k-distance plot.

    Sorts every point's distance to its k-th nearest neighbour and
    returns the distance at the "elbow" (the largest jump in the sorted
    curve), which separates dense regions from sparse ones.

    Args:
        x_centers: 1-D sequence of x-center coordinates.
        k: number of nearest neighbours to use, typically
           ``min_samples - 1``. Clamped to ``[1, n_points]`` so sklearn
           never receives an invalid ``n_neighbors``.

    Returns:
        float: recommended eps value.

    Raises:
        ValueError: if fewer than 2 points are supplied (no elbow exists).
    """
    # Ensure a 2-D (n_samples, 1) feature matrix for sklearn.
    X = np.array(x_centers, dtype=float).reshape(-1, 1)
    n_points = X.shape[0]
    if n_points < 2:
        raise ValueError("get_eps_k_distance requires at least 2 points")

    # kneighbors counts the query point itself, so k must not exceed n_points.
    k = max(1, min(k, n_points))

    # Distance from each point to its k-th nearest neighbour.
    nbrs = NearestNeighbors(n_neighbors=k).fit(X)
    distances, _ = nbrs.kneighbors(X)
    k_distances = distances[:, -1]

    # Sort ascending; the largest consecutive jump marks the elbow.
    k_distances_sorted = np.sort(k_distances)
    differences = np.diff(k_distances_sorted)
    elbow_index = np.argmax(differences) + 1
    return k_distances_sorted[elbow_index]


def calculate_min_samples_auto(n_points):
    """
    Choose a DBSCAN ``min_samples`` value from the number of points.

    Small data sets get small fixed values; for 50+ points the value
    scales as roughly one sample per 15 points, never dropping below 4.

    Args:
        n_points: total number of points to be clustered.

    Returns:
        int: suggested min_samples value.
    """
    # (upper bound, min_samples) ladder for small data sets.
    ladder = ((10, 2), (30, 3), (50, 4))
    for upper_bound, samples in ladder:
        if n_points < upper_bound:
            return samples
    # Large data sets: scale with size, floor at 4.
    return max(4, n_points // 15)


def assign_noise_to_clusters(noise_points, clustered_points, x_centers, labels):
    """
    Reassign DBSCAN noise points (label -1) to their nearest cluster.

    Each noise point receives the label of the closest non-noise point,
    measured by 1-D distance on the x-center coordinate.

    Args:
        noise_points: indices of noise points. Unused — noise is
            re-derived from ``labels``; kept for interface compatibility.
        clustered_points: indices of clustered points. Unused; kept for
            interface compatibility.
        x_centers: x-center coordinate of every point.
        labels: numpy array of DBSCAN labels, where -1 marks noise.

    Returns:
        Updated copy of ``labels`` with every -1 replaced — unless every
        point is noise, in which case an unchanged copy is returned
        (there is no cluster to assign to).
    """
    # Indices of all points DBSCAN placed in a real cluster.
    non_noise_indices = np.where(labels != -1)[0]

    # Degenerate case: everything is noise — nothing to snap to.
    if len(non_noise_indices) == 0:
        return labels.copy()

    # Coordinates and labels of the clustered points, aligned by position.
    non_noise_centers = np.asarray(
        [x_centers[i] for i in non_noise_indices], dtype=float
    )
    non_noise_labels = [labels[i] for i in non_noise_indices]

    updated_labels = labels.copy()
    for i, label in enumerate(labels):
        if label == -1:  # noise point
            # Vectorized 1-D distance to every clustered point.
            nearest_idx = np.argmin(np.abs(non_noise_centers - x_centers[i]))
            updated_labels[i] = non_noise_labels[nearest_idx]

    return updated_labels


def dbscan_distinguish_left_right(page_data):
    """
    Split a page's characters into left/right columns via DBSCAN.

    Characters are clustered on their x-center coordinate only; noise
    points are folded into the nearest cluster; each resulting column is
    sorted top-to-bottom and the columns themselves are ordered left to
    right by mean x position.

    Args:
        page_data: raw page data in whatever format parse_page_data accepts.

    Returns:
        list[list[dict]]: one list of character dicts per column, ordered
        left to right; within each column, dicts are sorted by 'y_start'.
    """
    coords, _, _ = parse_page_data(page_data)

    # The x-center of every character is the single clustering feature.
    x_centers = [item['x_center'] for item in coords]
    X = np.array(x_centers).reshape(-1, 1)

    min_samples = calculate_min_samples_auto(len(coords))
    # TODO: derive eps dynamically instead of hard-coding it,
    # e.g. eps = get_eps_k_distance(x_centers, min_samples - 1)
    eps = 167

    # Run DBSCAN clustering on the 1-D x-center data.
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(X)

    # Fold noise points (-1) into the nearest cluster so every character
    # ends up in some column.
    updated_labels = assign_noise_to_clusters(
        noise_points=[i for i, label in enumerate(labels) if label == -1],
        clustered_points=[i for i, label in enumerate(labels) if label != -1],
        x_centers=x_centers,
        labels=labels,
    )

    # Group character dicts by final cluster label. Copies keep the
    # caller's coords untouched by the in-place sorts below.
    columns = {}
    for item, label in zip(coords, updated_labels):
        columns.setdefault(label, []).append(item.copy())

    # Sort each column top-to-bottom, then order columns left-to-right
    # by their mean x position.
    sorted_columns = [
        sorted(column_data, key=lambda c: c['y_start'])
        for column_data in columns.values()
    ]
    sorted_columns.sort(key=lambda col: np.mean([item['x_center'] for item in col]))

    return sorted_columns
