from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
from kneed import KneeLocator
import matplotlib.pyplot as plt
import numpy as np


def get_eps_k_distance(x_centers, k, cluster_idx):
    """
    Automatically estimate DBSCAN's eps parameter from the k-distance graph,
    with knee detection tuned for column-gap patterns in classical-book data.

    Args:
        x_centers: sequence of x-center coordinates (1-D).
        k: number of nearest neighbors, usually min_samples - 1.
        cluster_idx: cluster index, used only in the chart title.

    Returns:
        float: the recommended eps value.
    """
    try:
        # Reshape into the 2-D column vector scikit-learn expects.
        X = np.array(x_centers).reshape(-1, 1)
        n_samples = len(X)

        if n_samples <= k:
            # Too few samples for a k-distance graph: fall back to a value
            # derived from the data spread.
            if n_samples <= 1:
                return 0.5  # minimal sensible value for a single sample
            # BUG FIX: the original `np.max(X) - np.min(X) * 0.3` scaled only
            # np.min(X) due to precedence; the intent is 30% of the range.
            return (np.max(X) - np.min(X)) * 0.3

        # Compute k-nearest-neighbor distances (k + 1 because each point's
        # nearest neighbor is itself).
        neigh = NearestNeighbors(n_neighbors=k + 1)
        nbrs = neigh.fit(X)
        distances, _ = nbrs.kneighbors(X)

        # Sorted distance from each point to its k-th neighbor (skips self).
        k_distances = np.sort(distances[:, k], axis=0)

        # Detect the elbow with kneed; sensitivity S tuned for the
        # column-gap characteristics of classical-book layouts.
        kneedle = KneeLocator(
            range(len(k_distances)),
            k_distances,
            curve='convex',
            direction='increasing',
            S=1.0  # sensitivity suited to column-gap curves
        )

        # Fallback chain when no knee is detected.
        eps = None
        if kneedle.knee is not None:
            eps = k_distances[kneedle.knee]
        else:
            # Second derivative highlights abrupt jumps (column gaps).
            second_deriv = np.diff(np.diff(k_distances))
            if len(second_deriv) > 0:
                max_deriv_idx = np.argmax(second_deriv) + 1  # +1: 2nd diff is two points shorter
                eps = k_distances[max_deriv_idx]
            else:
                # Last resort: 90th percentile, more conservative than 95%.
                eps = np.percentile(k_distances, 90)

        # Sanity-check eps against column-width heuristics.
        if len(x_centers) > 1:
            data_range = np.max(x_centers) - np.min(x_centers)
            data_std = np.std(x_centers)

            # Cap eps at 1/5 of the range to avoid over-merging columns.
            eps = min(eps, data_range / 5)
            # Floor eps at 1.5 std devs so points within a column stay together.
            eps = max(eps, data_std * 1.5)

        # Render the k-distance plot, then close it immediately to avoid
        # popping up windows during batch processing.
        plt.figure(figsize=(10, 6))
        plt.plot(k_distances, 'b-', label=f'{k}-th NN Distance')
        if kneedle.knee is not None:
            plt.axvline(x=kneedle.knee, color='r', linestyle='--',
                        label=f'Elbow at index {kneedle.knee}')
        plt.axhline(y=eps, color='g', linestyle='--',
                    label=f'Suggested eps: {eps:.3f}')
        plt.xlabel('Points sorted by distance')
        plt.ylabel(f'{k}-th nearest neighbor distance')
        plt.title(f'K-Distance Graph (Cluster {cluster_idx + 1}) for Column Detection')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.close()

        return eps

    except Exception as e:
        print(f"计算eps时发生错误: {str(e)}")
        # On failure, return a robust default based on data spread.
        if len(x_centers) > 1:
            return np.std(x_centers) * 2
        else:
            return 1.0


def calculate_min_samples_auto(n_points):
    """
    Automatically derive DBSCAN's min_samples, tuned for the character
    counts typically found in classical-book text columns.
    """
    # Small clusters use fixed thresholds: (upper bound on n_points, value).
    for upper_bound, threshold in ((5, 2), (15, 3), (50, 4)):
        if n_points <= upper_bound:
            return threshold
    # Large clusters: a conservative proportional value clamped to [5, 10].
    proportional = n_points // 12
    return max(5, min(10, proportional))


def assign_noise_to_clusters(noise_points, clustered_points, x_centers, labels, cluster_data):
    """
    Re-assign DBSCAN noise points (label -1) to existing clusters, taking
    the spatial continuity of classical-book columns into account.
    """
    try:
        core_indices = np.where(labels != -1)[0]

        if len(core_indices) == 0:
            # Every point is noise: fall back to grouping by vertical
            # adjacency (points close in the y direction share a group).
            order = sorted(enumerate(x_centers),
                           key=lambda pair: cluster_data[pair[0]]['y_start'])
            half_height = np.mean([item['height'] for item in cluster_data]) * 0.5
            groups = [[order[0]]]
            for prev, curr in zip(order, order[1:]):
                vertical_gap = (cluster_data[curr[0]]['y_start']
                                - cluster_data[prev[0]]['y_end'])
                if vertical_gap < half_height:
                    groups[-1].append(curr)
                else:
                    groups.append([curr])
            new_labels = np.zeros_like(labels)
            for group_id, members in enumerate(groups):
                for original_idx, _ in members:
                    new_labels[original_idx] = group_id
            return new_labels

        # Collect member x-coordinates per cluster label.
        members_by_label = {}
        for idx in core_indices:
            members_by_label.setdefault(labels[idx], []).append(x_centers[idx])

        # Centroid and horizontal extent of each cluster.
        centroid_of = {}
        width_of = {}
        for lbl, coords in members_by_label.items():
            centroid_of[lbl] = np.mean(coords)
            width_of[lbl] = np.max(coords) - np.min(coords)

        result = labels.copy()
        avg_char_height = (np.mean([item['height'] for item in cluster_data])
                           if cluster_data else 10)

        for idx, lbl in enumerate(labels):
            if lbl != -1:
                continue
            # Nearest cluster centroid along the x axis.
            nearest = min(centroid_of,
                          key=lambda c: abs(x_centers[idx] - centroid_of[c]))
            horizontal_gap = abs(x_centers[idx] - centroid_of[nearest])

            if horizontal_gap <= width_of[nearest] * 1.2:
                # Close enough horizontally: absorb into the nearest column.
                result[idx] = nearest
            else:
                # Otherwise require vertical continuity: some member of the
                # nearest column must sit within ~2 character heights.
                y_here = cluster_data[idx]['y_start']
                vertically_close = any(
                    labels[j] == nearest and
                    abs(item['y_start'] - y_here) < avg_char_height * 2
                    for j, item in enumerate(cluster_data)
                )
                if vertically_close:
                    result[idx] = nearest

        return result
    except Exception as e:
        print(f"分配噪声点时发生错误: {str(e)}")
        return labels


def validate_and_adjust_columns(columns, cluster_data):
    """
    Validate and adjust detected columns based on classical-book layout
    conventions: merge columns that are suspiciously close together and
    drop columns with far fewer characters than average.

    Args:
        columns: dict mapping cluster label -> list of character dicts
            (each with at least an 'x_center' key).
        cluster_data: full character list of the cluster; kept for
            interface compatibility, not used directly here.

    Returns:
        dict: surviving columns re-keyed 0..n-1, ordered left to right.
    """
    if len(columns) <= 1:
        return columns

    # Summarize each column: center position, width and character count.
    column_info = []
    for label, data in columns.items():
        x_centers = [item['x_center'] for item in data]
        col_center = np.mean(x_centers)
        col_width = np.max(x_centers) - np.min(x_centers) if len(x_centers) > 1 else 0
        column_info.append({
            'label': label,
            'data': data,
            'center': col_center,
            'width': col_width,
            'count': len(data)
        })

    # Order columns left-to-right by center position.
    column_info.sort(key=lambda info: info['center'])

    # Average width over non-degenerate columns. BUG FIX: the original fed
    # a possibly-empty list to np.mean (NaN + RuntimeWarning when every
    # column is a single point); 0.0 takes the same non-merge branch below.
    positive_widths = [info['width'] for info in column_info if info['width'] > 0]
    avg_col_width = np.mean(positive_widths) if positive_widths else 0.0

    # Merge adjacent columns whose center gap is under half the average width.
    merged_columns = [column_info[0]]
    for current in column_info[1:]:
        prev = merged_columns[-1]
        gap = current['center'] - prev['center']

        if gap < avg_col_width * 0.5 and avg_col_width > 0:
            merged_data = prev['data'] + current['data']
            merged_x = [item['x_center'] for item in merged_data]
            merged_columns[-1] = {
                'label': f"{prev['label']}+{current['label']}",
                'data': merged_data,
                'center': np.mean(merged_x),
                'width': np.max(merged_x) - np.min(merged_x),
                'count': len(merged_data)
            }
        else:
            merged_columns.append(current)

    # Drop columns much shorter than average (fewer than ~1/3 of the mean
    # character count, with an absolute floor of 2).
    if len(merged_columns) > 1:
        avg_count = np.mean([info['count'] for info in merged_columns])
        merged_columns = [
            info for info in merged_columns
            if info['count'] >= max(2, avg_count * 0.3)
        ]

    # Re-key the surviving columns 0..n-1, left to right.
    return {i: info['data'] for i, info in enumerate(merged_columns)}


def dbscan_clusters_lines(sorted_data_kmeans):
    """
    Split each K-means cluster (upper/lower text block) into vertical
    columns by running DBSCAN on the characters' x-center coordinates.

    Args:
        sorted_data_kmeans: list of clusters; each cluster is a list of
            character dicts with at least 'x_center' and 'y_start' keys.

    Returns:
        list: one entry per detected column, each a list of character
        dicts sorted top-to-bottom; within each source cluster the
        columns are ordered left-to-right.
    """
    sorted_data_dbscan = []

    print(f'k-dbscan分栏:')
    for cluster_idx, cluster_data in enumerate(sorted_data_kmeans):
        try:
            print(f"处理第 {cluster_idx + 1} 个簇，共 {len(cluster_data)} 个字符")

            if not cluster_data:
                continue

            # x-center coordinates drive the column clustering.
            x_centers = [item['x_center'] for item in cluster_data]

            X = np.array(x_centers).reshape(-1, 1)
            min_samples = calculate_min_samples_auto(len(cluster_data))

            # Estimate eps from the k-distance graph (k = min_samples - 1).
            eps = get_eps_k_distance(x_centers, min_samples - 1, cluster_idx)

            # Guard against a non-positive eps estimate.
            if eps <= 0:
                print(f"  修正eps值: 从 {eps} 改为 1.0")
                eps = 1.0
            # Dense clusters with many distinct x positions: tighten eps.
            if len(cluster_data) > 50 and len(np.unique(x_centers)) > 10:
                eps = eps * 0.8

            print(f"  簇 {cluster_idx + 1} 参数: eps={eps:.2f}, min_samples={min_samples}")

            # Run the DBSCAN clustering on the x coordinates.
            dbscan = DBSCAN(eps=eps, min_samples=min_samples)
            labels = dbscan.fit_predict(X)

            # Re-assign noise points (-1) to the most plausible column.
            updated_labels = assign_noise_to_clusters(
                noise_points=[i for i, label in enumerate(labels) if label == -1],
                clustered_points=[i for i, label in enumerate(labels) if label != -1],
                x_centers=x_centers,
                labels=labels,
                cluster_data=cluster_data  # enables vertical-continuity checks
            )

            # Group characters by their final column label.
            columns = {}
            for item, label in zip(cluster_data, updated_labels):
                if label not in columns:
                    columns[label] = []
                item_with_label = item.copy()
                # Tag each character with its column for later tracing.
                item_with_label['column_label'] = f"{cluster_idx}_{label}"
                columns[label].append(item_with_label)

            # Merge/filter columns according to layout heuristics.
            columns = validate_and_adjust_columns(columns, cluster_data)

            # Sort characters within each column top-to-bottom.
            cluster_columns = []
            for label, column_data in columns.items():
                column_data.sort(key=lambda x: x['y_start'])
                cluster_columns.append(column_data)

            # Order columns left-to-right by their mean x position.
            cluster_columns.sort(key=lambda col: np.mean([item['x_center'] for item in col]))

            sorted_data_dbscan.extend(cluster_columns)

            # Per-cluster diagnostics.
            print(f"  簇 {cluster_idx + 1} 分成 {len(cluster_columns)} 列")
            for i, column in enumerate(cluster_columns):
                avg_x = np.mean([item['x_center'] for item in column])
                print(f"    列 {i + 1}: {len(column)} 个字符, 平均x位置: {avg_x:.1f}")
        except Exception as e:
            print(f"处理第 {cluster_idx + 1} 个簇时发生错误: {str(e)}")
            # Fallback: crude grouping by y coordinate.
            try:
                cluster_data_sorted = sorted(cluster_data, key=lambda x: x['y_start'])
                group_size = max(3, len(cluster_data_sorted) // 10)  # adaptive chunk size
                groups = [cluster_data_sorted[i:i + group_size] for i in range(0, len(cluster_data_sorted), group_size)]
                sorted_data_dbscan.extend(groups)
                print(f"  错误恢复: 将第 {cluster_idx + 1} 个簇分成 {len(groups)} 列")
            except Exception:
                # BUG FIX: the original bare `except:` also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                # Last resort: treat the whole cluster as a single column.
                cluster_data.sort(key=lambda x: x['y_start'])
                sorted_data_dbscan.append(cluster_data)
                print(f"  错误处理: 将第 {cluster_idx + 1} 个簇作为单列处理")

    print(f"\n最终分列结果: 共 {len(sorted_data_dbscan)} 列")

    return sorted_data_dbscan
