'''使用机器学习模型计算相似度矩阵，根据部门分组，将报表名称和数据项拼接后聚类'''

import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer, util
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics import silhouette_score
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_distances
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import warnings

# Suppress library warnings globally (sklearn/transformers emit many during fitting);
# deliberate best-effort silencing for batch runs.
warnings.filterwarnings('ignore')

# Load the sentence-embedding model from a local checkpoint.
model = SentenceTransformer('C:/Users/xingwenzheng/models/sentence-transformer')    # paraphrase-multilingual-MiniLM-L12-v2


def load_and_preprocess_data(file_path, sheet_name):
    """
    Load one Excel sheet and build the combined text used for clustering.

    Parameters:
        file_path: path to the Excel workbook.
        sheet_name: name of the sheet to read.

    Returns:
        The DataFrame with a new '综合文本' column formed as
        '报表名称' + '。' + '数据项'.
    """
    df = pd.read_excel(file_path, sheet_name=sheet_name)

    # 1. Fill missing values first so downstream concatenation never sees NaN.
    df['报表名称'] = df['报表名称'].fillna('')
    df['数据项'] = df['数据项'].fillna('')

    # 2. Report any non-string cells. We iterate the columns directly instead
    #    of DataFrame.iterrows(): iterrows materializes each row as a Series,
    #    which can silently upcast values (e.g. int -> float / object), so the
    #    isinstance check could report the wrong type.
    for idx, name, item in zip(df.index, df['报表名称'], df['数据项']):
        if not isinstance(name, str):
            print(f'第 {idx} 行 “报表名称” 类型不是 str：{name!r} {type(name)}')
        if not isinstance(item, str):
            print(f'第 {idx} 行 “数据项” 类型不是 str：{item!r} {type(item)}')

    # 3. Concatenate; astype(str) guards against any residual non-string cells.
    df['综合文本'] = df['报表名称'].astype(str) + '。' + df['数据项'].astype(str)
    return df


def calculate_similarity_and_sort(df):
    """
    Group the reports by department, order each group so that similar reports
    sit next to each other, and attach a neighbour-similarity score.

    Returns:
        One concatenated DataFrame with an added '相似度得分' column.
    """
    ordered_groups = []

    for department, group in df.groupby('部门名称'):
        print(f"处理部门: {department}")

        texts = group['综合文本'].tolist()

        # A lone report needs no ordering and gets the maximum score.
        if len(texts) <= 1:
            ordered_groups.append(group.assign(相似度得分=1.0))
            continue

        # Embed the combined texts and build the pairwise cosine matrix once.
        embeddings = model.encode(texts, convert_to_tensor=True)
        similarity = util.cos_sim(embeddings, embeddings).numpy()

        # Greedy nearest-neighbour ordering (travelling-salesman style
        # approximation) so similar reports end up adjacent.
        order = optimize_ordering(similarity)

        # Score each entry by its similarity to the neighbours in the ordering.
        scores = calculate_similarity_scores(similarity, order)

        reordered = group.iloc[order].copy()
        reordered['相似度得分'] = scores
        ordered_groups.append(reordered)

    return pd.concat(ordered_groups, ignore_index=True)

# 新的相似度计算函数
# def calculate_similarity_and_sort(df):
#     """
#     按部门分组，分别计算报表名称和数据项的相似度并排序（基于报表名称排序）
#     返回的DataFrame包含两列：'报表相似度得分' 和 '数据项相似度得分'
#     """
#     results = []
#
#     for department, group in df.groupby('部门名称'):
#         print(f"处理部门: {department}")
#
#         # 获取该部门的报表名称和数据项列表
#         report_texts = group['报表名称'].tolist()
#         item_texts = group['数据项'].tolist()
#
#         # --- 计算报表名称相似度 ---
#         if len(report_texts) > 1:
#             report_embeddings = model.encode(report_texts, convert_to_tensor=True)
#             report_cosine_scores = util.cos_sim(report_embeddings, report_embeddings)
#             report_sorted_indices = optimize_ordering(report_cosine_scores.numpy())
#             report_similarity_scores = calculate_similarity_scores(report_cosine_scores.numpy(), report_sorted_indices)
#         else:
#             report_sorted_indices = list(range(len(group)))
#             report_similarity_scores = [1.0] * len(group)
#
#         # --- 计算数据项相似度 ---
#         if len(item_texts) > 1:
#             item_embeddings = model.encode(item_texts, convert_to_tensor=True)
#             item_cosine_scores = util.cos_sim(item_embeddings, item_embeddings)
#             # 注意：这里我们计算数据项的相似度得分，但不改变排序（排序依然基于报表）
#             item_similarity_scores = calculate_similarity_scores(item_cosine_scores.numpy(), list(range(len(item_texts))))
#         else:
#             item_similarity_scores = [1.0] * len(group)
#
#         # 按报表名称的排序顺序重新组织数据
#         sorted_group = group.iloc[report_sorted_indices].copy()
#         sorted_group['报表相似度得分'] = report_similarity_scores
#         sorted_group['数据项相似度得分'] = item_similarity_scores  # 数据项得分按原始索引对应
#
#         results.append(sorted_group)
#
#     return pd.concat(results, ignore_index=True)


def optimize_ordering(similarity_matrix):
    """
    Order items greedily so that similar items end up adjacent
    (a nearest-neighbour approximation of a travelling-salesman ordering).

    Fixes:
      * The starting item is now one endpoint of the most similar
        OFF-diagonal pair. Previously the argmax ran over the raw matrix,
        whose diagonal (self-similarity ~1.0) dominates, so the walk in
        practice always started at row 0 regardless of the data.
      * The running best similarity starts at -inf instead of -1, so an
        exact cosine similarity of -1.0 can still be selected (the old
        strict `> -1` comparison would never pick it, looping forever).

    Parameters:
        similarity_matrix: square 2-D array of pairwise similarities.

    Returns:
        list of row indices giving the greedy visiting order.
    """
    n = len(similarity_matrix)
    if n == 0:
        return []

    visited = [False] * n
    ordering = []

    # Mask self-similarities so argmax finds a genuine pair (harmless for n == 1).
    off_diagonal = np.array(similarity_matrix, dtype=float, copy=True)
    np.fill_diagonal(off_diagonal, -np.inf)

    # Start at one endpoint of the most similar pair.
    current = np.unravel_index(np.argmax(off_diagonal), off_diagonal.shape)[0]
    visited[current] = True
    ordering.append(current)

    # Repeatedly hop to the most similar unvisited item.
    while len(ordering) < n:
        best_similarity = -np.inf
        best_index = -1

        for candidate in range(n):
            if not visited[candidate]:
                similarity = similarity_matrix[current, candidate]
                if similarity > best_similarity:
                    best_similarity = similarity
                    best_index = candidate

        if best_index != -1:
            visited[best_index] = True
            ordering.append(best_index)
            current = best_index

    return ordering


def calculate_similarity_scores(similarity_matrix, ordering):
    """
    Score each entry of `ordering` by its similarity to its neighbours:
    endpoints use their single neighbour, interior entries use the mean of
    the previous and next neighbour; a lone entry scores 1.0.
    """
    n = len(ordering)
    if n == 1:
        return [1.0]

    scores = []
    for pos, idx in enumerate(ordering):
        neighbours = []
        if pos > 0:
            neighbours.append(similarity_matrix[idx, ordering[pos - 1]])
        if pos < n - 1:
            neighbours.append(similarity_matrix[idx, ordering[pos + 1]])
        # One neighbour at the ends, two in the middle.
        scores.append(sum(neighbours) / len(neighbours))
    return scores


# def perform_clustering_analysis(df):
#     """
#     执行聚类分析并识别中心簇
#     """
#     clustering_results = []
#
#     for department, group in df.groupby('部门名称'):
#         print(f"对部门 {department} 进行聚类分析...")
#
#         texts = group['综合文本'].tolist()
#
#         if len(texts) <= 1:
#             # 单个项目无法聚类
#             cluster_result = group.copy()
#             cluster_result['聚类标签'] = 0
#             cluster_result['距中心距离'] = 0.0
#             clustering_results.append(cluster_result)
#             continue
#
#         # 生成嵌入向量
#         embeddings = model.encode(texts)
#
#         # 确定最佳聚类数量
#         best_k, best_score = find_optimal_clusters(embeddings, max_k=min(10, len(texts)))
#
#         if best_k == 1:
#             # 如果最佳聚类数为1，直接分配标签
#             kmeans = KMeans(n_clusters=1, random_state=42, n_init=10)
#             labels = kmeans.fit_predict(embeddings)
#             distances = np.linalg.norm(embeddings - kmeans.cluster_centers_[labels], axis=1)
#         else:
#             # 使用最佳K值进行K-means聚类
#             kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=10)
#             labels = kmeans.fit_predict(embeddings)
#             distances = np.linalg.norm(embeddings - kmeans.cluster_centers_[labels], axis=1)
#
#         # 为每个聚类找到中心点（最接近质心的样本）
#         cluster_centers = {}
#         for cluster_id in range(best_k):
#             cluster_indices = np.where(labels == cluster_id)[0]
#             if len(cluster_indices) > 0:
#                 cluster_embeddings = embeddings[cluster_indices]
#                 center_embedding = kmeans.cluster_centers_[cluster_id]
#
#                 # 找到最接近质心的样本
#                 center_distances = np.linalg.norm(cluster_embeddings - center_embedding, axis=1)
#                 center_index = cluster_indices[np.argmin(center_distances)]
#
#                 cluster_centers[cluster_id] = {
#                     '中心报表': group.iloc[center_index]['报表名称'],
#                     '中心数据项': group.iloc[center_index]['数据项'],
#                     '聚类大小': len(cluster_indices)
#                 }
#
#         # 添加到结果
#         cluster_result = group.copy()
#         cluster_result['聚类标签'] = labels
#         cluster_result['距中心距离'] = distances
#         clustering_results.append((cluster_result, cluster_centers))
#
#     return clustering_results
#
#
# def find_optimal_clusters(embeddings, max_k=10):
#     """
#     使用轮廓系数找到最佳聚类数量
#     """
#     if len(embeddings) <= 2:
#         return 1, 0
#
#     best_score = -1
#     best_k = 1
#
#     for k in range(2, min(max_k + 1, len(embeddings))):
#         kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
#         labels = kmeans.fit_predict(embeddings)
#         score = silhouette_score(embeddings, labels)
#
#         if score > best_score:
#             best_score = score
#             best_k = k
#
#     return best_k, best_score


def perform_clustering_analysis(df):
    """
    Run per-department K-means clustering over the combined-text embeddings
    and pick a representative center sample for each cluster.

    Parameters:
        df: DataFrame with at least '部门名称', '综合文本', '报表名称',
            '数据项' columns.

    Returns:
        list whose elements are either a bare DataFrame (single-row
        departments, or departments left as one un-split cluster) or a
        (DataFrame, dict) tuple where the dict maps cluster id -> center
        metadata. NOTE(review): the mixed element types force callers to
        isinstance-check each entry.
    """
    clustering_results = []

    for department, group in df.groupby('部门名称'):
        print(f"对部门 {department} 进行聚类分析...")

        texts = group['综合文本'].tolist()

        if len(texts) <= 1:
            # A single report cannot be clustered; label it cluster 0.
            cluster_result = group.copy()
            cluster_result['聚类标签'] = 0
            cluster_result['距中心距离'] = 0.0
            clustering_results.append(cluster_result)
            continue

        # Embed the combined report-name + data-item text.
        embeddings = model.encode(texts)

        # Choose a cluster count via the stricter silhouette search.
        best_k, best_score = find_optimal_clusters_improved(embeddings, max_k=min(15, len(texts)))

        print(f"部门 {department} 最佳聚类数: {best_k}, 轮廓系数: {best_score:.4f}")

        if best_k == 1:
            # Even when the search says "one cluster", split if some pair of
            # points is very dissimilar.
            if should_split_single_cluster(embeddings, texts):
                # Force a split into two clusters.
                best_k = 2
                kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=20)
                labels = kmeans.fit_predict(embeddings)
                distances = np.linalg.norm(embeddings - kmeans.cluster_centers_[labels], axis=1)
            else:
                cluster_result = group.copy()
                cluster_result['聚类标签'] = 0
                cluster_result['距中心距离'] = 0.0
                clustering_results.append(cluster_result)
                continue
        else:
            # K-means with more restarts / iterations for stability.
            kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=20, max_iter=300)
            labels = kmeans.fit_predict(embeddings)
            distances = np.linalg.norm(embeddings - kmeans.cluster_centers_[labels], axis=1)

        # Merge clusters smaller than 2 points into their nearest big cluster.
        # NOTE(review): `distances` is not recomputed after this reassignment,
        # so reassigned points keep the distance to their ORIGINAL centroid —
        # confirm whether downstream consumers care.
        labels = post_process_clusters(labels, distances, embeddings, min_cluster_size=2)

        # For each surviving cluster, the representative center is the sample
        # closest to its K-means centroid.
        cluster_centers = {}
        unique_labels = np.unique(labels)
        for cluster_id in unique_labels:
            cluster_indices = np.where(labels == cluster_id)[0]
            if len(cluster_indices) > 0:
                cluster_embeddings = embeddings[cluster_indices]
                center_embedding = kmeans.cluster_centers_[cluster_id]

                # Sample nearest to the centroid.
                center_distances = np.linalg.norm(cluster_embeddings - center_embedding, axis=1)
                center_index = cluster_indices[np.argmin(center_distances)]

                cluster_centers[cluster_id] = {
                    '中心报表': group.iloc[center_index]['报表名称'],
                    '中心数据项': group.iloc[center_index]['数据项'],
                    '聚类大小': len(cluster_indices)
                }

        # Attach labels/distances and emit as a (frame, centers) tuple.
        cluster_result = group.copy()
        cluster_result['聚类标签'] = labels
        cluster_result['距中心距离'] = distances
        clustering_results.append((cluster_result, cluster_centers))

    return clustering_results


def find_optimal_clusters_improved(embeddings, max_k=15):
    """
    Search k in [2, max_k] for the silhouette-best K-means clustering.

    Behaviour:
      * k values producing any singleton cluster are penalized (-0.5);
      * near-ties (within 0.05) prefer the larger k (finer clusters);
      * if the best silhouette is still below 0.2, the elbow heuristic
        picks k instead.

    Fix: the fallback KMeans now passes random_state=42 / n_init=10 like
    every other KMeans call in this module, so the reported score is
    reproducible between runs.

    Returns:
        (best_k, best_score) tuple.
    """
    if len(embeddings) <= 2:
        return 1, 0

    best_score = -1
    best_k = 1
    scores = []

    for k in range(2, min(max_k + 1, len(embeddings))):
        kmeans = KMeans(n_clusters=k, random_state=42, n_init=15)
        labels = kmeans.fit_predict(embeddings)

        # Silhouette is only meaningful when every cluster has >= 2 members.
        unique, counts = np.unique(labels, return_counts=True)
        if np.all(counts >= 2):
            score = silhouette_score(embeddings, labels)
        else:
            # Penalize solutions that contain singleton clusters.
            score = -0.5

        scores.append(score)

        # Prefer the higher silhouette; on near-ties prefer more clusters.
        if score > best_score or (abs(score - best_score) < 0.05 and k > best_k):
            best_score = score
            best_k = k

    # Weak separation overall -> fall back to the elbow heuristic.
    if best_score < 0.2:
        best_k = find_elbow_point(embeddings, max_k)
        best_score = silhouette_score(
            embeddings,
            KMeans(n_clusters=best_k, random_state=42, n_init=10).fit_predict(embeddings))

    return best_k, best_score


def should_split_single_cluster(embeddings, texts, similarity_threshold=0.6):
    """
    Decide whether a group that the silhouette search left as one cluster
    should nevertheless be split: it should if ANY pair of points has a
    cosine similarity below `similarity_threshold`.

    Improvement: the pairwise cosine similarities are computed directly in
    numpy instead of round-tripping the (numpy) embeddings through
    sentence_transformers' torch-based util.cos_sim — numerically identical,
    one dependency fewer on the hot path. `texts` is accepted for signature
    compatibility but not read.
    """
    if len(embeddings) <= 3:
        return False

    emb = np.asarray(embeddings, dtype=float)
    # Row-normalize; the clip guards against zero vectors.
    norms = np.linalg.norm(emb, axis=1, keepdims=True)
    normed = emb / np.clip(norms, 1e-12, None)
    cosine_scores = normed @ normed.T

    # Smallest similarity over the strict upper triangle (distinct pairs).
    min_similarity = np.min(cosine_scores[np.triu_indices_from(cosine_scores, k=1)])

    # A very dissimilar pair means the "single" cluster should be split.
    return min_similarity < similarity_threshold


def find_elbow_point(embeddings, max_k):
    """
    Choose a cluster count with the elbow heuristic: fit K-means for each k
    and pick the k where the inertia curve bends the most, measured by the
    second difference. Falls back to 2 when too few k values are available.
    """
    upper = min(max_k + 1, len(embeddings))
    inertias = []
    for k in range(1, upper):
        inertias.append(
            KMeans(n_clusters=k, random_state=42, n_init=10).fit(embeddings).inertia_)

    if len(inertias) <= 2:
        return 2

    # Double differencing drops two points, hence the +2 index shift.
    curvature = np.diff(np.diff(inertias))
    elbow = np.argmin(curvature) + 2
    return min(elbow, max_k)


def post_process_clusters(labels, distances, embeddings, min_cluster_size=2):
    """
    Dissolve clusters smaller than `min_cluster_size` by reassigning each of
    their points to the nearest sufficiently large cluster, measured by
    Euclidean distance to that cluster's mean embedding. `distances` is
    accepted for signature compatibility but not used here.
    """
    unique_labels, counts = np.unique(labels, return_counts=True)
    sizes = dict(zip(unique_labels.tolist(), counts.tolist()))

    undersized = [lbl for lbl in unique_labels if sizes[lbl] < min_cluster_size]
    if not undersized:
        return labels

    # Candidate destination clusters and their centroids (from the original
    # labelling, so reassignments do not shift the targets mid-pass).
    keepers = [lbl for lbl in unique_labels if sizes[lbl] >= min_cluster_size]
    centroids = {lbl: embeddings[labels == lbl].mean(axis=0) for lbl in keepers}

    new_labels = labels.copy()
    if keepers:
        for lbl in undersized:
            for point in np.where(labels == lbl)[0]:
                vec = embeddings[point]
                # min() keeps the first of tied candidates, matching the
                # original strict-less-than scan order.
                nearest = min(keepers,
                              key=lambda c: np.linalg.norm(vec - centroids[c]))
                new_labels[point] = nearest

    return new_labels


def perform_clustering_dbscan(df):
    """
    Per-department DBSCAN clustering over the combined-text embeddings.

    Pipeline per department: embed texts, derive eps/min_samples from the
    data, standardize and cluster, promote each noise point to its own
    singleton cluster, then pick a density-based center per cluster.

    Bug fix: find_density_center_for_dbscan returns an index RELATIVE to the
    cluster's own embedding array; it is now mapped back through
    cluster_indices before being used with group.iloc (previously the local
    index was used directly, selecting the wrong row as center).

    Returns:
        list of bare DataFrames (single-row departments) or
        (DataFrame, centers-dict) tuples.
    """
    clustering_results = []

    for department, group in df.groupby('部门名称'):
        print(f"对部门 {department} 进行DBSCAN聚类分析...")

        texts = group['综合文本'].tolist()

        if len(texts) <= 1:
            # A single report cannot be clustered.
            cluster_result = group.copy()
            cluster_result['聚类标签'] = 0
            cluster_result['聚类类型'] = '核心点'
            clustering_results.append(cluster_result)
            continue

        # Embed the combined texts.
        embeddings = model.encode(texts)

        # Derive DBSCAN parameters from the data itself.
        eps = find_optimal_eps(embeddings, method='statistical')
        min_samples = max(2, min(3, len(texts) // 5))

        print(f"DBSCAN参数: eps={eps:.4f}, min_samples={min_samples}")

        # Standardize, then rescale the cosine-derived eps to the scaled
        # Euclidean space (heuristic sqrt(dim) factor).
        scaler = StandardScaler()
        embeddings_scaled = scaler.fit_transform(embeddings)
        eps_scaled = eps * np.sqrt(embeddings_scaled.shape[1])

        dbscan = DBSCAN(eps=eps_scaled, min_samples=min_samples, metric='euclidean')
        labels = dbscan.fit_predict(embeddings_scaled)

        # Noise handling: promote each noise point (-1) to its own singleton
        # cluster with a fresh label.
        unique_labels = np.unique(labels)
        noise_mask = labels == -1
        noise_count = np.sum(noise_mask)

        if noise_count > 0:
            print(f"  发现 {noise_count} 个噪声点，将它们作为单独的聚类")
            next_label = np.max(labels) + 1 if len(unique_labels) > 1 else 0
            for i in np.where(noise_mask)[0]:
                labels[i] = next_label
                next_label += 1

        # Labels changed above; recompute the unique set.
        unique_labels = np.unique(labels)

        # Pick a representative center for every cluster.
        cluster_centers = {}

        for cluster_id in unique_labels:
            cluster_indices = np.where(labels == cluster_id)[0]

            if len(cluster_indices) == 0:
                continue

            if len(cluster_indices) == 1:
                # Singleton cluster (incl. former noise): it is its own center.
                center_index = cluster_indices[0]
                center_report = group.iloc[center_index]['报表名称']
                center_item = group.iloc[center_index]['数据项']

                cluster_centers[cluster_id] = {
                    '中心报表': center_report,
                    '中心数据项': center_item,
                    '聚类大小': 1,
                    '聚类类型': '噪声点' if noise_count > 0 and center_index in np.where(noise_mask)[0] else '单点',
                    '中心点索引': center_index
                }
            else:
                # Multi-point cluster: use the density peak as center.
                cluster_embeddings = embeddings[cluster_indices]
                # FIX: the helper returns an index local to cluster_embeddings;
                # translate it to the group-level row position.
                local_center = find_density_center_for_dbscan(cluster_embeddings, embeddings, eps)
                center_index = cluster_indices[local_center]

                center_report = group.iloc[center_index]['报表名称']
                center_item = group.iloc[center_index]['数据项']

                cluster_centers[cluster_id] = {
                    '中心报表': center_report,
                    '中心数据项': center_item,
                    '聚类大小': len(cluster_indices),
                    '聚类类型': '核心簇',
                    '中心点索引': center_index
                }

        # Log and sanity-check the chosen centers.
        print_center_info(cluster_centers, group, labels)

        # Attach labels and per-point type to the result frame.
        cluster_result = group.copy()
        cluster_result['聚类标签'] = labels

        point_types = []
        for label in labels:
            cluster_size = np.sum(labels == label)
            if cluster_size == 1:
                # NOTE(review): heuristic — relabelled noise points occupy the
                # top `noise_count` label values; confirm if labels are ever
                # remapped elsewhere.
                original_noise = noise_count > 0 and label >= (np.max(labels) - noise_count + 1)
                point_types.append('噪声点' if original_noise else '单点')
            else:
                point_types.append('核心点' if cluster_size >= min_samples else '边界点')
        cluster_result['聚类类型'] = point_types

        clustering_results.append((cluster_result, cluster_centers))

        print(f"  生成 {len(unique_labels)} 个聚类")

    return clustering_results


def print_center_info(cluster_centers, group, labels):
    """
    Pretty-print the per-cluster center metadata and run two sanity checks:
    singleton centers must belong to their own cluster, and no two clusters
    may share the same (report, data-item) center pair. `group` is accepted
    for signature compatibility but not read.
    """
    print("  中心点详细信息:")
    for cid, info in cluster_centers.items():
        idx = info['中心点索引']
        size = info['聚类大小']

        print(f"    聚类 {cid}:")
        print(f"      类型: {info['聚类类型']}")
        print(f"      大小: {size}")
        print(f"      中心报表: {info['中心报表']}")
        print(f"      中心数据项: {info['中心数据项']}")
        print(f"      中心点索引: {idx}")

        # Singleton clusters: the center must be the cluster's only member.
        if size == 1:
            members = np.where(labels == cid)[0]
            ok = len(members) == 1 and members[0] == idx
            print(f"      验证: 中心点正确" if ok else f"      警告: 中心点不正确!")

    # Cross-cluster check: flag duplicate (report, item) center pairs.
    pairs = [(info['中心报表'], info['中心数据项']) for info in cluster_centers.values()]
    if len(set(pairs)) == len(pairs):
        print("  所有中心点都是唯一的")
    else:
        dupes = set([p for p in pairs if pairs.count(p) > 1])
        print(f"  警告: 发现重复的中心点: {dupes}")


def find_density_center_for_dbscan(cluster_embeddings, all_embeddings, eps):
    """
    Pick the density peak of a DBSCAN cluster.

    Density of a cluster member = number of points in `all_embeddings` whose
    cosine distance to it is <= eps.

    Improvement: the per-point sklearn call inside a Python loop (plus the
    in-function sklearn import) is replaced with a single vectorized numpy
    computation — cosine distance = 1 - cosine similarity, clipped to [0, 2]
    as sklearn does.

    Returns:
        Index of the densest point RELATIVE TO `cluster_embeddings`; callers
        must map it through the cluster's index array to address the full
        dataset.
    """
    cluster = np.asarray(cluster_embeddings, dtype=float)
    universe = np.asarray(all_embeddings, dtype=float)

    def _unit(matrix):
        # Row-normalize; clip guards against zero vectors.
        norms = np.linalg.norm(matrix, axis=1, keepdims=True)
        return matrix / np.clip(norms, 1e-12, None)

    cosine_dist = np.clip(1.0 - _unit(cluster) @ _unit(universe).T, 0.0, 2.0)

    # Neighbourhood size of every cluster member within eps.
    densities = np.sum(cosine_dist <= eps, axis=1)
    return int(np.argmax(densities))


def find_optimal_eps(embeddings, method='percentile'):
    """
    Heuristically choose DBSCAN's eps from the embedding geometry.

    Methods:
      'statistical' — half the mean pairwise cosine distance;
      'knee'        — elbow of the sorted k-distance curve;
      anything else — median of the k-distance curve ('percentile'),
                      with a fallback for degenerate zero distances.

    The result is always clamped to [0.1, 0.8]; a single point yields 0.5.
    """
    from sklearn.neighbors import NearestNeighbors
    from sklearn.metrics.pairwise import cosine_distances

    if len(embeddings) <= 1:
        return 0.5

    if method == 'statistical':
        # Mean off-diagonal pairwise cosine distance, halved.
        pairwise = cosine_distances(embeddings)
        np.fill_diagonal(pairwise, np.inf)
        mean_dist = np.mean(pairwise[pairwise < np.inf])
        eps = min(max(mean_dist * 0.5, 0.1), 0.8)
    elif method == 'knee':
        k = min(5, len(embeddings) - 1)
        nn = NearestNeighbors(n_neighbors=k, metric='cosine').fit(embeddings)
        dists, _ = nn.kneighbors(embeddings)
        sorted_kth = np.sort(dists[:, -1])

        if len(sorted_kth) > 10:
            # Elbow = largest curvature of the sorted k-distance curve
            # (+2 compensates for the two points lost to double differencing).
            curvature = np.diff(np.diff(sorted_kth))
            eps = sorted_kth[np.argmax(np.abs(curvature)) + 2]
        else:
            # Too few points for a stable elbow: use the median.
            eps = np.median(sorted_kth)
    else:  # 'percentile'
        k = min(5, len(embeddings) - 1)
        nn = NearestNeighbors(n_neighbors=k, metric='cosine').fit(embeddings)
        dists, _ = nn.kneighbors(embeddings)
        sorted_kth = np.sort(dists[:, -1])

        # Median of the k-distances; robust against zero-heavy tails.
        eps = np.percentile(sorted_kth, 50)
        if eps <= 0:
            positive = sorted_kth[sorted_kth > 0]
            eps = np.min(positive) if len(positive) > 0 else 0.3

    # Final clamp to a sane working range.
    return min(max(eps, 0.1), 0.8)


def find_density_center(cluster_embeddings, all_embeddings, eps):
    """
    Return the index (relative to `cluster_embeddings`) of the point with the
    most neighbours in `all_embeddings` within cosine distance eps.

    Improvement: same change as find_density_center_for_dbscan — the
    per-point sklearn call inside a Python loop (with an in-function sklearn
    import) is replaced by one vectorized numpy computation; cosine distance
    = 1 - cosine similarity, clipped to [0, 2] as sklearn does.
    """
    cluster = np.asarray(cluster_embeddings, dtype=float)
    universe = np.asarray(all_embeddings, dtype=float)

    def _unit(matrix):
        # Row-normalize; clip guards against zero vectors.
        norms = np.linalg.norm(matrix, axis=1, keepdims=True)
        return matrix / np.clip(norms, 1e-12, None)

    cosine_dist = np.clip(1.0 - _unit(cluster) @ _unit(universe).T, 0.0, 2.0)

    # Neighbourhood size of every cluster member within eps.
    densities = np.sum(cosine_dist <= eps, axis=1)
    return int(np.argmax(densities))



def visualize_clusters(clustering_results, output_path):
    """
    Project all clustered embeddings to 2-D with t-SNE and save a two-panel
    figure: left colored by cluster label, right colored by department.

    Parameters:
        clustering_results: list from the perform_clustering_* functions;
            only (DataFrame, centers) tuple entries are plotted.
        output_path: image file path, e.g. "聚类可视化.png".
    """
    fig, axes = plt.subplots(1, 2, figsize=(15, 6))

    embedding_chunks = []
    cluster_labels = []
    dept_of_point = []

    # Re-embed each department's texts and collect labels for plotting.
    for entry in clustering_results:
        if not isinstance(entry, tuple):
            continue
        frame, _ = entry
        dept = frame['部门名称'].iloc[0]
        sentences = frame['综合文本'].tolist()

        embedding_chunks.append(model.encode(sentences))
        cluster_labels.extend(frame['聚类标签'].tolist())
        dept_of_point.extend([dept] * len(sentences))

    if not embedding_chunks:
        print("没有足够的数据进行可视化")
        return

    # Stack per-department chunks into one matrix and reduce to 2-D.
    matrix = np.vstack(embedding_chunks)
    points = TSNE(n_components=2, random_state=42).fit_transform(matrix)

    # Left panel: colored by cluster label.
    axes[0].scatter(points[:, 0], points[:, 1],
                    c=cluster_labels, cmap='tab10', alpha=0.6)
    axes[0].set_title('报表聚类可视化')
    axes[0].set_xlabel('t-SNE特征1')
    axes[0].set_ylabel('t-SNE特征2')

    # Right panel: colored by department.
    palette = {dept: i for i, dept in enumerate(list(set(dept_of_point)))}
    axes[1].scatter(points[:, 0], points[:, 1],
                    c=[palette[d] for d in dept_of_point], cmap='Set3', alpha=0.6)
    axes[1].set_title('部门分布可视化')
    axes[1].set_xlabel('t-SNE特征1')
    axes[1].set_ylabel('t-SNE特征2')

    plt.tight_layout()
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()

    print(f"聚类可视化已保存至: {output_path}")


# def generate_cluster_report(clustering_results, output_file):   #output_file="聚类分析报告.xlsx"
#     """
#     生成详细的聚类分析报告
#     """
#     with pd.ExcelWriter(output_file) as writer:
#         # 创建聚类汇总表
#         summary_data = []
#
#         for i, result in enumerate(clustering_results):
#             if isinstance(result, tuple):
#                 cluster_result, cluster_centers = result
#                 department = cluster_result['部门名称'].iloc[0]
#
#                 for cluster_id, center_info in cluster_centers.items():
#                     cluster_size = center_info['聚类大小']
#                     cluster_rows = cluster_result[cluster_result['聚类标签'] == cluster_id]
#
#                     summary_data.append({
#                         '部门': department,
#                         '聚类ID': f"{department}_集群{cluster_id}",
#                         '集群大小': cluster_size,
#                         '中心报表': center_info['中心报表'],
#                         '中心数据项': center_info['中心数据项'],
#                         '平均相似度': np.mean(
#                             cluster_rows['相似度得分']) if '相似度得分' in cluster_rows.columns else 'N/A'
#                     })
#
#         # 写入汇总表
#         if summary_data:
#             summary_df = pd.DataFrame(summary_data)
#             summary_df.to_excel(writer, sheet_name='聚类汇总', index=False)
#
#         # 写入详细数据
#         all_results = []
#         for result in clustering_results:
#             if isinstance(result, tuple):
#                 all_results.append(result[0])
#             else:
#                 all_results.append(result)
#
#         detailed_df = pd.concat(all_results, ignore_index=True)
#         detailed_df.to_excel(writer, sheet_name='详细数据', index=False)
#
#     print(f"聚类分析报告已保存至: {output_file}")

def generate_cluster_report(clustering_results, output_file):
    """
    Write a two-sheet Excel report for the clustering results.

    Sheet '聚类汇总': one row per data point, prefixed with its cluster's
    metadata (id, size, center report/item, mean similarity) so its row
    count matches the detail sheet.
    Sheet '详细数据': the per-department frames concatenated as-is.

    Parameters:
        clustering_results: list of DataFrame or (DataFrame, centers-dict)
            entries as produced by the perform_clustering_* functions.
        output_file: target .xlsx path.
    """
    with pd.ExcelWriter(output_file) as writer:
        # Rows for the summary sheet.
        summary_data = []

        # Per-department frames collected for the detail sheet.
        all_detailed_data = []

        for i, result in enumerate(clustering_results):
            if isinstance(result, tuple):
                # Department that produced real clusters: (frame, centers).
                cluster_result, cluster_centers = result
                department = cluster_result['部门名称'].iloc[0]

                # Keep the raw frame for the detail sheet.
                all_detailed_data.append(cluster_result)

                for cluster_id, center_info in cluster_centers.items():
                    cluster_size = center_info['聚类大小']
                    cluster_rows = cluster_result[cluster_result['聚类标签'] == cluster_id]

                    # One summary row per member of the cluster.
                    for idx, row in cluster_rows.iterrows():
                        record = {
                            '部门': department,
                            '聚类ID': f"{department}_集群{cluster_id}",
                            '集群大小': cluster_size,
                            '中心报表': center_info['中心报表'],
                            '中心数据项': center_info['中心数据项'],
                            '平均相似度': np.mean(
                                cluster_rows['相似度得分']) if '相似度得分' in cluster_rows.columns else 'N/A'
                        }

                        # Append every source column not already present.
                        for col in cluster_result.columns:
                            if col not in record:  # don't overwrite cluster metadata
                                record[col] = row[col]

                        summary_data.append(record)
            else:
                # Department that came back as a bare frame (single point).
                cluster_result = result
                department = cluster_result['部门名称'].iloc[0]

                # Keep the raw frame for the detail sheet.
                all_detailed_data.append(cluster_result)

                # Summary row(s) for the singleton department.
                for idx, row in cluster_result.iterrows():
                    record = {
                        '部门': department,
                        '聚类ID': f"{department}_单点",
                        '集群大小': 1,
                        '中心报表': row['报表名称'],
                        '中心数据项': row['数据项'],
                        '平均相似度': row['相似度得分'] if '相似度得分' in cluster_result.columns else 'N/A'
                    }

                    # Append every source column not already present.
                    for col in cluster_result.columns:
                        if col not in record:  # don't overwrite cluster metadata
                            record[col] = row[col]

                    summary_data.append(record)

        # Write the summary sheet with cluster metadata columns first.
        if summary_data:
            summary_df = pd.DataFrame(summary_data)
            # Put the cluster metadata columns in front.
            cluster_cols = ['部门', '聚类ID', '集群大小', '中心报表', '中心数据项', '平均相似度']
            other_cols = [col for col in summary_df.columns if col not in cluster_cols]
            summary_df = summary_df[cluster_cols + other_cols]
            summary_df.to_excel(writer, sheet_name='聚类汇总', index=False)
            print(f"聚类汇总表记录数: {len(summary_df)}")
        else:
            print("警告: 没有生成任何聚类汇总数据")

        # Write the detail sheet and cross-check the row counts.
        if all_detailed_data:
            detailed_df = pd.concat(all_detailed_data, ignore_index=True)
            detailed_df.to_excel(writer, sheet_name='详细数据', index=False)
            print(f"详细数据表记录数: {len(detailed_df)}")

            # NOTE(review): `summary_df` is only bound when summary_data is
            # non-empty — the short-circuit below relies on that.
            if summary_data and len(summary_df) != len(detailed_df):
                print(f"警告: 两个表的数据量不一致! 汇总表: {len(summary_df)}, 详细表: {len(detailed_df)}")
        else:
            print("警告: 没有生成任何详细数据")

    print(f"聚类分析报告已保存至: {output_file}")

# def perform_clustering_combined(df, method='auto'):
#     """
#     组合聚类方法：自动选择最佳方法
#     """
#     clustering_results = []
#
#     for department, group in df.groupby('部门名称'):
#         print(f"对部门 {department} 进行聚类分析...")
#
#         texts = group['综合文本'].tolist()
#
#         if len(texts) <= 1:
#             # 单个项目无法聚类
#             cluster_result = group.copy()
#             cluster_result['聚类标签'] = 0
#             cluster_result['距中心距离'] = 0.0
#             clustering_results.append(cluster_result)
#             continue
#
#         if method == 'auto':
#             # 根据数据特征自动选择方法
#             embeddings = model.encode(texts)
#
#             if len(texts) <= 3:
#                 # 小数据集使用K-means
#                 result = perform_clustering_single(group, method='kmeans_improved')
#             else:
#                 # 先尝试DBSCAN
#                 dbscan_result = perform_clustering_single(group, method='dbscan')
#                 dbscan_labels = dbscan_result[0]['聚类标签']
#                 unique_labels = np.unique(dbscan_labels)
#                 noise_count = np.sum(dbscan_labels == -1) if -1 in dbscan_labels else 0
#
#                 # 如果噪声点过多或聚类数不合理，使用K-means
#                 if (noise_count > len(texts) * 0.4 or
#                         len(unique_labels) <= 1 or
#                         len(unique_labels) > len(texts) * 0.8):
#                     print(f"  DBSCAN结果不理想，使用K-means重新聚类")
#                     result = perform_clustering_single(group, method='kmeans_improved')
#                 else:
#                     result = dbscan_result
#
#             clustering_results.append(result)
#         else:
#             # 使用指定方法
#             result = perform_clustering_single(group, method=method)
#             clustering_results.append(result)
#
#     return clustering_results


def perform_clustering_single(group, method='kmeans_improved'):
    """
    Run one clustering pass over a single department's rows.

    Parameters
    ----------
    group : pandas.DataFrame
        Rows of one department; must contain '综合文本', '报表名称', '数据项'.
    method : str
        'kmeans_improved', 'dbscan', or any other value for plain K-means.

    Returns
    -------
    (cluster_result, cluster_centers)
        cluster_result : copy of `group` with '聚类标签' and '距中心距离' added.
        cluster_centers : dict keyed by cluster label holding the
        representative report/data-item and the cluster size.
    """
    texts = group['综合文本'].tolist()
    embeddings = model.encode(texts)

    if method == 'kmeans_improved':
        best_k, best_score = find_optimal_clusters_improved(embeddings, max_k=min(15, len(texts)))
        print(f"  K-means聚类数: {best_k}, 轮廓系数: {best_score:.4f}")

        # If the search settled on one cluster, check whether the data is
        # dissimilar enough that a forced two-way split is warranted.
        if best_k == 1 and should_split_single_cluster(embeddings, texts):
            best_k = 2
            print(f"  强制分割为 {best_k} 个聚类")

        # Single fit — the original duplicated this identically in both
        # branches of the best_k check.
        kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=20, max_iter=300)
        labels = kmeans.fit_predict(embeddings)
        distances = np.linalg.norm(embeddings - kmeans.cluster_centers_[labels], axis=1)

        # Merge undersized clusters into their nearest neighbours.
        # NOTE(review): distances are computed before this reassignment and
        # are not refreshed afterwards — confirm that is acceptable downstream.
        labels = post_process_clusters(labels, distances, embeddings, min_cluster_size=2)

    elif method == 'dbscan':
        eps = find_optimal_eps(embeddings, method='statistical')
        min_samples = max(2, min(3, len(texts) // 5))

        print(f"  DBSCAN参数: eps={eps:.4f}, min_samples={min_samples}")

        # Standardize, then rescale eps by sqrt(dim) so the cosine-derived
        # threshold can serve as a Euclidean radius in the scaled space.
        scaler = StandardScaler()
        embeddings_scaled = scaler.fit_transform(embeddings)
        eps_scaled = eps * np.sqrt(embeddings_scaled.shape[1])

        dbscan = DBSCAN(eps=eps_scaled, min_samples=min_samples, metric='euclidean')
        labels = dbscan.fit_predict(embeddings_scaled)

        # Promote every noise point (-1) to its own singleton cluster.
        unique_labels = np.unique(labels)
        noise_mask = labels == -1
        noise_count = np.sum(noise_mask)

        if noise_count > 0:
            print(f"  发现 {noise_count} 个噪声点")
            next_label = np.max(labels) + 1 if len(unique_labels) > 1 else 0
            for i in np.where(noise_mask)[0]:
                labels[i] = next_label
                next_label += 1

        # Distance of every point to its (mean-based) cluster centroid.
        unique_labels = np.unique(labels)
        distances = np.zeros(len(labels))
        for cluster_id in unique_labels:
            cluster_indices = np.where(labels == cluster_id)[0]
            if len(cluster_indices) > 0:
                cluster_center = np.mean(embeddings[cluster_indices], axis=0)
                distances[cluster_indices] = np.linalg.norm(
                    embeddings[cluster_indices] - cluster_center, axis=1)

    else:  # plain K-means fallback
        best_k, best_score = find_optimal_clusters(embeddings, max_k=min(10, len(texts)))
        kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=10)
        labels = kmeans.fit_predict(embeddings)
        distances = np.linalg.norm(embeddings - kmeans.cluster_centers_[labels], axis=1)

    # Pick a representative row for every cluster.
    cluster_centers = {}
    unique_labels = np.unique(labels)

    for cluster_id in unique_labels:
        cluster_indices = np.where(labels == cluster_id)[0]
        if len(cluster_indices) > 0:
            cluster_embeddings = embeddings[cluster_indices]

            if method == 'dbscan':
                # BUG FIX: find_density_center returns an index *within*
                # cluster_embeddings; it must be mapped back to a group-level
                # index before being used with group.iloc below.
                center_index = cluster_indices[find_density_center(cluster_embeddings, embeddings)]
            else:
                # For K-means variants use the fitted centroid; otherwise the
                # cluster mean.
                if method == 'kmeans_improved' or method == 'kmeans':
                    cluster_center = kmeans.cluster_centers_[cluster_id]
                else:
                    cluster_center = np.mean(cluster_embeddings, axis=0)

                center_distances = np.linalg.norm(cluster_embeddings - cluster_center, axis=1)
                center_index = cluster_indices[np.argmin(center_distances)]

            cluster_centers[cluster_id] = {
                '中心报表': group.iloc[center_index]['报表名称'],
                '中心数据项': group.iloc[center_index]['数据项'],
                '聚类大小': len(cluster_indices)
            }

    cluster_result = group.copy()
    cluster_result['聚类标签'] = labels
    cluster_result['距中心距离'] = distances

    return (cluster_result, cluster_centers)


def find_optimal_clusters_improved(embeddings, max_k=15):
    """
    Choose a K-means cluster count via silhouette score, preferring more
    clusters on near ties; falls back to the elbow heuristic when the best
    silhouette is weak. Returns (best_k, best_score).
    """
    n = len(embeddings)
    if n <= 2:
        return 1, 0

    best_k, best_score = 1, -1
    for k in range(2, min(max_k + 1, n)):
        candidate = KMeans(n_clusters=k, random_state=42, n_init=15)
        assignment = candidate.fit_predict(embeddings)

        # Silhouette is only meaningful when every cluster has >= 2 members;
        # otherwise penalize this k.
        _, member_counts = np.unique(assignment, return_counts=True)
        if member_counts.min() >= 2:
            score = silhouette_score(embeddings, assignment)
        else:
            score = -0.5

        # Take a strictly better score, or a near tie at a larger k.
        near_tie = abs(score - best_score) < 0.05 and k > best_k
        if score > best_score or near_tie:
            best_k, best_score = k, score

    # Weak silhouette everywhere: let the elbow heuristic decide instead.
    if best_score < 0.2:
        best_k = find_elbow_point(embeddings, max_k)
        fallback = KMeans(n_clusters=best_k, random_state=42, n_init=10)
        assignment = fallback.fit_predict(embeddings)
        best_score = silhouette_score(embeddings, assignment) if len(np.unique(assignment)) > 1 else 0

    return best_k, best_score


def find_optimal_eps(embeddings, method='statistical'):
    """
    Derive a DBSCAN eps automatically via one of three heuristics:
    'statistical' (half the mean pairwise cosine distance), 'knee'
    (elbow of the sorted k-distance curve), or any other value for a
    median-percentile fallback. The result is clamped to [0.1, 0.8].
    """
    if len(embeddings) <= 1:
        return 0.5

    if method == 'statistical':
        # Half of the mean pairwise cosine distance, diagonal excluded.
        pairwise = cosine_distances(embeddings)
        np.fill_diagonal(pairwise, np.inf)
        mean_dist = np.mean(pairwise[pairwise < np.inf])
        eps = min(max(mean_dist * 0.5, 0.1), 0.8)

    elif method == 'knee':
        k = min(5, len(embeddings) - 1)
        knn = NearestNeighbors(n_neighbors=k, metric='cosine').fit(embeddings)
        kth_dist, _ = knn.kneighbors(embeddings)
        curve = np.sort(kth_dist[:, -1])

        if len(curve) > 10:
            # The knee sits where the second difference is largest.
            bend = np.argmax(np.abs(np.diff(np.diff(curve)))) + 2
            eps = curve[bend]
        else:
            # Too few points for curvature analysis: take the median.
            eps = np.median(curve)

    else:  # 'percentile' fallback
        k = min(5, len(embeddings) - 1)
        knn = NearestNeighbors(n_neighbors=k, metric='cosine').fit(embeddings)
        kth_dist, _ = knn.kneighbors(embeddings)
        curve = np.sort(kth_dist[:, -1])

        # Median rather than a low percentile, to dodge zero distances.
        eps = np.percentile(curve, 50)
        if eps <= 0:
            positive = curve[curve > 0]
            eps = np.min(positive) if len(positive) > 0 else 0.3

    # Final clamp to a sane working range.
    return min(max(eps, 0.1), 0.8)


def should_split_single_cluster(embeddings, texts, similarity_threshold=0.6):
    """
    Decide whether a single cluster should be split.

    Computes pairwise cosine similarity directly with NumPy (the inputs
    here are NumPy arrays from model.encode, so the torch round trip via
    sentence_transformers.util.cos_sim was unnecessary) and answers True
    when any pair of points is less similar than `similarity_threshold`.

    `texts` is unused but kept for interface compatibility with callers.
    """
    emb = np.asarray(embeddings, dtype=np.float64)
    if len(emb) <= 3:
        return False

    # Row-normalize; guard zero vectors so we never divide by zero.
    norms = np.maximum(np.linalg.norm(emb, axis=1, keepdims=True), 1e-12)
    unit = emb / norms
    cosine_scores = unit @ unit.T

    # Smallest pairwise similarity over the strict upper triangle.
    min_similarity = np.min(cosine_scores[np.triu_indices_from(cosine_scores, k=1)])

    # Any very dissimilar pair means the cluster should be split.
    return min_similarity < similarity_threshold


def find_elbow_point(embeddings, max_k):
    """
    Pick a cluster count with the elbow heuristic on K-means inertia.

    Fits K-means for k = 1..min(max_k, n-1) and returns the k at the point
    of maximum curvature of the (decreasing) inertia curve, capped at
    max_k. Falls back to 2 when the curve is too short for a second
    difference.
    """
    inertias = []
    k_range = range(1, min(max_k + 1, len(embeddings)))

    for k in k_range:
        kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
        kmeans.fit(embeddings)
        inertias.append(kmeans.inertia_)

    if len(inertias) > 2:
        first_diff = np.diff(inertias)
        second_diff = np.diff(first_diff)
        # BUG FIX: the elbow of a decreasing inertia curve is where its
        # curvature (second difference) is LARGEST — argmax, not argmin.
        # argmin selected the flattest point; this also matches the knee
        # search in find_optimal_eps, which uses argmax.
        elbow_point = np.argmax(second_diff) + 2  # +2: second diff drops two leading points
        return min(elbow_point, max_k)
    else:
        return 2


def post_process_clusters(labels, distances, embeddings, min_cluster_size=2):
    """
    Reassign points from undersized clusters to the nearest viable cluster.

    A cluster with fewer than `min_cluster_size` members is dissolved: each
    of its points is moved to whichever adequately-sized cluster has the
    closest mean embedding. If no viable cluster exists, labels stay put.
    `distances` is accepted for interface compatibility but not used.
    Returns the (possibly updated) label array.
    """
    uniques, sizes = np.unique(labels, return_counts=True)
    size_of = dict(zip(uniques, sizes))

    undersized = [lab for lab in uniques if size_of[lab] < min_cluster_size]
    if not undersized:
        return labels

    # Clusters large enough to absorb orphaned points.
    viable = [lab for lab in uniques if size_of[lab] >= min_cluster_size]

    adjusted = labels.copy()
    for lab in undersized:
        for idx in np.where(labels == lab)[0]:
            if not viable:
                continue  # nothing to absorb this point; leave it as-is
            point = embeddings[idx]

            # Distance from the point to a candidate cluster's mean embedding
            # (membership is judged against the ORIGINAL labels, matching the
            # pre-rewrite behavior).
            def gap(candidate):
                members = np.where(labels == candidate)[0]
                return np.linalg.norm(point - embeddings[members].mean(axis=0))

            adjusted[idx] = min(viable, key=gap)

    return adjusted


def find_density_center(cluster_embeddings, all_embeddings, eps=None):
    """
    Return the index (within cluster_embeddings) of the densest point.

    Density of a candidate point = number of points in all_embeddings whose
    cosine distance to it is <= eps. When eps is None it is derived as the
    30th percentile of the candidate-to-all cosine distance matrix.

    NOTE(review): the returned index is local to cluster_embeddings; callers
    must map it back through their own index array before using it globally.
    """
    # One vectorized distance computation replaces the original per-point
    # cosine_distances calls inside a Python loop — identical values, one
    # sklearn call instead of O(n).
    dist_matrix = cosine_distances(cluster_embeddings, all_embeddings)

    if eps is None:
        eps = np.percentile(dist_matrix, 30)

    # Neighbor count within eps for every candidate point.
    densities = np.sum(dist_matrix <= eps, axis=1)

    return int(np.argmax(densities))


def find_optimal_clusters(embeddings, max_k=10):
    """
    Legacy silhouette search for the best K-means cluster count.
    Returns (best_k, best_score); (1, 0) for two or fewer points.
    """
    if len(embeddings) <= 2:
        return 1, 0

    best_k, best_score = 1, -1
    upper = min(max_k + 1, len(embeddings))

    for k in range(2, upper):
        clusterer = KMeans(n_clusters=k, random_state=42, n_init=10)
        score = silhouette_score(embeddings, clusterer.fit_predict(embeddings))
        if score > best_score:
            best_score, best_k = score, k

    return best_k, best_score

#-------------------------------------------------
def perform_avg_similarity_clustering(df, num_clusters=3, similarity_column='相似度得分'):
    """
    Cluster each department's rows on their 1-D similarity score.

    For every department group, runs K-means (k capped at the group size)
    on the column named by `similarity_column`. Degenerate groups (single
    row, or missing score column) are appended to the result as bare
    DataFrames with a single cluster label; normal groups are appended as
    (cluster_result, cluster_centers) tuples.
    """
    clustering_results = []

    for department, group in df.groupby('部门名称'):
        print(f"对部门 {department} 进行平均相似度聚类...")

        if len(group) <= 1:
            # Single row: trivially one cluster.
            cluster_result = group.copy()
            cluster_result['聚类标签'] = 0
            cluster_result['距中心距离'] = 0.0
            clustering_results.append(cluster_result)
            continue

        if similarity_column not in group.columns:
            print(f"  警告: 部门 {department} 没有 '{similarity_column}' 列，跳过聚类")
            cluster_result = group.copy()
            cluster_result['聚类标签'] = 0
            cluster_result['距中心距离'] = 0.0
            clustering_results.append(cluster_result)
            continue

        similarity_scores = group[similarity_column].values

        # Never ask for more clusters than there are rows.
        dept_num_clusters = min(num_clusters, len(group))

        # K-means on the scores reshaped to a single feature. (KMeans comes
        # from the module-level import; the redundant local import was removed.)
        X = similarity_scores.reshape(-1, 1)
        kmeans = KMeans(n_clusters=dept_num_clusters, random_state=42, n_init=10)
        labels = kmeans.fit_predict(X)

        # Distance of each score to its assigned 1-D centroid.
        distances = np.abs(similarity_scores - kmeans.cluster_centers_[labels].flatten())

        # Representative (closest-to-centroid) row per cluster.
        cluster_centers = {}
        unique_labels = np.unique(labels)

        for cluster_id in unique_labels:
            cluster_indices = np.where(labels == cluster_id)[0]
            if len(cluster_indices) > 0:
                cluster_scores = similarity_scores[cluster_indices]
                center_score = kmeans.cluster_centers_[cluster_id][0]
                center_index = cluster_indices[np.argmin(np.abs(cluster_scores - center_score))]

                cluster_centers[cluster_id] = {
                    '中心报表': group.iloc[center_index]['报表名称'],
                    '中心数据项': group.iloc[center_index]['数据项'],
                    '聚类大小': len(cluster_indices),
                    '平均相似度': np.mean(cluster_scores),
                    '相似度范围': f"{np.min(cluster_scores):.3f}-{np.max(cluster_scores):.3f}"
                }

        cluster_result = group.copy()
        cluster_result['聚类标签'] = labels
        cluster_result['距中心距离'] = distances

        clustering_results.append((cluster_result, cluster_centers))

        print(f"  生成 {len(unique_labels)} 个聚类")

    return clustering_results


def find_optimal_similarity_clusters(similarity_scores, max_k=5):
    """
    Choose the best cluster count for 1-D similarity scores via silhouette.

    Returns (best_k, best_score); (1, 0) when there are fewer than 2 scores.
    KMeans and silhouette_score come from the module-level imports — the
    redundant local imports were removed.
    """
    if len(similarity_scores) <= 1:
        return 1, 0

    best_score = -1
    best_k = 1

    # np.asarray also accepts plain lists, not just ndarrays (backward
    # compatible generalization).
    X = np.asarray(similarity_scores).reshape(-1, 1)

    for k in range(2, min(max_k + 1, len(similarity_scores))):
        kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
        labels = kmeans.fit_predict(X)

        # Silhouette is only defined when every cluster has >= 2 members;
        # otherwise penalize this k.
        unique, counts = np.unique(labels, return_counts=True)
        if np.all(counts >= 2):
            score = silhouette_score(X, labels)
        else:
            score = -0.5

        if score > best_score:
            best_score = score
            best_k = k

    return best_k, best_score


def perform_avg_similarity_clustering_auto(df, similarity_column='相似度得分'):
    """
    Like perform_avg_similarity_clustering, but picks each department's
    cluster count automatically via find_optimal_similarity_clusters.

    Degenerate groups (single row, or missing score column) are appended
    as bare DataFrames with a single cluster label; normal groups are
    appended as (cluster_result, cluster_centers) tuples.
    """
    clustering_results = []

    for department, group in df.groupby('部门名称'):
        print(f"对部门 {department} 进行平均相似度聚类...")

        if len(group) <= 1:
            # Single row: trivially one cluster.
            cluster_result = group.copy()
            cluster_result['聚类标签'] = 0
            cluster_result['距中心距离'] = 0.0
            clustering_results.append(cluster_result)
            continue

        if similarity_column not in group.columns:
            print(f"  警告: 部门 {department} 没有 '{similarity_column}' 列，跳过聚类")
            cluster_result = group.copy()
            cluster_result['聚类标签'] = 0
            cluster_result['距中心距离'] = 0.0
            clustering_results.append(cluster_result)
            continue

        similarity_scores = group[similarity_column].values

        # Silhouette-driven choice of k for this department.
        best_k, best_score = find_optimal_similarity_clusters(similarity_scores)
        print(f"  最佳聚类数: {best_k}, 轮廓系数: {best_score:.4f}")

        # K-means on the 1-D scores. (KMeans comes from the module-level
        # import; the redundant local import was removed.)
        X = similarity_scores.reshape(-1, 1)
        kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=10)
        labels = kmeans.fit_predict(X)

        # Distance of each score to its assigned 1-D centroid.
        distances = np.abs(similarity_scores - kmeans.cluster_centers_[labels].flatten())

        # Representative (closest-to-centroid) row per cluster.
        cluster_centers = {}
        unique_labels = np.unique(labels)

        for cluster_id in unique_labels:
            cluster_indices = np.where(labels == cluster_id)[0]
            if len(cluster_indices) > 0:
                cluster_scores = similarity_scores[cluster_indices]
                center_score = kmeans.cluster_centers_[cluster_id][0]
                center_index = cluster_indices[np.argmin(np.abs(cluster_scores - center_score))]

                cluster_centers[cluster_id] = {
                    '中心报表': group.iloc[center_index]['报表名称'],
                    '中心数据项': group.iloc[center_index]['数据项'],
                    '聚类大小': len(cluster_indices),
                    '平均相似度': np.mean(cluster_scores),
                    '相似度范围': f"{np.min(cluster_scores):.3f}-{np.max(cluster_scores):.3f}"
                }

        cluster_result = group.copy()
        cluster_result['聚类标签'] = labels
        cluster_result['距中心距离'] = distances

        clustering_results.append((cluster_result, cluster_centers))

        print(f"  生成 {len(unique_labels)} 个聚类")

    return clustering_results


def perform_avg_similarity_clustering_single(group, num_clusters=3, similarity_column='相似度得分'):
    """
    Cluster one department's rows on their 1-D similarity score.

    Runs K-means (k capped at the group size) on `similarity_column`.
    Returns (cluster_result, cluster_centers); when the score column is
    missing, returns a bare DataFrame with a single cluster label instead.
    """
    if similarity_column not in group.columns:
        print(f"  警告: 没有 '{similarity_column}' 列，跳过聚类")
        cluster_result = group.copy()
        cluster_result['聚类标签'] = 0
        cluster_result['距中心距离'] = 0.0
        return cluster_result

    similarity_scores = group[similarity_column].values

    # Never ask for more clusters than there are rows.
    dept_num_clusters = min(num_clusters, len(group))

    # K-means on the scores reshaped to a single feature. (KMeans comes from
    # the module-level import; the redundant local import was removed.)
    X = similarity_scores.reshape(-1, 1)
    kmeans = KMeans(n_clusters=dept_num_clusters, random_state=42, n_init=10)
    labels = kmeans.fit_predict(X)

    # Distance of each score to its assigned 1-D centroid.
    distances = np.abs(similarity_scores - kmeans.cluster_centers_[labels].flatten())

    # Representative (closest-to-centroid) row per cluster.
    cluster_centers = {}
    unique_labels = np.unique(labels)

    for cluster_id in unique_labels:
        cluster_indices = np.where(labels == cluster_id)[0]
        if len(cluster_indices) > 0:
            cluster_scores = similarity_scores[cluster_indices]
            center_score = kmeans.cluster_centers_[cluster_id][0]
            center_index = cluster_indices[np.argmin(np.abs(cluster_scores - center_score))]

            cluster_centers[cluster_id] = {
                '中心报表': group.iloc[center_index]['报表名称'],
                '中心数据项': group.iloc[center_index]['数据项'],
                '聚类大小': len(cluster_indices),
                '平均相似度': np.mean(cluster_scores),
                '相似度范围': f"{np.min(cluster_scores):.3f}-{np.max(cluster_scores):.3f}"
            }

    cluster_result = group.copy()
    cluster_result['聚类标签'] = labels
    cluster_result['距中心距离'] = distances

    return (cluster_result, cluster_centers)


def perform_avg_similarity_clustering_auto_single(group, similarity_column='相似度得分'):
    """
    Cluster one department's rows on their 1-D similarity score, choosing
    the cluster count automatically via find_optimal_similarity_clusters.

    Returns (cluster_result, cluster_centers); when the score column is
    missing, returns a bare DataFrame with a single cluster label instead.
    """
    if similarity_column not in group.columns:
        print(f"  警告: 没有 '{similarity_column}' 列，跳过聚类")
        cluster_result = group.copy()
        cluster_result['聚类标签'] = 0
        cluster_result['距中心距离'] = 0.0
        return cluster_result

    similarity_scores = group[similarity_column].values

    # Silhouette-driven choice of k.
    best_k, best_score = find_optimal_similarity_clusters(similarity_scores)
    print(f"  最佳聚类数: {best_k}, 轮廓系数: {best_score:.4f}")

    # K-means on the 1-D scores. (KMeans comes from the module-level import;
    # the redundant local import was removed.)
    X = similarity_scores.reshape(-1, 1)
    kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=10)
    labels = kmeans.fit_predict(X)

    # Distance of each score to its assigned 1-D centroid.
    distances = np.abs(similarity_scores - kmeans.cluster_centers_[labels].flatten())

    # Representative (closest-to-centroid) row per cluster.
    cluster_centers = {}
    unique_labels = np.unique(labels)

    for cluster_id in unique_labels:
        cluster_indices = np.where(labels == cluster_id)[0]
        if len(cluster_indices) > 0:
            cluster_scores = similarity_scores[cluster_indices]
            center_score = kmeans.cluster_centers_[cluster_id][0]
            center_index = cluster_indices[np.argmin(np.abs(cluster_scores - center_score))]

            cluster_centers[cluster_id] = {
                '中心报表': group.iloc[center_index]['报表名称'],
                '中心数据项': group.iloc[center_index]['数据项'],
                '聚类大小': len(cluster_indices),
                '平均相似度': np.mean(cluster_scores),
                '相似度范围': f"{np.min(cluster_scores):.3f}-{np.max(cluster_scores):.3f}"
            }

    cluster_result = group.copy()
    cluster_result['聚类标签'] = labels
    cluster_result['距中心距离'] = distances

    return (cluster_result, cluster_centers)


def perform_clustering_combined(df, method='auto', similarity_threshold=0.7, num_clusters=3):
    """
    Cluster every department with the requested strategy.

    method='auto' picks a strategy per department from the mean pairwise
    cosine similarity of its embeddings; 'similarity', 'avg_similarity' and
    'avg_similarity_auto' dispatch to the corresponding *_single helpers;
    anything else is forwarded to perform_clustering_single.
    Returns a list of per-department results.
    """
    all_results = []

    for department, members in df.groupby('部门名称'):
        print(f"对部门 {department} 进行聚类分析...")

        texts = members['综合文本'].tolist()

        # Departments with a single row get a trivial one-cluster result.
        if len(texts) <= 1:
            trivial = members.copy()
            trivial['聚类标签'] = 0
            trivial['距中心距离'] = 0.0
            all_results.append(trivial)
            print(f"  部门 {department} 只有1个数据点，跳过聚类")
            continue

        if method == 'auto':
            # Choose a strategy from the data's average pairwise similarity.
            vectors = model.encode(texts)
            sim_matrix = util.cos_sim(vectors, vectors).numpy()
            avg_similarity = np.mean(sim_matrix)

            if len(texts) <= 3:
                outcome = perform_avg_similarity_clustering_single(members, num_clusters=num_clusters)
            elif avg_similarity > 0.8:
                print(f"  数据平均相似度高({avg_similarity:.4f})，使用平均相似度聚类")
                outcome = perform_avg_similarity_clustering_single(members, num_clusters=num_clusters)
            elif avg_similarity < 0.4:
                print(f"  数据平均相似度低({avg_similarity:.4f})，使用DBSCAN")
                outcome = perform_clustering_single(members, method='dbscan')
            else:
                print(f"  数据平均相似度中等({avg_similarity:.4f})，使用K-means")
                outcome = perform_clustering_single(members, method='kmeans_improved')
        elif method == 'similarity':
            outcome = perform_similarity_based_clustering_single(members, similarity_threshold)
        elif method == 'avg_similarity':
            outcome = perform_avg_similarity_clustering_single(members, num_clusters=num_clusters)
        elif method == 'avg_similarity_auto':
            outcome = perform_avg_similarity_clustering_auto_single(members)
        else:
            # Forward any other method string unchanged.
            outcome = perform_clustering_single(members, method=method)

        all_results.append(outcome)

    return all_results


# 主执行函数
def main(input_file, sheet_name, output_file, output_path,
         clustering_method='avg_similarity_auto', similarity_threshold=0.7,
         num_clusters=3, sorted_output=None):
    """
    End-to-end pipeline: load -> similarity sort -> cluster -> report.

    Parameters
    ----------
    input_file, sheet_name : Excel workbook path and sheet to load.
    output_file : path for the clustering report workbook.
    output_path : path for the (currently disabled) visualization image.
    clustering_method : one of 'similarity', 'avg_similarity',
        'avg_similarity_auto', 'auto', 'kmeans', 'dbscan'; unknown values
        fall back to the 'auto' strategy.
    similarity_threshold, num_clusters : method-specific tuning knobs.
    sorted_output : optional path for the sorted intermediate workbook.
        Defaults to the historical hard-coded location (backward
        compatible new parameter).

    Returns (sorted_df, clustering_results).
    """
    print("开始加载数据...")
    df = load_and_preprocess_data(input_file, sheet_name)

    print("开始计算相似度并排序...")
    sorted_df = calculate_similarity_and_sort(df)

    # The duplicate generic "开始聚类分析..." print was removed; this one
    # carries the method name.
    print(f"开始聚类分析（方法: {clustering_method}）...")

    if clustering_method == 'similarity':
        clustering_results = perform_similarity_based_clustering(sorted_df, similarity_threshold)
    elif clustering_method == 'avg_similarity':
        clustering_results = perform_avg_similarity_clustering(sorted_df, num_clusters=num_clusters)
    elif clustering_method == 'avg_similarity_auto':
        clustering_results = perform_avg_similarity_clustering_auto(sorted_df)
    elif clustering_method == 'auto':
        clustering_results = perform_clustering_combined(sorted_df, method='auto',
                                                         similarity_threshold=similarity_threshold,
                                                         num_clusters=num_clusters)
    elif clustering_method == 'kmeans':
        clustering_results = perform_clustering_combined(sorted_df, method='kmeans_improved')
    elif clustering_method == 'dbscan':
        clustering_results = perform_clustering_combined(sorted_df, method='dbscan')
    else:
        print(f"未知的聚类方法: {clustering_method}，使用自动方法")
        clustering_results = perform_clustering_combined(sorted_df, method='auto')

    print("生成分析报告...")
    generate_cluster_report(clustering_results, output_file)

    # Persist the sorted intermediate data.
    # TODO(review): make this path a required argument instead of a
    # machine-specific default.
    if sorted_output is None:
        sorted_output = "C:/Users/xingwenzheng/Desktop/代码测试集/整合数据全/排序后的报表数据.xlsx"
    sorted_df.to_excel(sorted_output, index=False)
    print(f"排序后的数据已保存至: {sorted_output}")

    return sorted_df, clustering_results


# Usage example
if __name__ == "__main__":
    # Choose the clustering method
    clustering_method = "avg_similarity"  # one of "avg_similarity", "avg_similarity_auto", "auto", "kmeans", "dbscan"
    num_clusters = 3  # cluster count used by the avg_similarity method
    similarity_threshold = 0.01  # threshold used by the similarity method


    input_file = "C:/Users/xingwenzheng/Desktop/代码测试集/整合数据全/测试数据908_匹配结果.xlsx"
    # "C:/Users/xingwenzheng/Desktop/代码测试集/包含数据项/测试数据.xlsx"
    # "C:/Users/xingwenzheng/Desktop/代码测试集/报表统计汇总表_匹配结果_匹配结果.xlsx" # "C:/Users/xingwenzheng/Desktop/国家部委-附件1-基层报表底数初步清单-1.xlsx"  # replace with your actual file path
    sheet_name = "汇总台账"  # alternative sheet: "分割前数据"
    output_file = "C:/Users/xingwenzheng/Desktop/代码测试集/整合数据全/聚类分析报告.xlsx"
    # "C:/Users/xingwenzheng/Desktop/代码测试集/包含数据项/聚类分析报告.xlsx"
    output_path = "C:/Users/xingwenzheng/Desktop/代码测试集/整合数据全/聚类可视化.png"
    # sorted_data, cluster_results = main(input_file,sheet_name,output_file,output_path)   # , cluster_results

    sorted_data, cluster_results = main(
        input_file,
        sheet_name,
        output_file,
        output_path,
        clustering_method=clustering_method,
        num_clusters=num_clusters,
        similarity_threshold=similarity_threshold
    )