'''打上提醒人工查看标记（重新对报表名称进行相似度判断，如判断相似度较低，则打上人工审核的标记）'''
import pandas as pd
import numpy as np
import jieba
import difflib
from collections import Counter
import warnings

warnings.filterwarnings('ignore')


def improved_chinese_similarity(text1, text2):
    """Compute a blended similarity score for two Chinese texts.

    Combines four signals with fixed weights:
      * 0.5 — character-level similarity (difflib.SequenceMatcher ratio)
      * 0.3 — word-level Jaccard similarity (jieba tokens, single chars dropped)
      * 0.1 — normalized Levenshtein edit-distance similarity
      * 0.1 — longest-common-substring ratio

    Args:
        text1, text2: Texts to compare. NaN or empty inputs yield 0.0.

    Returns:
        float: Similarity in [0, 1], rounded to 4 decimal places.
    """
    # BUG FIX: the original called self.edit_distance / self.longest_common_substring_ratio
    # from a module-level function, which raises NameError ("self" undefined).
    # The helpers are defined locally here so this function is self-contained.
    def _levenshtein(s1, s2):
        # Iterative single-row dynamic-programming Levenshtein distance.
        if len(s1) < len(s2):
            s1, s2 = s2, s1
        if not s2:
            return len(s1)
        previous_row = list(range(len(s2) + 1))
        for i, c1 in enumerate(s1):
            current_row = [i + 1]
            for j, c2 in enumerate(s2):
                current_row.append(min(
                    previous_row[j + 1] + 1,      # insertion
                    current_row[j] + 1,           # deletion
                    previous_row[j] + (c1 != c2)  # substitution
                ))
            previous_row = current_row
        return previous_row[-1]

    def _lcs_ratio(s1, s2):
        # Longest common substring length divided by the longer text's length.
        if not s1 or not s2:
            return 0.0
        m = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
        longest = 0
        for x in range(1, len(s1) + 1):
            for y in range(1, len(s2) + 1):
                if s1[x - 1] == s2[y - 1]:
                    m[x][y] = m[x - 1][y - 1] + 1
                    longest = max(longest, m[x][y])
        return longest / max(len(s1), len(s2))

    if pd.isna(text1) or pd.isna(text2) or text1 == "" or text2 == "":
        return 0.0

    text1 = str(text1).strip()
    text2 = str(text2).strip()

    # Identical texts short-circuit to a perfect score.
    if text1 == text2:
        return 1.0

    # Signal 1: character-level similarity (works well for report names).
    char_similarity = difflib.SequenceMatcher(None, text1, text2).ratio()

    # Signal 2: word-level Jaccard similarity on jieba tokens,
    # filtering out single-character tokens as noise.
    words1 = {word for word in jieba.cut(text1) if len(word) > 1}
    words2 = {word for word in jieba.cut(text2) if len(word) > 1}

    if words1 and words2:
        union = len(words1 | words2)
        word_similarity = len(words1 & words2) / union if union > 0 else 0.0
    else:
        word_similarity = 0.0

    # Signal 3: edit distance normalized by the longer text's length.
    max_len = max(len(text1), len(text2))
    edit_distance_similarity = (
        1 - (_levenshtein(text1, text2) / max_len) if max_len > 0 else 0.0
    )

    # Signal 4: longest-common-substring ratio.
    common_substring_similarity = _lcs_ratio(text1, text2)

    # Weighted blend; character similarity dominates because report names
    # are short and character overlap matters most.
    final_similarity = (
            0.5 * char_similarity +
            0.3 * word_similarity +
            0.1 * edit_distance_similarity +
            0.1 * common_substring_similarity
    )

    return round(final_similarity, 4)


def edit_distance(s1, s2):
    """Return the Levenshtein edit distance between two strings.

    Uses the classic single-row dynamic-programming formulation, keeping
    only the previous DP row in memory.

    BUG FIX: the original signature carried a spurious ``self`` parameter
    (leftover from a class method) and recursed via ``self.edit_distance``,
    which raises NameError when called at module level; both are removed.

    Args:
        s1, s2: Strings to compare.

    Returns:
        int: Minimum number of insertions, deletions, and substitutions
        needed to turn one string into the other.
    """
    # Keep s1 as the longer string so the DP row is as short as possible.
    if len(s1) < len(s2):
        return edit_distance(s2, s1)

    if len(s2) == 0:
        return len(s1)

    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row

    return previous_row[-1]


def longest_common_substring_ratio(s1, s2):
    """Return the longest-common-substring length over the longer string's length.

    BUG FIX: the original signature carried a spurious ``self`` parameter
    (leftover from a class method); it is removed. The substring text itself
    was never used, so only the length is tracked now.

    Args:
        s1, s2: Strings to compare.

    Returns:
        float: len(LCS) / max(len(s1), len(s2)), or 0.0 for empty input.
    """
    if not s1 or not s2:
        return 0.0

    # Classic O(len(s1) * len(s2)) DP: m[x][y] is the length of the common
    # suffix of s1[:x] and s2[:y]; the maximum cell is the LCS length.
    m = [[0] * (1 + len(s2)) for _ in range(1 + len(s1))]
    longest = 0
    for x in range(1, 1 + len(s1)):
        for y in range(1, 1 + len(s2)):
            if s1[x - 1] == s2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    longest = m[x][y]
            else:
                m[x][y] = 0

    # max(len(s1), len(s2)) > 0 is guaranteed by the guard above.
    return longest / max(len(s1), len(s2))


def simple_chinese_similarity(text1, text2):
    """Character-level similarity between two Chinese texts.

    Returns 0.0 when either input is NaN or empty, 1.0 when the stripped
    texts are identical, otherwise the difflib.SequenceMatcher ratio
    rounded to 4 decimal places. SequenceMatcher works well on Chinese
    strings because it compares at the character level.
    """
    # Guard: missing or empty inputs cannot be meaningfully compared.
    for value in (text1, text2):
        if pd.isna(value) or value == "":
            return 0.0

    left = str(text1).strip()
    right = str(text2).strip()

    if left == right:
        return 1.0

    matcher = difflib.SequenceMatcher(None, left, right)
    return round(matcher.ratio(), 4)


def process_clustering_report(file_path, output_path=None, similarity_threshold=0.6):
    """Process a clustering report: score Chinese-text similarity and flag rows for review.

    Reads sheet '聚类汇总' from the input workbook, computes the similarity
    between the '中心报表' and '报表名称' columns for every row, and adds two
    columns: '相似度' (the score) and '是否审核' ('是' when the score is below
    the threshold, i.e. the row needs manual review).

    Args:
        file_path: Path of the input Excel workbook.
        output_path: Where to save the result; defaults to
            '<input stem>_processed.<ext>' next to the input file.
        similarity_threshold: Rows scoring below this value are flagged.

    Returns:
        The processed DataFrame, or None when reading/validation fails.
    """
    # Read the Excel file.
    try:
        df = pd.read_excel(file_path, sheet_name='聚类汇总')
        print(f"成功读取文件，共{len(df)}行数据")
    except Exception as e:
        print(f"读取文件失败: {e}")
        return None

    # Validate that the required columns are present.
    required_columns = ['中心报表', '报表名称']
    missing_columns = [col for col in required_columns if col not in df.columns]
    if missing_columns:
        print(f"缺少必要的列: {missing_columns}")
        return None

    # Compute similarity row by row with the simple character-level method.
    print("正在计算中文文本相似度...")
    similarities = []

    # BUG FIX: use enumerate for the progress counter; df.index values are not
    # guaranteed to be consecutive integers, so `idx % 100` could misbehave.
    for pos, (_, row) in enumerate(df.iterrows()):
        if pos % 100 == 0 and pos > 0:  # show progress every 100 rows
            print(f"已处理 {pos}/{len(df)} 行...")
        similarities.append(
            simple_chinese_similarity(row['中心报表'], row['报表名称'])
        )

    # Attach the similarity scores.
    df['相似度'] = similarities

    # Flag rows below the threshold for manual review.
    df['是否审核'] = df['相似度'].apply(
        lambda x: '是' if x < similarity_threshold else '否'
    )

    # Summary statistics.
    need_review_count = (df['是否审核'] == '是').sum()
    similarity_stats = df['相似度'].describe()
    # BUG FIX: avoid ZeroDivisionError when the sheet has no data rows.
    review_pct = need_review_count / len(df) * 100 if len(df) > 0 else 0.0

    print(f"\n相似度统计:")
    print(f"平均相似度: {similarity_stats['mean']:.4f}")
    print(f"最小相似度: {similarity_stats['min']:.4f}")
    print(f"最大相似度: {similarity_stats['max']:.4f}")
    print(f"中位数相似度: {similarity_stats['50%']:.4f}")
    print(f"需要人工审核的数据行数: {need_review_count} ({review_pct:.1f}%)")

    # Show a few worked examples of the similarity metric.
    print("\n相似度计算示例:")
    sample_data = [
        ("困难企业名单", "困难企业"),
        ("党费核定报表", "党费核定表"),
        ("行政事业单位内部控制表", "行政事业单位内部控制报表"),
        ("政府财务报表", "政府财务表")
    ]

    for text1, text2 in sample_data:
        similarity = simple_chinese_similarity(text1, text2)
        print(f"'{text1}' vs '{text2}': {similarity}")

    # Derive the default output path from the input path.
    if output_path is None:
        # BUG FIX: the original split on the FIRST '.' anywhere in the path,
        # which breaks when a directory name contains a dot and raises
        # IndexError when there is no dot at all; split on the LAST '.' only.
        root, sep, ext = file_path.rpartition('.')
        output_path = f"{root}_processed.{ext}" if sep else file_path + '_processed'

    try:
        # Prefer openpyxl for writing.
        with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
            df.to_excel(writer, sheet_name='聚类汇总', index=False)
        print(f"\n处理完成！结果已保存到: {output_path}")
    except Exception as e:
        print(f"使用openpyxl保存失败: {e}，尝试使用xlsxwriter...")
        try:
            with pd.ExcelWriter(output_path, engine='xlsxwriter') as writer:
                df.to_excel(writer, sheet_name='聚类汇总', index=False)
            print(f"\n处理完成！结果已保存到: {output_path}")
        except Exception as e2:
            print(f"保存文件失败: {e2}")
            # Fall back to CSV.
            # BUG FIX: swap only the final extension; the original
            # replace('.xlsx', '.csv') did nothing for non-.xlsx paths and
            # replaced every occurrence of '.xlsx' anywhere in the path.
            root, sep, _ = output_path.rpartition('.')
            csv_path = (root if sep else output_path) + '.csv'
            df.to_csv(csv_path, index=False, encoding='utf-8-sig')
            print(f"已保存为CSV文件: {csv_path}")

    return df


# Usage example
if __name__ == "__main__":
    # File paths — change to your actual file locations.
    input_file = "C:/Users/xingwenzheng/Desktop/代码测试集/包含数据项/聚类分析报告.xlsx"  # input path
    output_file = "C:/Users/xingwenzheng/Desktop/代码测试集/包含数据项/聚类分析报告_处理结果.xlsx"  # output path
    threshold = 0.6  # similarity threshold, adjustable

    # Run the pipeline.
    result_df = process_clustering_report(input_file, output_file, threshold)

    if result_df is not None:
        # Preview the first few processed rows.
        print("\n前5行处理结果:")
        display_columns = ['中心报表', '报表名称', '相似度', '是否审核']
        available_columns = [col for col in display_columns if col in result_df.columns]
        print(result_df[available_columns].head())

        # Show the rows flagged for manual review.
        need_review = result_df[result_df['是否审核'] == '是']
        if len(need_review) > 0:
            print(f"\n需要人工审核的数据 (前10行，共{len(need_review)}行):")
            # BUG FIX: the message promises 10 rows but the original printed 20.
            print(need_review[['中心报表', '报表名称', '相似度']].head(10))
        else:
            print("\n所有数据相似度均达到阈值，无需人工审核")
# 使用示例
# if __name__ == "__main__":
#     # 文件路径 - 请修改为你的实际文件路径
#     input_file = "C:/Users/xingwenzheng/Desktop/代码测试集/包含数据项/聚类分析报告.xlsx"  # 输入文件路径
#     output_file = "C:/Users/xingwenzheng/Desktop/代码测试集/包含数据项/聚类分析报告_处理结果.xlsx"  # 输出文件路径
#
#     # 处理数据
#     result_df = process_clustering_report(input_file, output_file)
#
#     if result_df is not None:
#         # 显示前几行结果
#         print("\n前5行处理结果:")
#         print(result_df[['中心报表', '报表名称', '相似度', '是否审核']].head())
#
#         # 显示需要审核的数据
#         need_review = result_df[result_df['是否审核'] == '是']
#         if len(need_review) > 0:
#             print(f"\n需要人工审核的数据 (共{len(need_review)}行):")
#             print(need_review[['中心报表', '报表名称', '相似度']].head(10))
#
