# demo1 (demo2.1) ------ demo3
# Refines the reasonableness check so the final cluster-validity analysis is more accurate.
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import jieba
import jieba.posseg as pseg
from tqdm import tqdm
import warnings
import re
from rapidfuzz import fuzz
from collections import Counter
import uuid

# Suppress all library warnings for cleaner console output during batch runs.
warnings.filterwarnings('ignore')


class EnhancedSimilarityCalculator:
    """Text-similarity scorer with penalties for semantic opposition.

    Plain embedding cosine similarity tends to score opposite statements
    (e.g. "已上报" vs "未上报") as nearly identical.  This calculator layers
    three penalty checks on top of the base similarity — negation mismatch,
    antonym mismatch, and keyword difference — and short-circuits to 0.0
    when two texts are detected as direct semantic opposites.
    """

    def __init__(self):
        # Negation markers; a mismatch (one text negated, the other not)
        # triggers a heavy penalty in detect_negation_mismatch().
        self.negation_words = {'不', '没', '未', '无', '非', '否', '莫', '勿'}
        # Bidirectional antonym lookup (both directions listed explicitly
        # so a single dict lookup resolves either side).
        self.antonym_pairs = {
            '已': '未', '未': '已',
            '有': '无', '无': '有',
            '是': '非', '非': '是',
            '入': '出', '出': '入',
            '上': '下', '下': '上',
            '增加': '减少', '减少': '增加',
            '收入': '支出', '支出': '收入',
            '盈利': '亏损', '亏损': '盈利',
            '借方': '贷方', '贷方': '借方'
        }

    def calculate_enhanced_similarity(self, text1, text2, semantic_model):
        """Return a dict of similarity scores and penalty components.

        Keys: final_similarity, base_similarity, negation_penalty,
        antonym_penalty, key_diff_penalty.  Empty input yields all zeros;
        detected semantic opposition forces final_similarity to 0.0 with
        maximal penalties.
        """
        if not text1 or not text2:
            return {'final_similarity': 0.0, 'base_similarity': 0.0,
                    'negation_penalty': 0.0, 'antonym_penalty': 0.0, 'key_diff_penalty': 0.0}

        # 1. Base semantic similarity from the embedding model.
        base_similarity = self.calculate_semantic_similarity(text1, text2, semantic_model)

        # 2. Hard short-circuit: direct semantic opposites are never similar.
        if self.detect_semantic_opposition(text1, text2):
            return {'final_similarity': 0.0, 'base_similarity': base_similarity,
                    'negation_penalty': 1.0, 'antonym_penalty': 1.0, 'key_diff_penalty': 1.0}

        # 3-5. Soft penalties.  Only the strongest one is applied so the
        # penalties do not compound.
        negation_penalty = self.detect_negation_mismatch(text1, text2)
        antonym_penalty = self.detect_antonym_mismatch(text1, text2)
        key_diff_penalty = self.detect_key_differences(text1, text2)

        total_penalty = max(negation_penalty, antonym_penalty, key_diff_penalty)
        final_similarity = base_similarity * (1 - total_penalty)

        return {
            'final_similarity': final_similarity,
            'base_similarity': base_similarity,
            'negation_penalty': negation_penalty,
            'antonym_penalty': antonym_penalty,
            'key_diff_penalty': key_diff_penalty
        }

    def detect_semantic_opposition(self, text1, text2):
        """Return True when text1 and text2 are direct semantic opposites.

        Two texts count as opposites when they differ only by a known
        opposition pair (e.g. one contains '未' where the other contains
        '已'), or by the presence of the negator '不', and the remaining
        text is highly similar.
        """
        opposition_pairs = [
            ('未', '已'),
            ('无', '有'),
            ('增加', '减少'),
            ('收入', '支出'),
            ('盈利', '亏损'),
            ('借方', '贷方'),
        ]

        for word1, word2 in opposition_pairs:
            # Check both directions: word1 in text1 / word2 in text2,
            # and vice versa.
            for a, b in ((word1, word2), (word2, word1)):
                if a in text1 and b in text2:
                    remainder1 = text1.replace(a, '')
                    remainder2 = text2.replace(b, '')
                    if self.is_similar_remainder(remainder1, remainder2):
                        return True

        # Special case for the negator '不': one text is the plain
        # negation of the other.
        if '不' in text1 and '不' not in text2:
            if self.is_similar_remainder(text1.replace('不', ''), text2):
                return True

        if '不' in text2 and '不' not in text1:
            if self.is_similar_remainder(text1, text2.replace('不', '')):
                return True

        return False

    def is_similar_remainder(self, text1, text2):
        """Return True when the two remainder strings are near-duplicates.

        Uses normalized edit-distance similarity.  The 0.7 threshold is an
        empirical balance point: 0.8+ misses real opposition pairs, 0.6-
        produces false positives.
        """
        if not text1 or not text2:
            return False

        return fuzz.ratio(text1, text2) / 100.0 > 0.7

    def detect_negation_mismatch(self, text1, text2):
        """Return 0.8 when exactly one of the texts contains a negation word."""
        words1 = set(jieba.cut(text1))
        words2 = set(jieba.cut(text2))

        has_negation1 = bool(words1 & self.negation_words)
        has_negation2 = bool(words2 & self.negation_words)

        # XOR: penalize only when one side is negated and the other is not.
        if has_negation1 != has_negation2:
            return 0.8

        return 0.0

    def detect_antonym_mismatch(self, text1, text2):
        """Return 0.9 when an antonym pair appears at nearby token positions."""
        words1 = list(jieba.cut(text1))
        words2 = list(jieba.cut(text2))

        for i, word1 in enumerate(words1):
            antonym = self.antonym_pairs.get(word1)
            if antonym is None:
                continue
            for j, word2 in enumerate(words2):
                # "Nearby" means within two token positions of each other.
                if word2 == antonym and abs(i - j) <= 2:
                    return 0.9

        return 0.0

    def detect_key_differences(self, text1, text2):
        """Penalize keyword (noun/verb) divergence via Jaccard distance.

        Returns 0.0 for small differences (ratio <= 0.3); otherwise the
        difference ratio, capped at 0.7.
        """
        keywords1 = self.extract_keywords(text1)
        keywords2 = self.extract_keywords(text2)

        all_keywords = keywords1 | keywords2
        if not all_keywords:
            return 0.0

        diff_ratio = 1 - len(keywords1 & keywords2) / len(all_keywords)

        if diff_ratio > 0.3:
            return min(diff_ratio, 0.7)

        return 0.0

    def extract_keywords(self, text):
        """Extract noun/verb keywords from *text* via POS tagging.

        Defensive against odd segmenter output: accepts pair objects,
        (word, flag) tuples, or bare strings.  Returns an empty set on any
        failure rather than raising.
        """
        try:
            if not text or not isinstance(text, str) or text.strip() == "":
                return set()

            # Keep only word characters and CJK ideographs; everything else
            # becomes whitespace so it cannot confuse the segmenter.
            clean_text = re.sub(r'[^\w\u4e00-\u9fff]', ' ', text)
            clean_text = re.sub(r'\s+', ' ', clean_text).strip()

            if not clean_text:
                return set()

            keywords = set()
            for segment in pseg.cut(clean_text):
                # Safely extract (word, flag) from whatever the segmenter yields.
                if hasattr(segment, 'word') and hasattr(segment, 'flag'):
                    word, flag = segment.word, segment.flag
                elif isinstance(segment, (tuple, list)) and len(segment) == 2:
                    word, flag = segment
                elif isinstance(segment, str):
                    word, flag = segment, 'x'  # unknown part of speech
                else:
                    continue

                # Keep nouns (n*) and verbs (v*) only.
                if word and flag and isinstance(word, str) and isinstance(flag, str):
                    if flag.startswith('n') or flag.startswith('v'):
                        keywords.add(word)

            return keywords

        except Exception as e:
            print(f"分词出错: {e}, 文本: '{text}'")
            return set()

    def calculate_semantic_similarity(self, text1, text2, semantic_model):
        """Cosine similarity between the embeddings of the two texts."""
        embeddings = semantic_model.encode([text1, text2])
        return cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]


class OptimizedReportDeduplicator:
    """Cluster near-duplicate report names per department and score each
    cluster's reasonableness with EnhancedSimilarityCalculator."""

    def __init__(self, model_path):
        """Load the sentence-embedding model and cleaning configuration.

        Args:
            model_path: model name or local directory accepted by
                SentenceTransformer.
        """
        print("正在加载模型...")
        # BUG FIX: the argument used to be silently overwritten by a
        # hard-coded local path; honor the caller-supplied path instead.
        self.model = SentenceTransformer(model_path)
        self.similarity_calculator = EnhancedSimilarityCalculator()

        # Whitelist of location-like words that must survive cleaning
        # (the address patterns would otherwise strip them).
        self.whitelist_words = [
            '社区', '街镇', '镇园区', '园区', '街（镇）', '乡镇', 'XX街道', '行管区', '镇街道', '乡镇街道', '小区',
            '片区'
            # extend with more whitelist words as needed
        ]

        print("模型加载完成!")

    def preprocess_text(self, text):
        """Normalize a cell value: NaN -> "", collapse internal whitespace."""
        if pd.isna(text):
            return ""
        text = str(text).strip()
        return re.sub(r'\s+', ' ', text)

    def clean_report_name(self, text):
        """Strip location and time markers from a report name.

        Whitelisted words are NOT protected here (see
        clean_report_name_advanced for the protected variant).  Returns the
        original text when cleaning would leave an empty string.
        """
        if pd.isna(text) or not text:
            return ""

        text = str(text).strip()

        # Patterns removed from the name; order matters (longer / more
        # specific date forms are matched before shorter ones).
        patterns_to_remove = [
            # locality suffixes and known place names
            r'[^，。、；：（）()\d\s]+(?:市|区|县|镇|街道|街道级)',
            r'安徽省|包河区|合肥市|宣州区|砀山县|庐阳区|蜀山区|瑶海区|长丰县|肥东县|肥西县|巢湖市|庐江县',
            r'淮北市|亳州市|宿州市|蚌埠市|阜阳市|淮南市|滁州市|六安市|马鞍山市|芜湖市|宣城市|铜陵市|池州市|安庆市|黄山市',
            r'埇桥区|周寨|望东|宁国市|芜湖路|以来|镇西村|望江东路|沈福村|沱河街道',
            # dates and reporting periods
            r'\d{4}年度|\d{2}年度',
            r'\d{4}年|\d{2}年',
            r'2023|2024|2025',
            r'\d{4}月份|\d{1,2}月份',
            r'\d{4}月|\d{1,2}月',
            r'\d{4}-\d{1,2}-\d{1,2}|\d{4}/\d{1,2}/\d{1,2}',
            r'第.季度|第.期',
            r'[一二三四1-4]季度'
        ]

        cleaned_text = text
        for pattern in patterns_to_remove:
            cleaned_text = re.sub(pattern, '', cleaned_text)

        # Normalize whitespace and trim leading/trailing punctuation.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
        cleaned_text = re.sub(r'^[，。、；：]|[，。、；：]$', '', cleaned_text)
        cleaned_text = cleaned_text.strip()

        # Fall back to the original name if everything was stripped.
        return cleaned_text if cleaned_text else text

    def clean_report_name_advanced(self, text):
        """Like clean_report_name, but shields whitelist words.

        Each whitelisted word is swapped for a unique placeholder before the
        removal patterns run, then restored afterwards.  A pattern that
        would damage a placeholder is skipped entirely.
        """
        if pd.isna(text) or not text:
            return ""

        text = str(text).strip()

        # Replace each whitelisted word with a unique placeholder so the
        # removal patterns cannot touch it.
        protected_text = text
        placeholder_map = {}
        for word in self.whitelist_words:
            if word in protected_text:
                placeholder = f"PROTECTED_{uuid.uuid4().hex[:8]}_PLACEHOLDER"
                placeholder_map[placeholder] = word
                protected_text = protected_text.replace(word, placeholder)

        # Same pattern set as clean_report_name (kept separate because the
        # place-name list intentionally differs slightly).
        patterns_to_remove = [
            # locality suffixes and known place names
            r'[^，。、；：（）()\d\s]+(?:市|区|县|镇|街道|街道级)',
            r'安徽省|包河区|合肥市|宣州区|砀山县|庐阳区|蜀山区|瑶海区|长丰县|肥东县|肥西县|巢湖市|庐江县',
            r'淮北市|亳州市|宿州市|蚌埠市|阜阳市|淮南市|滁州市|六安市|马鞍山市|芜湖市|宣城市|铜陵市|池州市|安庆市|黄山市',
            r'埇桥区|周寨|望东|宁国市|芜湖路|以来|镇西村|望江东路|沈福村',
            # dates and reporting periods
            r'\d{4}年度|\d{2}年度',
            r'\d{4}年|\d{2}年',
            r'2023|2024|2025',
            r'\d{4}月份|\d{1,2}月份',
            r'\d{4}月|\d{1,2}月',
            r'\d{4}-\d{1,2}-\d{1,2}|\d{4}/\d{1,2}/\d{1,2}',
            r'第.季度|第.期',
            r'[一二三四1-4]季度'
        ]

        cleaned_text = protected_text
        for pattern in patterns_to_remove:
            temp_cleaned = re.sub(pattern, '', cleaned_text)

            # Skip any pattern that would destroy a placeholder (i.e. would
            # have removed part of a protected word's stand-in).
            if any(p in cleaned_text and p not in temp_cleaned
                   for p in placeholder_map):
                continue

            cleaned_text = temp_cleaned

        # Restore the protected whitelist words.
        for placeholder, original_word in placeholder_map.items():
            cleaned_text = cleaned_text.replace(placeholder, original_word)

        # Normalize whitespace and trim leading/trailing punctuation.
        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
        cleaned_text = re.sub(r'^[，。、；：]|[，。、；：]$', '', cleaned_text)
        cleaned_text = cleaned_text.strip()

        # Fall back to the original name if everything was stripped.
        return cleaned_text if cleaned_text else text

    def calculate_semantic_similarity_batch(self, texts1, texts2):
        """Element-wise cosine similarity between two aligned text lists."""
        embeddings1 = self.model.encode(texts1)
        embeddings2 = self.model.encode(texts2)
        return [cosine_similarity([e1], [e2])[0][0]
                for e1, e2 in zip(embeddings1, embeddings2)]

    def calculate_edit_similarity(self, text1, text2):
        """Normalized edit-distance similarity in [0, 1]; 0.0 for empty input."""
        if not text1 or not text2:
            return 0.0
        return fuzz.ratio(text1, text2) / 100.0

    def _greedy_cluster(self, similarity_matrix, similarity_threshold):
        """Single-pass greedy clustering over a precomputed similarity matrix.

        Each unassigned index seeds a new cluster and absorbs every later
        unassigned index whose similarity meets the threshold.
        """
        n = len(similarity_matrix)
        clusters = []
        assigned_indices = set()

        pbar = tqdm(total=n, desc="聚类进度")
        for i in range(n):
            if i in assigned_indices:
                pbar.update(1)
                continue

            current_cluster = [i]
            assigned_indices.add(i)

            for j in range(i + 1, n):
                if j in assigned_indices:
                    continue
                if similarity_matrix[i][j] >= similarity_threshold:
                    current_cluster.append(j)
                    assigned_indices.add(j)

            clusters.append(current_cluster)
            pbar.update(1)

        pbar.close()
        return clusters

    def cluster_by_name_only(self, reports_data, similarity_threshold=0.7):
        """Method 1: cluster on the cleaned report name alone."""
        print("使用方法1: 仅使用报表名称进行聚类")

        name_texts = [item['cleaned_name'] for item in reports_data]
        # Compute the full pairwise matrix once instead of one
        # cosine_similarity call per pair (same values, far fewer calls).
        similarity_matrix = cosine_similarity(self.model.encode(name_texts))
        return self._greedy_cluster(similarity_matrix, similarity_threshold)

    def cluster_by_weighted_similarity(self, reports_data, similarity_threshold=0.7, weights=(0.6, 0.4)):
        """Method 2: weighted blend of name and data-item similarity."""
        print("使用方法2: 使用报表名称和数据项相似度加权后聚类")

        name_texts = [item['cleaned_name'] for item in reports_data]
        data_item_texts = [item['data_item'] for item in reports_data]

        name_matrix = cosine_similarity(self.model.encode(name_texts))
        data_item_matrix = cosine_similarity(self.model.encode(data_item_texts))
        similarity_matrix = weights[0] * name_matrix + weights[1] * data_item_matrix
        return self._greedy_cluster(similarity_matrix, similarity_threshold)

    def cluster_by_combined_text(self, reports_data, similarity_threshold=0.7):
        """Method 3: cluster on the concatenated name + data-item text."""
        print("使用方法3: 使用报表名称和数据项的合并文本计算相似度")

        combined_texts = [f"{item['cleaned_name']} {item['data_item']}" for item in reports_data]
        similarity_matrix = cosine_similarity(self.model.encode(combined_texts))
        return self._greedy_cluster(similarity_matrix, similarity_threshold)

    def get_cluster_representative(self, cluster_data):
        """Pick the longest original name (and its data item) as the
        cluster representative."""
        if not cluster_data:
            return {"name": "", "data_item": ""}

        representative_name = max((item['name'] for item in cluster_data), key=len)

        # Take the data item paired with the representative name.
        representative_data_item = ""
        for item in cluster_data:
            if item['name'] == representative_name:
                representative_data_item = item['data_item']
                break

        # Fall back to the first entry's data item when the match is empty.
        if not representative_data_item:
            representative_data_item = cluster_data[0]['data_item']

        return {
            "name": representative_name,
            "data_item": representative_data_item
        }

    def _clean_name(self, text, use_advanced_cleaning):
        """Dispatch to the advanced (whitelist-protecting) or basic cleaner."""
        if use_advanced_cleaning:
            return self.clean_report_name_advanced(text)
        return self.clean_report_name(text)

    def process_department(self, department_data, department_name,
                           method='name_only', similarity_threshold=0.7,
                           weights=(0.6, 0.4), use_advanced_cleaning=True):
        """Cluster one department's reports and score every row.

        Args:
            department_data: DataFrame with '报表名称' and '数据项' columns.
            department_name: department label copied into the output.
            method: 'name_only' | 'weighted' | 'combined' clustering mode.
            similarity_threshold: minimum similarity to join a cluster.
            weights: (name_weight, data_item_weight) for 'weighted' mode.
            use_advanced_cleaning: use the whitelist-protecting cleaner.

        Returns:
            DataFrame with the original columns plus clustering and
            similarity-diagnostic columns.
        """
        print(f"\n正在处理部门: {department_name}")

        # Build working records: original name, cleaned name, data item.
        reports_data = []
        for _, row in department_data.iterrows():
            original_name = self.preprocess_text(row['报表名称'])
            reports_data.append({
                'name': original_name,
                'cleaned_name': self._clean_name(original_name, use_advanced_cleaning),
                'data_item': self.preprocess_text(row['数据项']),
                'original_row': row
            })

        # Trivial case: zero or one report needs no clustering.
        if len(reports_data) <= 1:
            result_data = []
            for item in reports_data:
                new_row = item['original_row'].to_dict()
                new_row.update({
                    '聚类标签': 0,
                    '聚类代表名称': item['name'],
                    '聚类代表数据项': item['data_item'],
                    '清洗后报表名称': item['cleaned_name'],
                    '基础相似度': 1.0,
                    '最终相似度': 1.0,
                    '否定词惩罚': 0.0,
                    '反义词惩罚': 0.0,
                    '关键差异惩罚': 0.0,
                    '是否合理': '合理',
                    '部门名称': department_name,
                    '使用方法': method
                })
                result_data.append(new_row)
            return pd.DataFrame(result_data)

        # Dispatch to the selected clustering mode (operates on cleaned names).
        if method == 'weighted':
            clusters = self.cluster_by_weighted_similarity(reports_data, similarity_threshold, weights)
        elif method == 'combined':
            clusters = self.cluster_by_combined_text(reports_data, similarity_threshold)
        else:
            # 'name_only' and any unknown method fall back to name clustering.
            clusters = self.cluster_by_name_only(reports_data, similarity_threshold)

        # Assign a label and a representative to each cluster.
        cluster_labels = [-1] * len(reports_data)
        cluster_representatives = {}

        for cluster_id, cluster_indices in enumerate(clusters):
            cluster_items = [reports_data[i] for i in cluster_indices]
            representative = self.get_cluster_representative(cluster_items)

            cluster_representatives[cluster_id] = {
                'name': representative['name'],
                'data_item': representative['data_item'],
                # The representative name is cleaned too so the final
                # similarity compares like with like.
                'cleaned_name': self._clean_name(representative['name'], use_advanced_cleaning)
            }

            for idx in cluster_indices:
                cluster_labels[idx] = cluster_id

        # Any index the greedy pass left unassigned becomes its own cluster.
        for idx in range(len(reports_data)):
            if cluster_labels[idx] == -1:
                cluster_id = len(cluster_representatives)
                representative_name = reports_data[idx]['name']
                cluster_representatives[cluster_id] = {
                    'name': representative_name,
                    'data_item': reports_data[idx]['data_item'],
                    'cleaned_name': self._clean_name(representative_name, use_advanced_cleaning)
                }
                cluster_labels[idx] = cluster_id

        # Score every row against its cluster representative using the
        # enhanced (penalty-aware) similarity.
        print("正在计算最终相似度...")
        similarity_results = []
        for idx in range(len(reports_data)):
            cleaned_name = reports_data[idx]['cleaned_name']
            representative_cleaned_name = cluster_representatives[cluster_labels[idx]]['cleaned_name']

            try:
                similarity_result = self.similarity_calculator.calculate_enhanced_similarity(
                    cleaned_name, representative_cleaned_name, self.model
                )
            except Exception as e:
                # Fall back to plain embedding similarity if the enhanced
                # calculation fails for any reason.
                print(f"计算相似度时出错: {e}, 文本1: '{cleaned_name}', 文本2: '{representative_cleaned_name}'")
                base_similarity = self.calculate_semantic_similarity_batch(
                    [cleaned_name], [representative_cleaned_name]
                )[0]
                similarity_result = {
                    'final_similarity': base_similarity,
                    'base_similarity': base_similarity,
                    'negation_penalty': 0.0,
                    'antonym_penalty': 0.0,
                    'key_diff_penalty': 0.0
                }

            similarity_results.append(similarity_result)

        # Assemble the output rows.
        result_data = []
        for idx, item in enumerate(reports_data):
            cluster_id = cluster_labels[idx]
            representative = cluster_representatives[cluster_id]
            similarity_result = similarity_results[idx]

            final_similarity = similarity_result['final_similarity']
            # A row is "reasonable" when it stays very close (>= 0.9) to
            # its cluster representative after all penalties.
            is_reasonable = '合理' if final_similarity >= 0.9 else '不合理'

            new_row = item['original_row'].to_dict()
            new_row.update({
                '聚类标签': cluster_id,
                '聚类代表名称': representative['name'],
                '聚类代表数据项': representative['data_item'],
                '清洗后报表名称': item['cleaned_name'],
                '聚类代表清洗后名称': representative['cleaned_name'],
                '基础相似度': round(similarity_result['base_similarity'], 4),
                '最终相似度': round(similarity_result['final_similarity'], 4),
                '否定词惩罚': round(similarity_result['negation_penalty'], 4),
                '反义词惩罚': round(similarity_result['antonym_penalty'], 4),
                '关键差异惩罚': round(similarity_result['key_diff_penalty'], 4),
                '是否合理': is_reasonable,
                '部门名称': department_name,
                '使用方法': method
            })
            result_data.append(new_row)

        return pd.DataFrame(result_data)


def main():
    """Entry point: read the report ledger, cluster reports per department,
    and write clustering results, statistics, and cluster details to an
    Excel workbook."""
    # Initialize the deduplicator with the embedding-model identifier.
    deduplicator = OptimizedReportDeduplicator('paraphrase-multilingual-MiniLM-L12-v2')

    # Load the input workbook; abort on any read failure.
    print("正在读取Excel文件...")
    try:
        df = pd.read_excel('C:/Users/xingwenzheng/Desktop/代码测试集/全量数据/报表统计汇总表-0723_匹配结果_匹配结果.xlsx',
                           sheet_name='汇总台账')
        # Alternative input files used during testing:
        # C:/Users/xingwenzheng/Desktop/代码测试集/整合数据全/测试数据908_匹配结果_匹配结果.xlsx
        # C:/Users/xingwenzheng/Desktop/代码测试集/全量数据/报表统计汇总表-0723_匹配结果_匹配结果.xlsx
        print(f"成功读取数据，共 {len(df)} 行")
    except Exception as e:
        print(f"读取文件失败: {e}")
        return

    # Abort early if any required column is missing.
    required_columns = ['部门名称', '报表名称', '数据项']
    missing_columns = [col for col in required_columns if col not in df.columns]
    if missing_columns:
        print(f"缺少必要的列: {missing_columns}")
        return

    # Configuration — choose the clustering method here.
    clustering_method = 'weighted'  # one of: 'name_only', 'weighted', 'combined'
    similarity_threshold = 0.85  # pairs scoring >= this threshold join the same cluster
    weights = (0.8, 0.2)  # (name weight, data-item weight) for the 'weighted' method
    use_advanced_cleaning = True  # whether to use the whitelist-protecting cleaner

    # Human-readable description per method (logging only).
    method_descriptions = {
        'name_only': '仅使用报表名称进行聚类',
        'weighted': '使用报表名称和数据项相似度加权后聚类',
        'combined': '使用报表名称和数据项的合并文本计算相似度后聚类'
    }

    print(f"\n选择的聚类方法: {clustering_method} - {method_descriptions[clustering_method]}")
    if clustering_method == 'weighted':
        print(f"权重设置: 名称权重={weights[0]}, 数据项权重={weights[1]}")
    print(f"使用高级清洗方法: {use_advanced_cleaning}")

    # Process each department independently.
    all_results = []
    departments = df['部门名称'].unique()

    print(f"\n开始处理 {len(departments)} 个部门...")

    for dept in departments:
        dept_data = df[df['部门名称'] == dept].reset_index(drop=True)
        dept_result = deduplicator.process_department(
            dept_data, dept, clustering_method, similarity_threshold, weights, use_advanced_cleaning
        )
        all_results.append(dept_result)

    # Concatenate the per-department results.
    final_result = pd.concat(all_results, ignore_index=True)

    # Write results to an Excel workbook with three sheets.
    output_file = f'C:/Users/xingwenzheng/Desktop/代码测试集/全量数据/聚类报告_匹配结果_{clustering_method}0.xlsx'
    # Alternative output locations used during testing:
    # f'C:/Users/xingwenzheng/Desktop/代码测试集/整合数据全/聚类报告_匹配结果_{clustering_method}.xlsx'
    # f'C:/Users/xingwenzheng/Desktop/代码测试集/全量数据/聚类报告_匹配结果_{clustering_method}.xlsx'
    print(f"\n正在保存结果到 {output_file}...")

    with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
        final_result.to_excel(writer, sheet_name='聚类结果', index=False)

        # Summary statistics sheet.
        stats_data = {
            '统计项': [
                '总数据条数', '部门数量', '聚类数量',
                '合理聚类数', '不合理聚类数',
                    '平均基础相似度', '平均最终相似度', '聚类方法'
            ],
            '数值': [
                len(final_result),
                len(departments),
                final_result['聚类标签'].nunique(),
                len(final_result[final_result['是否合理'] == '合理']),
                len(final_result[final_result['是否合理'] == '不合理']),
                round(final_result['基础相似度'].mean(), 4),
                round(final_result['最终相似度'].mean(), 4),
                clustering_method
            ]
        }
        stats_df = pd.DataFrame(stats_data)
        stats_df.to_excel(writer, sheet_name='统计信息', index=False)

        # Per-cluster details sheet.
        # NOTE(review): '聚类标签' is assigned per department (labels restart
        # at 0 for each department), so clusters from different departments
        # sharing a label are merged here — confirm this aggregation is intended.
        cluster_details = []
        for cluster_id in final_result['聚类标签'].unique():
            cluster_data = final_result[final_result['聚类标签'] == cluster_id]
            representative_name = cluster_data.iloc[0]['聚类代表名称']
            representative_data_item = cluster_data.iloc[0]['聚类代表数据项']

            cluster_details.append({
                '聚类ID': cluster_id,
                '聚类大小': len(cluster_data),
                '代表名称': representative_name,
                '代表数据项': representative_data_item,
                '部门': cluster_data.iloc[0]['部门名称'],
                '平均基础相似度': round(cluster_data['基础相似度'].mean(), 4),
                '平均最终相似度': round(cluster_data['最终相似度'].mean(), 4),
                '合理性': '全部合理' if all(cluster_data['是否合理'] == '合理') else '存在不合理'
            })

        cluster_details_df = pd.DataFrame(cluster_details)
        cluster_details_df.to_excel(writer, sheet_name='聚类详情', index=False)

    print("处理完成!")

    # Console summary mirroring the statistics sheet.
    print(f"\n=== 处理结果统计 ===")
    print(f"总数据条数: {len(final_result)}")
    print(f"部门数量: {len(departments)}")
    print(f"聚类数量: {final_result['聚类标签'].nunique()}")
    print(f"合理聚类: {len(final_result[final_result['是否合理'] == '合理'])}")
    print(f"不合理聚类: {len(final_result[final_result['是否合理'] == '不合理'])}")
    print(f"平均基础相似度: {round(final_result['基础相似度'].mean(), 4)}")
    print(f"平均最终相似度: {round(final_result['最终相似度'].mean(), 4)}")
    print(f"使用方法: {clustering_method}")


if __name__ == "__main__":
    main()


