"""1. 文本嵌入
使用sentence-transformers库的多语言预训练模型
将中文文本转换为512维的语义向量

2. 聚类算法
使用DBSCAN聚类，基于余弦相似度
自动发现语义相似的文本组

3. 可视化功能
可选生成聚类可视化图表
帮助理解机器学习发现的关系

4. 回退机制
如果机器学习方法失败，自动回退到传统方法
确保程序的稳定性"""

import pandas as pd
import numpy as np
from rapidfuzz import process, fuzz
import re
from collections import defaultdict
import argparse
import os
from openpyxl import load_workbook
import warnings

warnings.filterwarnings('ignore')

# 机器学习相关库
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import DBSCAN, KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import cosine_similarity
import umap
import matplotlib.pyplot as plt
import seaborn as sns

# Try to import sentence-transformers; if unavailable, fall back to TF-IDF.
# The flag is checked again in MLDataStandardizer.__init__.
try:
    from sentence_transformers import SentenceTransformer

    SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
    print("sentence-transformers 库未安装，将使用TF-IDF作为替代")
    SENTENCE_TRANSFORMERS_AVAILABLE = False


class MLDataStandardizer:
    """Standardize department / report / data-item names in an Excel sheet.

    Groups near-duplicate strings using sentence-transformer embeddings plus
    DBSCAN clustering when available, falling back to TF-IDF vectors, and
    finally to rapidfuzz fuzzy string matching.
    """

    # Full-width -> half-width punctuation normalization table (single-pass
    # str.translate instead of chained str.replace calls).
    _FULL_TO_HALF = str.maketrans({
        '，': ',', '：': ':', '；': ';',
        '！': '!', '？': '?', '（': '(',
        '）': ')', '【': '[', '】': ']',
    })

    def __init__(self, file_path, sheet_name, similarity_threshold=80, use_ml=True):
        """Initialize the standardizer and load the source data.

        Args:
            file_path: path to the Excel workbook.
            sheet_name: worksheet to read.
            similarity_threshold: fuzzy-match threshold (0-100) used by the
                traditional method.
            use_ml: whether to attempt the machine-learning pipeline.
        """
        self.file_path = file_path
        self.sheet_name = sheet_name
        self.similarity_threshold = similarity_threshold
        # ML is only possible when sentence-transformers imported successfully.
        self.use_ml = use_ml and SENTENCE_TRANSFORMERS_AVAILABLE
        self.df = None
        self.model = None
        self.load_data()

        # Try to load a pre-trained semantic model; any failure downgrades
        # self.use_ml so the rest of the pipeline uses TF-IDF / fuzzy matching.
        if self.use_ml:
            print("尝试加载预训练语义模型...")
            try:
                # Candidate model paths/names, tried in order until one loads.
                # NOTE(review): hard-coded local path — consider making this
                # configurable via the CLI.
                model_names = [
                    'C:/Users/xingwenzheng/models/sentence-transformer'
                    # 'paraphrase-multilingual-MiniLM-L12-v2',
                    # 'distiluse-base-multilingual-cased-v1',
                    # 'paraphrase-multilingual-mpnet-base-v2'
                ]

                for model_name in model_names:
                    try:
                        self.model = SentenceTransformer(model_name)
                        print(f"成功加载模型: {model_name}")
                        break
                    except Exception as e:
                        print(f"加载模型 {model_name} 失败: {e}")

                # No candidate model could be loaded -> fall back to TF-IDF.
                if self.model is None:
                    print("所有预训练模型加载失败，将使用TF-IDF")
                    self.use_ml = False

            except Exception as e:
                print(f"模型加载失败，将回退到传统方法: {e}")
                self.use_ml = False

    def load_data(self):
        """Read the worksheet into ``self.df`` and validate required columns.

        Raises:
            ValueError: if a required column is missing.
            Exception: re-raised from pandas when the file/sheet cannot be read.
        """
        try:
            self.df = pd.read_excel(self.file_path, sheet_name=self.sheet_name)
            print(f"成功加载数据，共 {len(self.df)} 行")
            print("列名:", self.df.columns.tolist())

            # Fail fast if the sheet does not have the expected schema.
            required_columns = ['部门名称', '报表名称', '数据项']
            for col in required_columns:
                if col not in self.df.columns:
                    raise ValueError(f"缺少必要列: {col}")

        except Exception as e:
            print(f"加载数据失败: {e}")
            raise

    def preprocess_text(self, text):
        """Normalize one cell value: NaN -> "", strip, full->half punctuation."""
        if pd.isna(text):
            return ""
        return str(text).strip().translate(self._FULL_TO_HALF)

    def apply_preprocessing(self):
        """Add cleaned copies of the three key columns to ``self.df``."""
        print("正在进行数据预处理...")
        self.df['部门名称_清洗'] = self.df['部门名称'].apply(self.preprocess_text)
        self.df['报表名称_清洗'] = self.df['报表名称'].apply(self.preprocess_text)
        self.df['数据项_清洗'] = self.df['数据项'].apply(self.preprocess_text)
        print("数据预处理完成")

    def get_text_embeddings(self, texts):
        """Vectorize texts with the semantic model, or TF-IDF as a fallback.

        A "TEXT_" prefix is prepended to every string so purely numeric cells
        are still handled as text.

        Args:
            texts: list of strings.

        Returns:
            2-D array of vectors; rows align with ``texts``.
        """
        clean_texts = [f"TEXT_{str(t).strip()}" for t in texts]
        if self.use_ml and self.model is not None:
            print("生成语义嵌入向量...")
            try:
                embeddings = self.model.encode(clean_texts, show_progress_bar=False)
                return embeddings
            except Exception as e:
                print(f"生成嵌入向量失败: {e}")
                # Disable ML and fall through to the TF-IDF path below.
                self.use_ml = False

        print("使用TF-IDF向量化...")
        # Fix: vectorize the prefixed texts (not the raw ones) so that the ML
        # and TF-IDF paths see the same input and numeric-only values cannot
        # produce an empty TF-IDF vocabulary.
        vectorizer = TfidfVectorizer()
        tfidf_matrix = vectorizer.fit_transform(clean_texts)
        return tfidf_matrix.toarray()

    def ml_find_similar_items(self, items, dept, report):
        """Group similar strings via embeddings + DBSCAN clustering.

        Args:
            items: list of strings to deduplicate.
            dept: department name (used for progress logging only).
            report: report name (used for progress logging only).

        Returns:
            dict mapping each original item to its cluster's standard item
            (the longest member of the cluster).
        """
        if len(items) <= 1:
            return {item: item for item in items}

        embeddings = self.get_text_embeddings(items)

        print(f"对 {dept}-{report} 的数据项进行聚类分析...")

        try:
            # Convert cosine similarity into a distance matrix for DBSCAN.
            similarity_matrix = cosine_similarity(embeddings)
            distance_matrix = 1 - similarity_matrix
            # Clip tiny negative values caused by floating-point error, which
            # DBSCAN's precomputed metric would reject.
            distance_matrix = np.clip(distance_matrix, 0, 2)
            # min_samples=1 => every item joins a cluster (no noise label).
            dbscan = DBSCAN(eps=0.3, min_samples=1, metric='precomputed')
            clusters = dbscan.fit_predict(distance_matrix)

            # Group items by cluster id.
            cluster_items = defaultdict(list)
            for item, cluster_id in zip(items, clusters):
                cluster_items[cluster_id].append(item)

            # The longest member of each cluster becomes the standard item
            # (a singleton cluster simply maps to itself).
            mapping = {}
            for cluster_list in cluster_items.values():
                standard_item = max(cluster_list, key=len)
                for item in cluster_list:
                    mapping[item] = standard_item

            return mapping

        except Exception as e:
            print(f"聚类分析失败: {e}")
            # Fall back to fuzzy string matching.
            return self.traditional_find_similar_items(items)

    def traditional_find_similar_items(self, items):
        """Group similar strings using rapidfuzz token-sort similarity.

        Greedy single pass: each not-yet-processed item absorbs the remaining
        items whose similarity meets ``self.similarity_threshold``; the longest
        string in a group becomes its standard item.

        Args:
            items: list of strings to deduplicate.

        Returns:
            dict mapping each original item to its standard item.
        """
        mapping = {}
        processed_items = set()

        for item in items:
            if item in processed_items:
                continue

            other_items = [i for i in items if i != item and i not in processed_items]
            similar_items = []

            for other_item in other_items:
                similarity = fuzz.token_sort_ratio(item, other_item)
                if similarity >= self.similarity_threshold:
                    similar_items.append(other_item)
                    processed_items.add(other_item)

            if similar_items:
                # Longest string wins as the canonical form.
                standard_item = max([item] + similar_items, key=len)
                mapping[item] = standard_item
                for sim_item in similar_items:
                    mapping[sim_item] = standard_item
            else:
                mapping[item] = item

            processed_items.add(item)

        return mapping

    def _generate_mappings(self, find_similar):
        """Build the report-name and data-item mapping tables.

        Shared implementation for the ML and traditional pipelines.

        Args:
            find_similar: callable ``(items, dept, report) -> {orig: std}``
                used to group similar strings.

        Returns:
            Tuple ``(report_mapping_df, data_item_mapping_df)``.
        """
        # --- Stage 1: standardize report names within each department ---
        report_mappings = []
        for dept in self.df['部门名称_清洗'].unique():
            dept_data = self.df[self.df['部门名称_清洗'] == dept]
            reports = [r for r in dept_data['报表名称_清洗'].unique()
                       if r and str(r).strip()]

            if reports:
                for orig, std in find_similar(reports, dept, "所有报表").items():
                    report_mappings.append({
                        '部门名称': dept,
                        '原始报表名称': orig,
                        '标准报表名称': std
                    })

        report_mapping_df = pd.DataFrame(report_mappings)

        # Apply the report-name mapping back onto the dataframe.
        report_mapping_dict = {
            (row['部门名称'], row['原始报表名称']): row['标准报表名称']
            for _, row in report_mapping_df.iterrows()
        }
        self.df['标准报表名称'] = self.df.apply(
            lambda row: report_mapping_dict.get(
                (row['部门名称_清洗'], row['报表名称_清洗']),
                row['报表名称_清洗']
            ),
            axis=1
        )

        # --- Stage 2: standardize data items per (dept, report) group ---
        data_item_mappings = []
        for (dept, report), group in self.df.groupby(['部门名称_清洗', '标准报表名称']):
            items = [i for i in group['数据项_清洗'].unique() if i and str(i).strip()]

            if items:
                for orig, std in find_similar(items, dept, report).items():
                    data_item_mappings.append({
                        '部门名称': dept,
                        '报表名称': report,
                        '原始数据项': orig,
                        '标准数据项': std
                    })

        return report_mapping_df, pd.DataFrame(data_item_mappings)

    def generate_mappings_with_ml(self):
        """Generate mapping tables using the ML clustering pipeline."""
        print("使用机器学习方法生成映射表...")
        return self._generate_mappings(
            lambda items, dept, report: self.ml_find_similar_items(items, dept, report)
        )

    def generate_mappings_traditional(self):
        """Generate mapping tables using fuzzy string matching only."""
        print("使用传统方法生成映射表...")
        return self._generate_mappings(
            lambda items, dept, report: self.traditional_find_similar_items(items)
        )

    def save_results(self):
        """Generate mappings, standardize the data, and write the Excel output.

        Tries to append sheets to the source workbook; if that fails for any
        reason, writes a new ``<name>_result.xlsx`` / ``<name>_ml_result.xlsx``
        file next to the original instead.

        Returns:
            The original file path.
        """
        print("正在保存结果...")

        if self.use_ml:
            report_mapping_df, data_item_mapping_df = self.generate_mappings_with_ml()
        else:
            report_mapping_df, data_item_mapping_df = self.generate_mappings_traditional()

        # Apply the data-item mapping to produce the standardized column.
        mapping_dict = {
            (row['部门名称'], row['报表名称'], row['原始数据项']): row['标准数据项']
            for _, row in data_item_mapping_df.iterrows()
        }
        self.df['数据项_标准'] = self.df.apply(
            lambda row: mapping_dict.get(
                (row['部门名称_清洗'], row['标准报表名称'], row['数据项_清洗']),
                row['数据项_清洗']
            ),
            axis=1
        )

        # Sheet names carry an 'ML_' prefix when the ML pipeline was used.
        prefix = 'ML_' if self.use_ml else ''

        try:
            # Append to the existing workbook; ExcelWriter raises if the file
            # is missing or locked, which routes us to the fallback below.
            with pd.ExcelWriter(self.file_path, engine='openpyxl', mode='a',
                                if_sheet_exists='replace') as writer:
                self.df.to_excel(writer, sheet_name=f'{prefix}标准化数据', index=False)
                report_mapping_df.to_excel(writer, sheet_name=f'{prefix}报表映射表', index=False)
                data_item_mapping_df.to_excel(writer, sheet_name=f'{prefix}数据项映射表', index=False)

                # One row per unique standardized item within each group.
                unique_items = [
                    {'部门名称': dept, '报表名称': report, '标准数据项': item}
                    for (dept, report), group in self.df.groupby(['部门名称_清洗', '标准报表名称'])
                    for item in group['数据项_标准'].unique()
                ]
                pd.DataFrame(unique_items).to_excel(
                    writer, sheet_name=f'{prefix}唯一标准项', index=False)

            print(f"结果已保存到: {self.file_path}")

        except Exception as e:
            print(f"保存结果失败: {e}")
            # Write a brand-new workbook instead. splitext is robust for any
            # extension (the old '.xlsx'-replace failed on other suffixes).
            base, ext = os.path.splitext(self.file_path)
            suffix = "_ml_result" if self.use_ml else "_result"
            output_path = f"{base}{suffix}{ext or '.xlsx'}"
            with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
                self.df.to_excel(writer, sheet_name=f'{prefix}标准化数据', index=False)
                report_mapping_df.to_excel(writer, sheet_name=f'{prefix}报表映射表', index=False)
                data_item_mapping_df.to_excel(writer, sheet_name=f'{prefix}数据项映射表', index=False)

            print(f"结果已保存到新文件: {output_path}")

        return self.file_path


def main():
    """CLI entry point: parse arguments, run the standardizer, print stats."""
    parser = argparse.ArgumentParser(description='机器学习文本去重工具')
    parser.add_argument('--file_path', default='C:/Users/xingwenzheng/Desktop/国家部委-附件1-基层报表底数初步清单-1.xlsx')
    parser.add_argument('--sheet', default='分割前数据', help='工作表名称')
    parser.add_argument('--threshold', type=int, default=80, help='相似度阈值')
    parser.add_argument('--no-ml', action='store_true', help='不使用机器学习')

    args = parser.parse_args()

    # Build the standardizer (loads the workbook immediately).
    standardizer = MLDataStandardizer(
        args.file_path,
        args.sheet,
        args.threshold,
        use_ml=not args.no_ml
    )

    # Clean the key columns, then generate mappings and write the output.
    standardizer.apply_preprocessing()
    standardizer.save_results()

    total = len(standardizer.df)
    unique = standardizer.df['数据项_标准'].nunique()
    print("\n处理完成!")
    print(f"原始数据项数量: {total}")
    print(f"唯一标准数据项数量: {unique}")
    # Guard against ZeroDivisionError when the sheet has no rows.
    if total:
        print(f"去重率: {(1 - unique / total) * 100:.2f}%")


if __name__ == "__main__":
    main()