from py2neo import Graph
import pandas as pd
import os
from dotenv import load_dotenv
from rank_bm25 import BM25Okapi
import re

# Load environment variables from .env
load_dotenv()

# Initialize the Neo4j connection
def init_neo4j_connection():
    """Connect to Neo4j using credentials from the environment.

    Reads NEO4J_URI / NEO4J_USER / NEO4J_PASSWORD (loaded via dotenv) and
    verifies the connection with a trivial probe query.

    Returns:
        Graph | None: a connected py2neo Graph, or None on failure.
    """
    NEO4J_URI = os.getenv("NEO4J_URI")
    NEO4J_USER = os.getenv("NEO4J_USER")
    NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD")
    # A bad URI or wrong credentials makes Graph(...) / graph.run raise,
    # so the failure path must be an except branch — the old falsy-result
    # check was effectively unreachable and the function crashed instead
    # of returning None as the caller expects.
    try:
        graph = Graph(NEO4J_URI, auth=(NEO4J_USER, NEO4J_PASSWORD))
        if graph.run("RETURN 1").data():
            print("✅ 连接Neo4j成功")
            return graph
    except Exception as e:
        print(f"❌ 连接Neo4j失败: {str(e)}")
        return None
    print("❌ 连接Neo4j失败")
    return None

# Convert a graph node into a plain dict (including its internal ID)
def node_to_dict_with_Id(node):
    """Return a plain-dict view of *node*: identity, labels and properties."""
    identity = node.identity
    labels = list(node.labels)
    properties = dict(node)
    return {"identity": identity, "labels": labels, "properties": properties}

# Main processing function: exact-match mapping of step nodes to codes
def process_mpm_id_mapping(graph, standard_process_file):
    """Set ``mpm_id`` on step nodes by exact name lookup.

    Loads the standard-process CSV, builds a name -> code table, then writes
    the code onto every ``工步`` node whose ``工步名称`` matches exactly.

    Returns:
        tuple[int, list]: (number of nodes updated, step names that had no
        exact match in the standard file).
    """
    # Load the standard-process spreadsheet
    try:
        df = pd.read_csv(standard_process_file)
        print(f"✅ 读取标准工步文件，共 {len(df)} 条记录")
    except Exception as e:
        print(f"❌ 读取文件失败: {str(e)}")
        return 0, []

    # Column holding the canonical step name, and the one holding its code
    match_col = "*标准工步名称"
    mpm_id_col = "*标准工步编码"

    # Bail out early if the file lacks the expected columns
    if match_col not in df.columns or mpm_id_col not in df.columns:
        print(f"❌ 缺少必要列: {match_col} 或 {mpm_id_col}")
        return 0, []

    # Build the name -> code lookup, skipping rows with missing values
    process_map = {}
    for _, row in df.iterrows():
        name, code = row[match_col], row[mpm_id_col]
        if pd.notna(name) and pd.notna(code):
            process_map[name] = code
    print(f"✅ 构建映射完成，共 {len(process_map)} 条有效数据")

    # Fetch every step node from the graph
    try:
        records = graph.run("MATCH (n:`工步`) RETURN n")
        nodes = [node_to_dict_with_Id(rec["n"]) for rec in records]
        print(f"✅ 查询到 {len(nodes)} 个工步节点")
    except Exception as e:
        print(f"❌ 查询节点失败: {str(e)}")
        return 0, []

    updated = 0
    unmatched_names = []  # step names that found no exact standard match

    for node in nodes:
        node_id = node["identity"]
        step_name = node["properties"].get("工步名称")

        # Skip nodes with no step name at all
        if not step_name:
            print(f"⚠️ 节点 {node_id} 无'工步名称'属性，跳过")
            continue

        mpm_id = process_map.get(step_name)
        if not mpm_id:
            print(f"⚠️ 节点 {node_id} 名称'{step_name}'无匹配的标准工步编码")
            unmatched_names.append(step_name)
            continue

        graph.run("MATCH (n) WHERE id(n) = $id SET n.mpm_id = $mpm", id=node_id, mpm=mpm_id)
        updated += 1
        print(f"✅ 节点 {node_id} 添加mpm_id: {mpm_id}")

    print(f"\n处理完成，共更新 {updated} 个节点")
    return updated, unmatched_names

# BM25 fuzzy matching (single-character tokenization)
def bm25_fuzzy_matching(unmatched_names, standard_process_file, threshold=0.3):
    """Fuzzy-match step names against the standard list using BM25.

    Text is tokenized into single characters (e.g. "精磨A面" ->
    ["精","磨","A","面"]), which suits short Chinese labels.

    Args:
        unmatched_names: step names that failed exact matching.
        standard_process_file: path to the standard-process CSV.
        threshold: minimum BM25 score for a match to be accepted.

    Returns:
        dict: {name: (best_match_name, score, mpm_id)}; best_match_name and
        mpm_id are None when the best score falls below *threshold*.
    """
    # Load the standard-process spreadsheet
    try:
        df = pd.read_csv(standard_process_file)
    except Exception as e:
        print(f"❌ 读取标准工序文件失败: {str(e)}")
        return {}

    match_col = "*标准工步名称"
    mpm_id_col = "*标准工步编码"

    # Guard against files missing the expected columns (the filtering below
    # would otherwise raise KeyError).
    if match_col not in df.columns or mpm_id_col not in df.columns:
        print(f"❌ 缺少必要列: {match_col} 或 {mpm_id_col}")
        return {}

    # Keep only rows where both the name and the code are present
    valid_data = df[
        df[match_col].notna() &
        df[mpm_id_col].notna()
    ][[match_col, mpm_id_col]]

    # Standard names plus a name -> code lookup
    standard_names = valid_data[match_col].tolist()
    standard_mpm = dict(zip(valid_data[match_col], valid_data[mpm_id_col]))

    # Preprocess: strip special characters, then split into single characters
    def preprocess(text):
        text = str(text).strip()
        # BUGFIX: the old pattern [^\u4e00-\u9fa50-9\s] dropped ASCII
        # letters, contradicting the intended tokenization
        # ("精磨A面" -> ["精","磨","A","面"]). Keep CJK, letters, digits,
        # and whitespace.
        text = re.sub(r'[^\u4e00-\u9fa5A-Za-z0-9\s]', '', text)
        # Single-character tokenization, dropping whitespace characters
        return [char for char in text if char.strip()]

    # Tokenize the standard names
    tokenized_corpus = [preprocess(name) for name in standard_names]

    # BM25Okapi raises (division by zero) on an empty corpus — guard it.
    if not tokenized_corpus:
        print("❌ 标准工序文件无有效数据，无法进行模糊匹配")
        return {}

    # Build the BM25 model over the standard-name corpus
    bm25 = BM25Okapi(tokenized_corpus)

    # Score each unmatched name against the corpus
    match_results = {}
    for name in unmatched_names:
        query_tokens = preprocess(name)
        if not query_tokens:
            continue

        # BM25 score of the query against every standard name
        scores = bm25.get_scores(query_tokens)

        # Pick the best-scoring standard name
        max_idx = scores.argmax()
        max_score = scores[max_idx]
        best_match_name = standard_names[max_idx]
        best_match_mpm = standard_mpm[best_match_name]

        # Apply the acceptance threshold
        if max_score >= threshold:
            match_results[name] = (best_match_name, max_score, best_match_mpm)
            print(f"🔍 模糊匹配: '{name}' -> '{best_match_name}' (分数: {max_score:.4f})")
        else:
            match_results[name] = (None, max_score, None)
            print(f"❌ 无有效匹配: '{name}' (最高分数: {max_score:.4f} < {threshold})")

    return match_results

# Apply fuzzy-match results back onto the graph nodes
def update_nodes_with_fuzzy_matches(graph, fuzzy_matches):
    """Write mpm_id plus fuzzy-match metadata onto matching step nodes."""
    updated = 0
    # Pull every step node so each can be looked up by its step name
    all_nodes = [node_to_dict_with_Id(rec["n"]) for rec in graph.run("MATCH (n:`工步`) RETURN n")]

    for entry in all_nodes:
        node_id = entry["identity"]
        step_name = entry["properties"].get("工步名称")

        if step_name not in fuzzy_matches:
            continue
        best_name, score, mpm_code = fuzzy_matches[step_name]
        if best_name is None:  # score fell below the threshold — skip
            continue

        graph.run(
            "MATCH (n) WHERE id(n) = $id SET n.mpm_id = $mpm, n.fuzzy_match = $match_name, n.match_score = $score",
            id=node_id,
            mpm=mpm_code,
            match_name=best_name,
            score=float(score)
        )
        updated += 1
        print(f"✅ 节点 {node_id} 通过模糊匹配添加mpm_id: {mpm_code} (匹配: {best_name})")

    print(f"\n模糊匹配更新完成，共更新 {updated} 个节点")
    return updated

# Main execution flow
if __name__ == "__main__":
    # Connect to the graph database; bail out if unavailable
    graph = init_neo4j_connection()
    if not graph:
        exit(1)
    
    # Path to the standard-process CSV (relative to the working directory)
    standard_process_file = os.path.join("data", "标准工步.csv")
    
    # Pass 1: exact name matching; collect names that did not match
    _, unmatched_names = process_mpm_id_mapping(graph, standard_process_file)
    
    # Report the names that exact matching could not resolve
    print("\n未精确匹配的工步名称：")
    for name in unmatched_names:
        print(f"- {name}")
    
    # Pass 2: BM25 fuzzy matching over the leftovers (lower threshold here
    # than the function's 0.3 default — results are report-only by default)
    if unmatched_names:
        print("\n开始模糊匹配...")
        fuzzy_results = bm25_fuzzy_matching(unmatched_names, standard_process_file, threshold=0.2)
        
        # Uncomment the line below to write fuzzy matches back to the graph
        # update_nodes_with_fuzzy_matches(graph, fuzzy_results)