import os
import pandas as pd
import re
from dotenv import load_dotenv
from py2neo import Graph, Node
from rank_bm25 import BM25Okapi
import jieba

# ----------------------------
# Configuration and utility functions
# ----------------------------
# Load NEO4J_URI / NEO4J_USER / NEO4J_PASSWORD from a local .env file, if present.
load_dotenv()



# Tokens dropped during tokenization before BM25 scoring: common Chinese
# function words, a few numerals/measure words, and whitespace/empty strings.
CHINESE_STOPWORDS = {
    '的', '了', '和', '是', '就', '都', '而', '及', '与', '或', '等', '在', '上', '下', '中', '内', '外',
    '一', '二', '三', '1', '2', '3', '个', '件', '种', '类', ' ', '\t', '\n', ''
}

def preprocess_text(text):
    """Tokenize *text* with jieba and drop stopword/blank tokens.

    Returns a list of stripped tokens; ``None``/NaN/blank input yields ``[]``.
    """
    # Guard: None, empty string, or a pandas-style NaN value.
    if not text or pd.isna(text):
        return []
    cleaned = str(text).strip()
    if not cleaned:
        return []
    # Stopword membership is tested on the raw token; the stripped form is kept.
    return [tok.strip() for tok in jieba.lcut(cleaned)
            if tok.strip() and tok not in CHINESE_STOPWORDS]

def node_to_dict_with_Id(node: Node):
    """Flatten a py2neo Node into a plain dict.

    The result has three keys: ``identity`` (internal Neo4j id),
    ``labels`` (as a list), and ``properties`` (the node's property map).
    """
    return dict(
        identity=node.identity,
        labels=list(node.labels),
        properties=dict(node),
    )

# ----------------------------
# Neo4j 连接
# ----------------------------
# Credentials come from the environment (populated by load_dotenv above).
NEO4J_URI = os.getenv("NEO4J_URI")
NEO4J_USER = os.getenv("NEO4J_USER")
NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD")
# Connects at import time; Graph()/run() raise on an unreachable server, so
# the explicit RETURN 1 probe mainly guards against an empty reply.
graph = Graph(NEO4J_URI, auth=(NEO4J_USER, NEO4J_PASSWORD))
if not graph.run("RETURN 1").data():
    raise ConnectionError("❌ 连接Neo4j失败")
print("✅ 连接Neo4j成功")

# ----------------------------
# 全局类型映射（用于辅料/设备/制造资源）
# ----------------------------
# Maps a substring of the standard-library source file name to the MPM
# type / modelDefinition pair that should be written back to Neo4j.
SOURCE_MAPPING = {
    '辅料': {'type': 'PartIteration', 'modelDefinition': 'Accessories'},
    '加工检测设备': {'type': 'ResourceIteration', 'modelDefinition': 'Equipment'},
    '加工工具': {'type': 'ResourceIteration', 'modelDefinition': 'Tool'},
    '工装工具': {'type': 'ResourceIteration', 'modelDefinition': 'Frock'},
    '计量器具': {'type': 'ResourceIteration', 'modelDefinition': 'MeasuringInstrument'}
}

def get_type_and_model(source: str):
    """Resolve *source* (a file name) to its (type, modelDefinition) pair.

    The first SOURCE_MAPPING key found as a substring of *source* wins
    (insertion order); ('', '') is returned when nothing matches.
    """
    hit = next(
        (entry for key, entry in SOURCE_MAPPING.items() if key in source),
        None,
    )
    if hit is None:
        return '', ''
    return hit['type'], hit['modelDefinition']

# ----------------------------
# 通用 BM25 匹配函数（用于辅料/设备/制造资源）
# ----------------------------
def match_nodes_to_standard(
    node_label: str,
    name_property: str,
    data_dir: str,
    output_filename: str,
    source_mapping_func,
    min_score: float = 0.0
):
    """Match Neo4j nodes against a standard-library CSV corpus via BM25.

    Loads every CSV in *data_dir* (each must contain 编码/名称/规格型号
    columns), builds a BM25 index over the combined "名称 规格型号" text,
    then records the best-scoring match for each `node_label` node's
    *name_property* value.  Results are written to
    OUTPUT_DIR/output_filename (OUTPUT_DIR is a module-level global set in
    the __main__ section) and returned as a DataFrame.

    Parameters
    ----------
    node_label : Neo4j label to query (e.g. "辅料").
    name_property : node property holding the name to match on.
    data_dir : directory of standard-library CSV files.
    output_filename : file name for the result CSV.
    source_mapping_func : callable(source_filename) -> (type, modelDefinition).
    min_score : best matches scoring below this BM25 threshold are skipped.
        The default 0.0 keeps every best match (legacy behavior).
    """
    # 1. Load and merge the standard-library CSVs.
    df_list = []
    for f in os.listdir(data_dir):
        if not f.endswith('.csv'):
            continue
        df = pd.read_csv(os.path.join(data_dir, f))
        required_cols = {'编码', '名称', '规格型号'}
        if not required_cols.issubset(df.columns):
            print(f"⚠️ 文件 {f} 缺少必要列，跳过")
            continue
        df = df[['编码', '名称', '规格型号']].copy()
        df['数据来源'] = f  # keep the full file name for source mapping
        df_list.append(df)
    if not df_list:
        print(f"⚠️ {data_dir} 中无有效 CSV 文件")
        return pd.DataFrame()
    df = pd.concat(df_list, ignore_index=True)
    print(f"✅ 合并完成，共 {len(df)} 条记录")

    # 2. Fetch the candidate nodes from Neo4j.
    query = f"MATCH (n:`{node_label}`) RETURN n"
    part_dict = [node_to_dict_with_Id(record["n"]) for record in graph.run(query)]
    print(f"✅ 查询到 {len(part_dict)} 个 {node_label} 节点")

    # 3. Build the BM25 corpus from name + spec text.
    #    BUGFIX: rows where both fields are NaN previously produced the
    #    literal string "nan"; they now yield an empty document instead.
    def _row_text(row):
        parts = [str(v) for v in (row['名称'], row['规格型号']) if pd.notna(v)]
        return ' '.join(parts)

    df['combined_text'] = df.apply(_row_text, axis=1)
    corpus = [preprocess_text(text) for text in df['combined_text']]
    if not any(corpus):
        # Guard: BM25Okapi divides by the average document length, which is
        # zero when every document tokenizes to nothing.
        print(f"⚠️ {data_dir} 语料为空，无法构建 BM25 索引")
        return pd.DataFrame()
    bm25 = BM25Okapi(corpus)

    # 4. Score each node name against the corpus; keep the top BM25 hit.
    results = []
    for part in part_dict:
        part_name = part['properties'].get(name_property, '')
        if not part_name:
            continue  # unnamed nodes cannot be matched
        query_tokens = preprocess_text(part_name)
        if not query_tokens:
            continue  # name tokenized to nothing (all stopwords)
        scores = bm25.get_scores(query_tokens)
        max_idx = int(scores.argmax())
        max_score = scores[max_idx]
        if max_score < min_score:
            continue  # below caller-requested confidence threshold
        matched_row = df.iloc[max_idx]
        t, m = source_mapping_func(matched_row['数据来源'])
        results.append({
            'neo4j_ID': part['identity'],
            'neo4j_名称': part_name,
            'standard_名称': matched_row['名称'],
            'standard_规格型号': matched_row['规格型号'],
            'standard_编码': matched_row.get('编码', ''),
            'standard_来源': matched_row.get('数据来源', ''),
            'max_score': max_score,
            'type': t,
            'modelDefinition': m
        })

    results_df = pd.DataFrame(results)
    output_path = os.path.join(OUTPUT_DIR, output_filename)
    results_df.to_csv(output_path, index=False, encoding='utf-8-sig')
    print(f"✅ {node_label} 匹配结果已保存至：{output_path}")
    return results_df

# ----------------------------
# 工步处理（含模糊匹配）
# ----------------------------
def process_steps_with_fuzzy_match(standard_file: str):
    """Match 工步 (work-step) nodes against the standard step catalogue.

    Exact name matches are tried first; unmatched names fall through to a
    character-level BM25 fuzzy match.  Results are written to
    OUTPUT_DIR/"out-工步匹配结果.csv" (OUTPUT_DIR is a module-level global set
    in the __main__ section) and returned as a DataFrame.

    Raises ValueError when the required columns are missing from *standard_file*.
    """
    # Exact matching
    df = pd.read_csv(standard_file)
    match_col, mpm_id_col = "*标准工步名称", "*标准工步编码"
    if match_col not in df.columns or mpm_id_col not in df.columns:
        raise ValueError(f"❌ 标准工步文件缺少必要列")

    # Exact name -> standard code lookup (rows with NaN in either column are
    # skipped).  NOTE(review): duplicate standard names collapse to the last
    # row's code — confirm names are unique in the catalogue.
    process_map = {
        row[match_col]: row[mpm_id_col]
        for _, row in df.iterrows()
        if pd.notna(row[match_col]) and pd.notna(row[mpm_id_col])
    }

    nodes = [node_to_dict_with_Id(record["n"]) for record in graph.run("MATCH (n:`工步`) RETURN n")]
    exact_results = []
    unmatched_names = []

    for node in nodes:
        name = node["properties"].get("工步名称")
        if not name:
            continue  # unnamed nodes cannot be matched
        mpm = process_map.get(name)
        if mpm:
            exact_results.append({
                'neo4j_ID': node["identity"],
                'neo4j_名称': name,
                'standard_编码': mpm,
                'type': 'ProcessIteration',
                'modelDefinition': 'ProcessStep',
                'match_type': 'exact'
            })
        else:
            unmatched_names.append(name)

    # Fuzzy matching (single-character tokenization)
    fuzzy_results = []
    if unmatched_names:
        valid_data = df[df[match_col].notna() & df[mpm_id_col].notna()][[match_col, mpm_id_col]]
        standard_names = valid_data[match_col].tolist()
        standard_mpm = dict(zip(standard_names, valid_data[mpm_id_col]))

        def preprocess_char(text):
            # Keep only CJK characters, digits and whitespace, then split
            # into individual characters so BM25 can score partial overlaps.
            text = str(text).strip()
            text = re.sub(r'[^\u4e00-\u9fa50-9\s]', '', text)
            return [c for c in text if c.strip()]

        tokenized_corpus = [preprocess_char(name) for name in standard_names]
        bm25 = BM25Okapi(tokenized_corpus)

        for name in unmatched_names:
            tokens = preprocess_char(name)
            if not tokens:
                continue
            scores = bm25.get_scores(tokens)
            max_idx = scores.argmax()
            max_score = scores[max_idx]
            if max_score >= 0.2:  # score threshold; presumably tuned empirically — TODO confirm
                best_name = standard_names[max_idx]
                fuzzy_results.append({
                    'neo4j_ID': None,  # not known yet; back-filled below from the node-name index
                    'neo4j_名称': name,
                    'standard_编码': standard_mpm[best_name],
                    'type': 'ProcessIteration',
                    'modelDefinition': 'ProcessStep',
                    'match_type': 'fuzzy',
                    'fuzzy_score': max_score,
                    'fuzzy_match_name': best_name
                })

    # Back-fill neo4j_ID for fuzzy rows.  NOTE(review): if several 工步 nodes
    # share one name, only the last node's identity survives this map, so all
    # same-named fuzzy rows receive that single ID — confirm names are unique.
    all_step_nodes = {n["properties"].get("工步名称"): n["identity"] for n in nodes}
    for item in fuzzy_results:
        item['neo4j_ID'] = all_step_nodes.get(item['neo4j_名称'])

    # Merge exact and fuzzy results
    all_results = exact_results + fuzzy_results
    results_df = pd.DataFrame(all_results)
    if not results_df.empty:
        output_path = os.path.join(OUTPUT_DIR, "out-工步匹配结果.csv")
        results_df.to_csv(output_path, index=False, encoding='utf-8-sig')
        print(f"✅ 工步匹配结果已保存至：{output_path}")
    else:
        # Redundant reassignment (results_df is already an empty DataFrame).
        results_df = pd.DataFrame()
    return results_df

# ----------------------------
# 工序处理（仅精确匹配）
# ----------------------------
def process_processes(standard_file: str):
    """Match 工序 (process) nodes to the standard catalogue by exact name.

    Reads *standard_file* (must contain *标准工序名称 / *标准工序编码 columns),
    looks up every 工序 node's 工序名称 property in it, and records the
    matches.  Results are written to OUTPUT_DIR/"out-工序匹配结果.csv"
    (OUTPUT_DIR is a module-level global set in the __main__ section) and
    returned as a DataFrame (empty when nothing matched).

    Raises ValueError when the required columns are missing.
    """
    df = pd.read_csv(standard_file)
    match_col, mpm_id_col = "*标准工序名称", "*标准工序编码"
    if match_col not in df.columns or mpm_id_col not in df.columns:
        # FIX: was a placeholder-less f-string; plain literal, same text.
        raise ValueError("❌ 标准工序文件缺少必要列")

    # Exact name -> standard code lookup (rows with NaN in either column are
    # skipped; duplicate names keep the last row's code).
    process_map = {
        row[match_col]: row[mpm_id_col]
        for _, row in df.iterrows()
        if pd.notna(row[match_col]) and pd.notna(row[mpm_id_col])
    }

    nodes = [node_to_dict_with_Id(record["n"]) for record in graph.run("MATCH (n:`工序`) RETURN n")]
    results = []
    for node in nodes:
        name = node["properties"].get("工序名称")
        if not name:
            continue  # unnamed nodes cannot be matched
        mpm = process_map.get(name)
        if mpm:
            results.append({
                'neo4j_ID': node["identity"],
                'neo4j_名称': name,
                'standard_编码': mpm,
                'type': 'ProcessIteration',
                'modelDefinition': 'Process',
                'match_type': 'exact'
            })

    results_df = pd.DataFrame(results)
    if not results_df.empty:
        output_path = os.path.join(OUTPUT_DIR, "out-工序匹配结果.csv")
        results_df.to_csv(output_path, index=False, encoding='utf-8-sig')
        print(f"✅ 工序匹配结果已保存至：{output_path}")
    # FIX: removed the dead else-branch that reassigned an already-empty
    # DataFrame; results_df is returned unchanged when there are no matches.
    return results_df

# ----------------------------
# 主流程
# ----------------------------
if __name__ == "__main__":
    # Absolute path of the directory containing this script; all data and
    # output paths are resolved relative to it.
    SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

    DATA_BM25_DIR = os.path.join(SCRIPT_DIR, "data_bm25_2")
    STANDARD_STEP_FILE = os.path.join(SCRIPT_DIR, "data", "标准工步.csv")
    STANDARD_PROC_FILE = os.path.join(SCRIPT_DIR, "data", "标准工序.csv")

    # NOTE: the matching functions above read OUTPUT_DIR as a module-level
    # global, so it must be assigned here before any of them is called.
    OUTPUT_DIR = os.path.join(SCRIPT_DIR, 'output')
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # 1. Accessories (辅料)
    df1 = match_nodes_to_standard(
        node_label="辅料",
        name_property="辅料名称",
        data_dir=DATA_BM25_DIR,
        output_filename="out-辅料匹配结果.csv",
        source_mapping_func=get_type_and_model
    )

    # 2. Equipment (设备)
    df2 = match_nodes_to_standard(
        node_label="设备",
        name_property="设备名称",
        data_dir=DATA_BM25_DIR,
        output_filename="out-设备匹配结果.csv",
        source_mapping_func=get_type_and_model
    )

    # 3. Manufacturing resources (制造资源)
    df3 = match_nodes_to_standard(
        node_label="制造资源",
        name_property="资源名称",
        data_dir=DATA_BM25_DIR,
        output_filename="out-制造资源匹配结果.csv",
        source_mapping_func=get_type_and_model
    )

    # 4. Work steps (工步), including fuzzy matching
    df4 = process_steps_with_fuzzy_match(STANDARD_STEP_FILE)

    # 5. Processes (工序), exact matching only
    df5 = process_processes(STANDARD_PROC_FILE)

    # Merge every non-empty result set
    all_dfs = [df for df in [df1, df2, df3, df4, df5] if not df.empty]
    if not all_dfs:
        print("⚠️ 无任何匹配结果，跳过更新")
        exit(0)  # NOTE(review): sys.exit() is the conventional form in scripts

    final_df = pd.concat(all_dfs, ignore_index=True)

    # Deduplicate by neo4j_ID, keeping the first match encountered
    final_df = final_df.drop_duplicates(subset=['neo4j_ID'], keep='first')

    # Write the matched codes back onto the Neo4j nodes
    updated = 0
    for _, row in final_df.iterrows():
        # NOTE(review): int() raises if neo4j_ID is None/NaN — possible for
        # fuzzy step rows whose node-name lookup failed; confirm upstream.
        graph.run(
            """
            MATCH (n) WHERE id(n) = $id
            SET n.mpm_id = $mpm_id,
                n.mpm_type = $mpm_type,
                n.mpm_modelDefinition = $mpm_modelDefinition
            """,
            id=int(row["neo4j_ID"]),
            mpm_id=row["standard_编码"],
            mpm_type=row["type"],
            mpm_modelDefinition=row["modelDefinition"]
        )
        updated += 1

    print(f"\n🎉 全部完成！共更新 {updated} 个节点")
    print(f"中间文件已保存至：{OUTPUT_DIR}/")