import pandas as pd
import numpy as np
import os
from langchain.document_loaders import PyPDFLoader, UnstructuredPDFLoader, PyPDFium2Loader
from langchain.document_loaders import PyPDFDirectoryLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pathlib import Path
import random
from df_helpers import df2Graph
from df_helpers import graph2Df
from df_helpers import documents2Dataframe
from py2neo import Graph, Node, Relationship
import tempfile

def contextual_proximity(df: pd.DataFrame) -> pd.DataFrame:
    """Derive "contextual proximity" edges from an edge DataFrame.

    Two nodes are contextually proximate when they appear in the same text
    chunk. Pairs that co-occur only once are discarded.

    Args:
        df: DataFrame with at least ``chunk_id``, ``node_1``, ``node_2``.

    Returns:
        DataFrame with columns ``node_1``, ``node_2``, ``chunk_id`` (the
        comma-joined chunk ids of every co-occurrence), ``count`` (number of
        co-occurrences, always > 1) and ``edge`` (the constant label
        "contextual proximity"). Pairs appear in both orientations.
    """
    # One row per (chunk_id, node): unpivot both node columns.
    nodes_by_chunk = df.melt(
        id_vars=["chunk_id"], value_vars=["node_1", "node_2"], value_name="node"
    ).drop(columns=["variable"])

    # Self-join on chunk_id pairs up every two nodes sharing a chunk.
    pairs = nodes_by_chunk.merge(nodes_by_chunk, on="chunk_id", suffixes=("_1", "_2"))

    # Discard self-loops (a node paired with itself).
    pairs = pairs[pairs["node_1"] != pairs["node_2"]].reset_index(drop=True)

    # Aggregate each pair: join the chunk ids, count the co-occurrences.
    proximity = pairs.groupby(["node_1", "node_2"], as_index=False).agg(
        chunk_id=("chunk_id", ",".join),
        count=("chunk_id", "count"),
    )

    # Drop rows with blank node names, then pairs seen only once.
    proximity.replace("", np.nan, inplace=True)
    proximity.dropna(subset=["node_1", "node_2"], inplace=True)
    proximity = proximity[proximity["count"] != 1]

    proximity["edge"] = "contextual proximity"
    return proximity

def safe_import_df_to_neo4j(df, graph, batch_size=1000):
    """Load edge rows from *df* into Neo4j using batched transactions.

    Each row must expose ``node_1``, ``node_2``, ``edge``, ``chunk_id`` and
    ``count`` attributes. Nodes are labelled "sanguo", created once, and
    cached for reuse across rows. A transaction is committed every
    ``batch_size`` rows, plus a final commit for the remainder.

    Args:
        df: pandas DataFrame of edges (iterated via ``itertuples``).
        graph: py2neo ``Graph`` connection.
        batch_size: rows per transaction commit.
    """
    seen_nodes = {}
    tx = graph.begin()

    def node_for(name):
        # Create the node on first sight; return the cached object after.
        # (Reads the enclosing `tx`, which is rebound after each batch.)
        if name not in seen_nodes:
            fresh = Node("sanguo", name=name)
            seen_nodes[name] = fresh
            tx.create(fresh)
        return seen_nodes[name]

    for row_no, record in enumerate(df.itertuples(), 1):
        source = node_for(record.node_1)
        target = node_for(record.node_2)

        # The aggregated edge label is comma-joined; use its first token
        # as the relationship type.
        rel = Relationship(
            source,
            record.edge.split(',')[0],
            target,
            chunk_ids=record.chunk_id,
            count=record.count,
        )
        tx.create(rel)

        # Commit in batches, then open a fresh transaction.
        if row_no % batch_size == 0:
            tx.commit()
            tx = graph.begin()

    tx.commit()

def analyze(pdf_bytes, output_dir="./", regenerate=True):
    """Build a knowledge graph from a PDF (as bytes) and import it into Neo4j.

    Pipeline: write the bytes to a temp file -> load & chunk the PDF ->
    extract concept edges with an LLM (or reload cached CSVs) -> add
    contextual-proximity edges -> aggregate -> import into Neo4j.

    Args:
        pdf_bytes: raw PDF content.
        output_dir: directory where ``graph.csv`` / ``chunks.csv`` are written
            (and ``graph.csv`` is read back when ``regenerate`` is False).
        regenerate: when True, run LLM extraction and rewrite the CSVs;
            when False, reuse a previously saved ``graph.csv``.

    Returns:
        The tuple ``("1", "sanguo")`` on success.

    Raises:
        FileNotFoundError: if ``regenerate`` is False and ``graph.csv`` is
            missing from ``output_dir``.
    """
    # PyPDFLoader needs a filesystem path, so persist the bytes first.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
        tmp_file.write(pdf_bytes)
        tmp_file_path = tmp_file.name

    try:
        loader = PyPDFLoader(tmp_file_path)
        documents = loader.load()

        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1500,
            chunk_overlap=150,
            length_function=len,
            is_separator_regex=False,
        )

        pages = splitter.split_documents(documents)
        print(f"处理完成，生成数据块数量 = {len(pages)}")

        # Convert chunks to a DataFrame.
        df = documents2Dataframe(pages)

        if regenerate:
            # LLM-based concept extraction (slow); cache the results as CSV.
            concepts_list = df2Graph(df, model='qwen2.5:1.5b')
            dfg1 = graph2Df(concepts_list)

            # exist_ok=True avoids the check-then-create race of the old
            # `if not os.path.exists(...)` guard.
            os.makedirs(output_dir, exist_ok=True)

            dfg1.to_csv(os.path.join(output_dir, "graph.csv"), sep="|", index=False)
            df.to_csv(os.path.join(output_dir, "chunks.csv"), sep="|", index=False)
        else:
            dfg1 = pd.read_csv(os.path.join(output_dir, "graph.csv"), sep="|")

        # Drop incomplete edges; every direct LLM edge gets a base weight of 4.
        dfg1.replace("", np.nan, inplace=True)
        dfg1.dropna(subset=["node_1", "node_2", 'edge'], inplace=True)
        dfg1['count'] = 4

        # Merge in co-occurrence edges and collapse duplicate node pairs,
        # concatenating provenance (chunk ids, edge labels) and summing weights.
        dfg2 = contextual_proximity(dfg1)
        dfg = pd.concat([dfg1, dfg2], axis=0)
        dfg = (
            dfg.groupby(["node_1", "node_2"])
            .agg({"chunk_id": ",".join, "edge": ','.join, 'count': 'sum'})
            .reset_index()
        )

        # SECURITY: hard-coded credentials — move to env vars/config before
        # deploying outside a local dev setup.
        graph = Graph("bolt://localhost:7687", auth=("neo4j", "12345678"))
        safe_import_df_to_neo4j(dfg, graph)

        return "1", "sanguo"

    finally:
        # Always remove the temp file, even when the pipeline fails.
        if os.path.exists(tmp_file_path):
            os.remove(tmp_file_path)