import pandas as pd
import neo4j
from neo4j import GraphDatabase
import time
from loguru import logger
import math

# Render wide DataFrames fully when echoed (debug visibility).
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
pd.set_option('display.max_colwidth', 5000)

# Mirror all DEBUG-and-above records to a dedicated log file.
logger.add("for-debug-rag.log", level="DEBUG")

NEO4J_URI = "neo4j://localhost:57687"  # or neo4j+s://xxxx.databases.neo4j.io
NEO4J_USERNAME = "neo4j"
NEO4J_PASSWORD = "vPn**12@@56" # your own password
NEO4J_DATABASE = "neo4jnewnew"

# True when the parquet artifacts come from GraphRAG 0.3.0 (column layout differs).
_030_version = True
GRAPHRAG_FOLDER = "output/20250312-211523/artifacts"
catelogy_tag = "论文图谱"
doc_type = '论文'

PART_NUMBER = ""
# Tag stamped on every imported node/relationship so multiple imports can coexist.
task_tag=doc_type+"_"+catelogy_tag+PART_NUMBER

relationship_keywords=True  # relationships parquet carries a relationship_keywords column
create_index = False  # run the index-creation statements before importing
_patch_ = True  # also link imported graph to pre-existing patent/paper nodes
_only_path_ = False  # when True, skip bulk imports and run only the patch steps
# Create a Neo4j driver
driver = GraphDatabase.driver(NEO4J_URI, auth=(NEO4J_USERNAME, NEO4J_PASSWORD))
error_query  = []  # Cypher statements that failed during relationship import
# Index statements covering the labels used below.  Note: __Document__ /
# __Chunk__ / __Entity__ / __RELATIONSHIP__ / __Node__ / __Community__ /
# __CommunityReports__ ids may repeat across task_tags, hence composite
# (task_tag, id, ...) indexes rather than uniqueness constraints.
statements =[ 
#"create constraint document_id if not exists for (d:`__Document__`) require d.id is unique;",
#"create constraint chunk_id if not exists for (c:`__Chunk__`) require c.id is unique;",
#"MERGE (n:`__Entity__` {compositeKey: n.id+ ':' + n.name});",
#"create constraint on (n:`__Entity__`) assert n.compositeKey is unique;",
"create index tag_id_index_doc for (r:`__Document__`) on (r.task_tag,r.id);",
"create index tag_id_index for (r:`__Chunk__`) on (r.task_tag,r.id);",
"create index id_index for (r:`__Chunk__`) on (r.id);",
"create index if not exists for (n:`__Entity__`) ON (n.task_tag,n.id,n.name,n.type);",
"create index id_tag_index_relationship for ()-[r:`相关实体`]-() on (r.task_tag,r.id);",
"create index id_name_index_node for (r:`__Node__`) on (r.task_tag,r.id,r.title);",
"create index if not exists for (n:`__Community__`) ON (n.task_tag,n.community_id, n.title,n.level);",
"create index if not exists for (n:`__Community__`) ON (n.community_id,n.community_index,n.level,n.task_tag);",
"create index if not exists for (n:`__Finding__`) ON (n.task_tag,n.id,n.community);",
"create index if not exists for (n:`__Entity__`) ON (n.name,n.task_tag);",
"create index if not exists for (n:`__Entity__`) ON (n.task_tag);",
"create index if not exists for (n:`__Entity__`) ON (n.id,n.name,n.type);",
"create index if not exists for (n:`__Document__`) ON (n.title,n.task_tag);",
]

# Optionally create the indexes above (guarded by the create_index flag;
# off by default because they only need to exist once per database).
if create_index:
    with driver.session(database=NEO4J_DATABASE) as session:
        for statement in statements:
            #if len((statement or "").strip()) > 0:
                #session.execute(statement)
            result = session.run(statement)
            logger.debug('statement {}'.format(statement))

def batched_import(statement, df, batch_size=1000, step_name=None):
    """Import a dataframe into Neo4j in batches.

    Each batch is sent as a single query that UNWINDs the batch's row dicts,
    so `statement` must reference each row as `value`.

    Parameters:
        statement: Cypher fragment executed once per row (prefixed with UNWIND).
        df: pandas DataFrame whose records are imported.
        batch_size: number of rows per round trip.
        step_name: label used in progress logging.

    Returns:
        Total number of rows in `df` (attempted — failed batches are logged
        and skipped, keeping the import best-effort like the rest of the file).
    """
    total = len(df)
    total_batches = math.ceil(total / batch_size)
    # The query text is loop-invariant; build it once instead of per batch.
    query = "UNWIND $rows AS value " + statement
    batch_no = 0
    for start in range(0, total, batch_size):
        batch_no += 1
        batch = df.iloc[start: min(start + batch_size, total)]
        rows = batch.to_dict('records')
        logger.info("step {} current {}/{},current total size {}".format(step_name, batch_no, total_batches, len(rows)))
        try:
            driver.execute_query(query,
                                 rows=rows,
                                 database_=NEO4J_DATABASE)
        except Exception as e:
            logger.error('Exception {} query {}.'.format(e, query))
    return total


def batched_import_patent_document(df, batch_size=1000):
    """Link imported __Document__ nodes to their pre-existing patent/paper nodes.

    For every document row, derives the Elasticsearch id from the title by
    dropping a trailing '.txt' extension, then MERGEs a `相关内容` edge from
    the `doc_type` node to the document and stores the id on the document.

    Returns the number of rows processed; individual query failures are
    logged and skipped.
    """
    total = len(df)
    count_ = 0
    for start in range(0, total, batch_size):
        batch = df.iloc[start: min(start + batch_size, total)]
        for ro in batch.to_dict('records'):
            count_ = count_ + 1
            logger.info("step patent_document current {}/{}".format(count_, total))
            _title_ = ro['title']
            # BUGFIX: rstrip('.txt') strips *characters* from the set {., t, x}
            # (e.g. 'report.txt' -> 'repor'); strip the '.txt' suffix instead.
            _id_elasticsearch = _title_[:-4] if _title_.endswith('.txt') else _title_
            logger.info("current {}/{},_id_elasticsearch {}".format(count_, total, _id_elasticsearch))
            # NOTE(review): Cypher is built by string interpolation; titles
            # containing quotes will break it — consider query parameters.
            query = "MATCH (source:`{}` {{_id_elasticsearch:'{}'}}), (target:`__Document__` {{title:'{}',task_tag:'{}'}}) MERGE (source)-[rel:`相关内容`]->(target) SET target.patent={}".format(doc_type, _id_elasticsearch, _title_, task_tag, _id_elasticsearch)
            try:
                logger.info("current {}/{},_id_elasticsearch {},query {}".format(count_, total, _id_elasticsearch, query))
                driver.execute_query(query)
            except Exception as e:
                # Single log entry (the original logged the same error twice).
                logger.error("query  {} exception {}".format(query, e))
    return total

def batched_import_patent_chunk(df, batch_size=1000):
    """Link imported __Chunk__ nodes to their pre-existing patent/paper nodes.

    For every chunk row and each of its document_ids, copies patent metadata
    from the matching `doc_type` node onto the chunk and MERGEs a PART_OF edge
    from the chunk to that node.  Failures are logged and skipped.

    Returns the number of chunk rows processed.
    """
    total = len(df)
    count_ = 0
    for start in range(0, total, batch_size):
        batch = df.iloc[start: min(start + batch_size, total)]
        for ro in batch.to_dict('records'):
            count_ = count_ + 1
            logger.info("step patent_chunk current {}/{}".format(count_, total))
            document_ids_ = ro['document_ids']
            chunk_id = ro['id']
            for doc_id in document_ids_:
                query = "MATCH (c:__Chunk__) where c.id='{}' and c.task_tag='{}' MATCH (d:`__Document__`) where d.id_id='{}' and d.task_tag='{}' with c, d MATCH (p:`{}` {{_id_elasticsearch:toString(d.patent)}}) set c.patent_title=p.patent_title,c.patent_name=p.patent_name,c._id_elasticsearch=p._id_elasticsearch,c.description=p.abstract MERGE (c)-[:PART_OF]->(p)".format(chunk_id, task_tag, doc_id, task_tag, doc_type)
                logger.info("current {}/{},chunk id {},query {}".format(count_, total, chunk_id, query))
                try:
                    driver.execute_query(query)
                except Exception as e:
                    # Single log entry (the original logged the same error twice).
                    logger.error("query  {} exception {}".format(query, e))
    return total

def batched_import_relationship(df, batch_size=1000):
    """Import relationship rows as `相关实体` edges between __Entity__ nodes.

    Each row is MERGEd on (source name, target name, relationship id) within
    the current task_tag; descriptive properties are SET on the edge.  Failing
    queries are appended to the module-level `error_query` list and skipped.

    Returns the number of rows processed.
    """
    total = len(df)
    count_ = 0
    time_cost = 0
    for start in range(0, total, batch_size):
        batch = df.iloc[start: min(start + batch_size, total)]
        for ro in batch.to_dict('records'):
            start_s = time.time()
            count_ = count_ + 1
            logger.info("step relationship current {}/{},last time cost {}".format(count_, total, time_cost))
            r_weight_ = ro['weight']
            human_readable_id_ = ro['human_readable_id']
            id_ = ro['id']
            r_combined_degree_source = ro.get('source_degree', "")
            r_combined_degree_target = ro.get('target_degree', "")
            r_combined_degree = ro.get('combined_degree', "")
            # All edges share one relationship type; the description is kept as
            # a property.  (The original sanitised the description into r_name
            # and then immediately overwrote it — dead code, removed.)
            r_name = '相关实体'
            # Strip quote characters that would break the interpolated Cypher.
            r_description = ro['description'].replace("'", '').replace('"', '')
            ro_source = ro['source'].replace('"', '').replace("'", '')
            ro_relationship_keywords = ro.get('relationship_keywords', "").replace('"', '')
            ro_target = ro['target'].replace('"', '').replace("'", '')
            query = "MATCH (a:`__Entity__` {{name:{},task_tag:'{}'}}), (b:`__Entity__` {{name:{},task_tag:'{}'}}) MERGE (a)-[r:`{}` {}]->(b) SET r.source_degree= '{}',r.target_degree='{}',r.weight = {},r.human_readable_id = {},r.description = '{}',r.relationship_keywords='{}',r.combined_degree='{}',r.task_tag='{}' ".format('"'+ro_source+'"', task_tag, '"'+ro_target+'"', task_tag, r_name, "{id:'"+id_+"'}", r_combined_degree_source, r_combined_degree_target, r_weight_, human_readable_id_, r_description, ro_relationship_keywords, r_combined_degree, task_tag)
            try:
                driver.execute_query(query)
            except Exception as e:
                # The original chained four except clauses: a duplicated
                # (unreachable) CypherSyntaxError handler and a reference to
                # neo4j.DatabaseError.Schema.TokenLimitReached, which is not a
                # Python exception class (evaluating it while handling an error
                # raises AttributeError and masks the real failure).  All
                # handlers did identical work, so one handler is equivalent.
                logger.error("query  {} exception {}".format(query, e))
                error_query.append(query)
                continue
            time_cost = 1000 * (time.time() - start_s)
    return total

def batched_import_community(df, batch_size=6000, task_tag="未设置"):
    """MERGE one __Community__ node per row, keyed on (task_tag, id, title, level)."""
    total = len(df)
    done = 0
    # 0.3.0 output names the raw community column 'raw_community';
    # older output names it 'community'.
    raw_key = 'raw_community' if _030_version else 'community'
    for offset in range(0, total, batch_size):
        chunk = df.iloc[offset: min(offset + batch_size, total)]
        for ro in chunk.to_dict('records'):
            done += 1
            logger.info("step community current {}/{}".format(done, total))
            # Mind the MERGE match keys: numeric values (level) must NOT be
            # quoted, otherwise the pattern never matches and nodes duplicate.
            query = (
                "MERGE (c:`__Community__` {{task_tag:'{}',community_id:'{}',title:'{}',level:{}}}) "
                "SET c.task_tag='{}',c.level= {},c.title= '{}',c.raw_community='{}',c.community_index='{}'"
            ).format(task_tag, ro['id'], ro['title'], ro['level'],
                     task_tag, ro['level'], ro['title'], ro[raw_key], ro[raw_key])
            try:
                driver.execute_query(query)
            except Exception as exc:
                logger.error("query  {} exception {}".format(query, exc))
    return total

def batched_import_community_entity(df, size=1000, task_tag="未设置"):
    """Attach entities to their community via IN_COMMUNITY edges.

    For every community row, walks its relationship_ids and MERGEs
    IN_COMMUNITY edges from both endpoint entities of each relationship to the
    community node.  Ids are sent in inner batches of 1 (batch_size_internal),
    presumably because large UNWIND lists were slow — TODO confirm.

    Returns the number of community rows processed.
    """
    total = len(df)
    count_ = 0
    batch_size = size
    for start in range(0, total, batch_size):
        batch = df.iloc[start: min(start + batch_size, total)]
        for ro in batch.to_dict('records'):
            count_ = count_ + 1
            logger.info("step community_entity current {}/{}".format(count_, total))
            relations_ids_ = ro['relationship_ids']
            len_relations = len(relations_ids_)
            batch_size_internal = 1
            for start_relations in range(0, len_relations, batch_size_internal):
                # BUGFIX: the slice bound must be clamped to len_relations, not
                # to `total` (the dataframe row count) — clamping to `total`
                # silently dropped every relationship id at index >= total.
                end_max = min(start_relations + batch_size_internal, len_relations)
                relations_ids_internal = relations_ids_[start_relations: end_max]
                # Render the id list as a Cypher literal, e.g. ['a','b'].
                relations_ids_str = "[" + ",".join("'" + rid + "'" for rid in relations_ids_internal) + "]"
                query = "UNWIND {} as rel_id MATCH(c:`__Community__` {{task_tag:'{}',community_id:'{}',title:'{}',level:{}}}) WITH c,rel_id MATCH (start_n:`__Entity__` {{task_tag:'{}'}})-[r]-(end_n:`__Entity__` {{task_tag:'{}'}}) WHERE r.id=rel_id MERGE (start_n)-[:IN_COMMUNITY]->(c) MERGE (end_n)-[:IN_COMMUNITY]->(c)".format(relations_ids_str, task_tag, ro['id'], ro['title'], ro['level'], task_tag, task_tag)
                try:
                    start_t = time.time()
                    logger.info("step community_entity current {}~{}/{} of {}/{} query start".format(start_relations, end_max, len_relations, count_, total))
                    driver.execute_query(query)
                    cost_query = 1000 * (time.time() - start_t)
                    logger.info("step community_entity current {}~{}/{} of {}/{},cost {}ms".format(start_relations, end_max, len_relations, count_, total, cost_query))
                except Exception as e:
                    logger.error("exception {}".format(e))
    return total


def batched_import_entity_industry(industry_name, df, batch_size=1000):
    """Link every imported __Entity__ to the industry label node `industry_name`.

    MERGEs a `相关行业` edge from each entity (matched on id/name/type/task_tag)
    to the `行业标签_Level001` node with the given name.  Failures are logged
    and skipped.  Returns the number of rows processed.
    """
    total = len(df)
    count_ = 0
    for start in range(0, total, batch_size):
        batch = df.iloc[start: min(start + batch_size, total)]
        for ro in batch.to_dict('records'):
            count_ = count_ + 1
            logger.info("step entity_industry current {}/{}".format(count_, total))
            id_ = ro['id']
            # 0.3.0 output has a 'name' column; newer output uses 'title'.
            name_ = ro.get('name', ro.get('title')).replace("'", '').replace('"', '')
            type_ = ro['type']
            query = "MATCH(c:`行业标签_Level001` {{name:'{}'}}) with c MATCH (e:`__Entity__` {{id:'{}',name:'{}',type:'{}',task_tag:'{}'}}) with c,e MERGE (e)-[:相关行业]->(c) ".format(industry_name, id_, name_, type_, task_tag)
            # Debug output goes through the logger, not a leftover bare print().
            logger.debug(query)
            try:
                driver.execute_query(query)
            except Exception as e:
                logger.error("query  {} exception {}".format(query, e))
    return total


def batched_import_entity_patent(df, batch_size=1000):
    """MERGE HAS_ENTITY edges from patent/paper nodes to imported entities.

    For each entity row and each of its text_unit_ids, matches the chunk, the
    `doc_type` node sharing the chunk's _id_elasticsearch, and links that node
    to the entity.  Failures are logged and skipped.

    Returns the number of entity rows processed.
    """
    total = len(df)
    count_ = 0
    for start in range(0, total, batch_size):
        batch = df.iloc[start: min(start + batch_size, total)]
        for ro in batch.to_dict('records'):
            count_ = count_ + 1
            logger.info("step entity_patent current {}/{}".format(count_, total))
            # 0.3.0 output has a 'name' column; newer output uses 'title'.
            name_ = ro.get('name', ro.get('title')).replace("'", '').replace('"', '')
            type_ = ro['type']
            text_units_ids_ = ro['text_unit_ids']
            unit_len = len(text_units_ids_)
            unit_count = 0
            for text_id_ in text_units_ids_:
                unit_count = unit_count + 1
                logger.info("step entity_patent current {}/{},entity has units {}/{}".format(count_, total, unit_count, unit_len))
                query = "MATCH (t:__Entity__ {{id:'{}',name:'{}',type:'{}'}}) MATCH (c:__Chunk__ {{id:'{}'}}) with t,c MATCH (p:`{}` {{_id_elasticsearch:c._id_elasticsearch}}) MERGE (p)-[:HAS_ENTITY]->(t)".format(ro['id'], name_, type_, text_id_, doc_type)
                # Debug output goes through the logger, not a leftover bare print().
                logger.debug(query)
                try:
                    driver.execute_query(query)
                except Exception as e:
                    logger.error("query  {} exception {}".format(query, e))
    return total


def batched_import_community_text_units(df, batch_size=1000, task_tag="未设置"):
    """MERGE HAS_CHUNK edges from each __Community__ node to its text-unit chunks."""
    total = len(df)
    processed = 0
    # Query template hoisted out of the loops; only the values vary per edge.
    template = (
        "MATCH(c:`__Community__` {{task_tag:'{}',community_id:'{}',level:{},title:'{}'}}) "
        "WITH c MATCH (t:__Chunk__ {{id:'{}',task_tag:'{}'}})  MERGE (c)-[:HAS_CHUNK]->(t)"
    )
    for offset in range(0, total, batch_size):
        chunk = df.iloc[offset: min(offset + batch_size, total)]
        for ro in chunk.to_dict('records'):
            processed += 1
            logger.info("step community_text_units current {}/{}".format(processed, total))
            for text_id_ in ro['text_unit_ids']:
                query = template.format(task_tag, ro['id'], ro['level'], ro['title'], text_id_, task_tag)
                try:
                    driver.execute_query(query)
                except Exception as exc:
                    logger.error("query  {} exception {}".format(query, exc))
    return total

def main():
    """Drive the GraphRAG-artifact import into Neo4j.

    Reads the create_final_* parquet files from GRAPHRAG_FOLDER and imports
    documents, chunks (text units), entities, relationships and nodes, tagging
    everything with the module-level task_tag.  The _patch_ / _only_path_
    flags select the extra patent/industry linking steps.
    """
    # Column sets differ between GraphRAG 0.3.0 artifacts and newer output
    # (e.g. entities carry 'name' in 0.3.0 but 'title' later).
    if _030_version:
        columns_map = {
                "create_final_documents.parquet":["id","text_unit_ids","raw_content", "title"],
                "create_final_text_units.parquet":["id","text","n_tokens","document_ids","entity_ids","relationship_ids"],
                "create_final_entities.parquet":["id","human_readable_id","name","type", "description","text_unit_ids","graph_embedding","description_embedding"],
                "create_final_relationships.parquet":["id","human_readable_id", "source", "target","description","text_unit_ids","source_degree","target_degree","weight","rank"],
                "create_final_nodes.parquet":["id", "human_readable_id",  "title","community","level","degree", "x", "y"],
        }
    else:
        columns_map = {
                "create_final_documents.parquet":["id","human_readable_id","text_unit_ids","title"],
                "create_final_text_units.parquet":["id","human_readable_id","text","n_tokens","document_ids","entity_ids","relationship_ids"],
                "create_final_entities.parquet":["id","human_readable_id","title","type", "description","text_unit_ids"],
                "create_final_relationships.parquet":["id","human_readable_id","source", "target", "description","weight","text_unit_ids","combined_degree"],
                "create_final_nodes.parquet":["id", "human_readable_id",  "title","community","level","degree", "x", "y"],
        }

    # parquet file name -> row count, filled while probing the artifacts below.
    length_map = {

    }
    logger.info("开始执行")
    import pyarrow.parquet as pq
    # Probe each artifact first: log its schema and record its row count.
    for parquet in ['create_final_documents.parquet','create_final_text_units.parquet','create_final_entities.parquet',
            'create_final_relationships.parquet','create_final_nodes.parquet']:
        table = pq.read_table(f'{GRAPHRAG_FOLDER}/{parquet}')
        logger.info('table {} \n 字段包括\n {}'.format(parquet,table.to_pandas().head(0)))
        logger.info('=======================================')
        doc_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=["id"])
        logger.info('{} length {}'.format(parquet,len(doc_df)))
        length_map[parquet] = len(doc_df)
    # --- Documents: node key is the title; the parquet id is kept as id_id. ---
    parquet = "create_final_documents.parquet"
    doc_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=columns_map[parquet])
    print('__Document__ length ',len(doc_df))

    statement = """ MERGE (d:`__Document__` {id:value.title,task_tag:"""+"'"+task_tag+"'"+"""}) SET d += value {.raw_content,.title,.text_unit_ids},d.id_id = value.id """
    if _only_path_!=True:
        batched_import(statement, doc_df,batch_size=1000,step_name='final_documents')
    if _patch_:
        batched_import_patent_document(doc_df)

    # --- Chunks (text units), linked PART_OF their documents via id_id. ---
    parquet = "create_final_text_units.parquet"
    text_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=columns_map[parquet])
    #print('__Chunk__',text_df.head())
    print('__Chunk__ length ',len(text_df))
    statement = """ MERGE (c:`__Chunk__` {id:value.id,task_tag:"""+"'"+task_tag+"'"+"""}) SET c += value {.text, .n_tokens} WITH c, value UNWIND value.document_ids AS document MATCH (d:__Document__ {id_id:document}) MERGE (c)-[:PART_OF]->(d) """
    if _only_path_!=True:
        batched_import(statement, text_df,batch_size=1000,step_name='final_text_units')
    if _patch_:
        batched_import_patent_chunk(text_df)

    # --- Entities, linked HAS_ENTITY from their chunks; quotes stripped from
    # names so the interpolated Cypher stays valid. ---
    parquet = "create_final_entities.parquet"
    entity_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=columns_map[parquet])
    print('__Entity__ length ',len(entity_df))
    if _030_version:
        entity_statement = """MERGE (e:`__Entity__` {id:value.id,type:value.type,name:replace(replace(value.name,'"',''),"'",''),task_tag:"""+"'"+task_tag+"'"+"""}) SET e += value {.human_readable_id, .description, .type,name:replace(replace(value.name,'"',''),"'","")} WITH e, value UNWIND value.text_unit_ids AS text_unit MATCH (c:__Chunk__ {id:text_unit,task_tag:"""+"'"+task_tag+"'"+"""}) MERGE (c)-[:HAS_ENTITY]->(e) """
    else:
        entity_statement = """MERGE (e:`__Entity__` {id:value.id,type:value.type,name:replace(replace(value.title,'"',''),"'",''),task_tag:"""+"'"+task_tag+"'"+"""}) SET e += value {.human_readable_id, .description, .type,name:replace(replace(value.title,'"',''),"'","")} WITH e, value UNWIND value.text_unit_ids AS text_unit MATCH (c:__Chunk__ {id:text_unit,task_tag:"""+"'"+task_tag+"'"+"""}) MERGE (c)-[:HAS_ENTITY]->(e) """
    if _only_path_!=True:
        batched_import(entity_statement, entity_df,batch_size=1000,step_name='final_entities')
    if _patch_:
        batched_import_entity_industry(catelogy_tag,entity_df)
    print('__Entity__ imported ',len(entity_df))
    # --- Relationships; optionally carry the relationship_keywords column. ---
    if relationship_keywords:
        parquet = "create_final_relationships.parquet"
        rel_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=(columns_map[parquet]+["relationship_keywords"]))
    else:
        parquet = "create_final_relationships.parquet"
        rel_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=(columns_map[parquet]))
    logger.info('__RELATIONSHIP__ length {}'.format(len(rel_df)))
    if _only_path_!=True:
        batched_import_relationship(rel_df)
    logger.info('__RELATIONSHIP__ imported {} '.format(len(rel_df)))
    if error_query:
        logger.error('__RELATIONSHIP__ imported error {} '.format(error_query))

    # --- Graph nodes (layout/community view of the entities). ---
    parquet = "create_final_nodes.parquet"
    nodes_df = pd.read_parquet(f'{GRAPHRAG_FOLDER}/{parquet}', columns=columns_map[parquet])
    logger.info('__Node__ length {}'.format(len(nodes_df)))
    # A single Node title may belong to several Communities, so community-like
    # attributes must not be used as MERGE match keys here.
    statement = """
    MERGE (c:`__Node__` {id:value.id,title:replace(replace(value.title,'"',''),"'",""),task_tag:"""+"'"+task_tag+"'"+"""})
    SET c += value {.human_readable_id,.level,.degree,.community},c.title=replace(replace(value.title,'"',''),"'","")
    """
    if _only_path_!=True:
        batched_import(statement, nodes_df,batch_size=1000,step_name='final_nodes')

    # --- Bridge each __Node__ to its matching __Entity__ (same id/title). ---
    node_entity_rel_statement= """
        MATCH (source:`__Node__` {id:value.id,human_readable_id:value.human_readable_id,title:replace(replace(value.title,'"',''),"'",""),task_tag:"""+"'"+task_tag+"'"+"""})
        MATCH (target:`__Entity__` {id:value.id,human_readable_id:value.human_readable_id,name:replace(replace(value.title,'"',''),"'",""),task_tag:"""+"'"+task_tag+"'"+"""})
        MERGE (source)-[rel:Node_Entity {id: value.id}]->(target)
    """
    logger.info('__Node_Entity_ Start')
    if _only_path_!=True:
        batched_import(node_entity_rel_statement, nodes_df,batch_size=1000,step_name='node_entity_relationship')
    logger.info('__Node_Entity_ End')

# Script entry point.
if __name__=="__main__":
    main()
