import os
from models.status import UStatus
from utils.UIEUtil import UIEUtil
from pyltp import SentenceSplitter
import chardet
from utils.Neo4jUtil import Neo4jUtil
from question.kb_service.milvus_service import MilvusService
from config.config_env import KB_NAME
from tqdm import tqdm

from utils.llm_ie import LLMIEUtil
#from config.config_lab import is_llm_ie
g = Neo4jUtil()
# Master database graph handle (all writes below go here)
g_master = g.graph
# Replica/secondary database graph handle (writes currently disabled below)
g_from = g.from_graph
# Vector knowledge base service backed by Milvus
kb_service = MilvusService(KB_NAME)

# Shared LLM-based information-extraction helper
llm_ie=LLMIEUtil()

# Maps a Chinese relation name (as emitted by the IE model) to the Neo4j
# node label assigned to the relation's object (tail) entity.
rel2label = {
    '包含': '部件',
    '包括': '检查内容',
    '涉及': '故障现象',
    '产生': '故障现象',
    '导致': '故障现象',
    '相关': '部件',
    '建议': '维修方案',
    '排故': '排故流程',
    '对应': '故障原因',
}


def detect_encoding(file_path):
    """Sniff and return the character encoding name of *file_path* via chardet."""
    with open(file_path, 'rb') as fh:
        return chardet.detect(fh.read())['encoding']


def handle_predict(predict):
    """Persist UIE extraction results into Neo4j.

    :param predict: list of per-line prediction dicts, each mapping an entity
        label to a list of items shaped like
        ``{"text": ..., "relations": {rel_name: [{"text": ...}, ...]}}``.

    Fixes over the previous version: unknown relation names are skipped
    instead of raising ``KeyError``, and single quotes in entity names are
    escaped so they cannot break the Cypher string literal (consistent with
    ``handle_ie_llm``).
    """
    print("数据处理完成，开始入库！")
    syz = set()       # (subject, subject_label, relation, object, object_label)
    entities = set()  # (entity_name, entity_label)
    for p_item in predict:
        # p_item holds the predictions for one input line
        for subject_label, item_data in p_item.items():
            for item in item_data:
                subject = item['text']
                entities.add((subject, subject_label))
                # Walk relations only when the model emitted any
                for rel_key, rel_list in item.get('relations', {}).items():
                    object_label = rel2label.get(rel_key)
                    if object_label is None:
                        # Relation name not in rel2label: skip rather than crash
                        continue
                    for rel in rel_list:
                        object_name = rel['text']
                        entities.add((object_name, object_label))
                        syz.add((subject, subject_label, rel_key, object_name, object_label))
    for node_data, node_label in entities:
        # Escape single quotes so embedded quotes don't terminate the literal
        safe_name = node_data.replace("'", "\\'")
        cypher_query = '''MERGE (a:%s {name: '%s'})''' % (node_label, safe_name)
        g_master.run(cypher_query)
        #g_from.run(cypher_query)
    for start_node_data, start_label, relation_type, end_node_data, end_label in syz:
        start_safe = start_node_data.replace("'", "\\'")
        end_safe = end_node_data.replace("'", "\\'")
        cql = '''MERGE (a:%s {name: '%s'})
                                    MERGE (b:%s {name: '%s'})
                                    MERGE (a)-[r:%s]-> (b)
                                    ''' % (start_label, start_safe, end_label, end_safe, relation_type)
        # Write to the master database
        g_master.run(cql)
        # Replica write (currently disabled)
        #g_from.run(cql)
    print("insert2neo4j success")

def handle_ie_llm(llm_ie):
    """
    Store entities and relations extracted by the LLM into Neo4j.

    :param llm_ie: result of ``LLMIEUtil.predict()``, shaped as
        ``{"entities": [...], "relations": [...]}``.
        NOTE(review): this parameter shadows the module-level ``llm_ie``
        instance within the function body.
    """
    print("LLM数据处理完成，开始入库！")
    triples = set()  # (subject, subject label, relation, object, object label)
    nodes = set()    # (entity value, entity label)

    # Collect entities, dropping anything with a missing value or label.
    for ent in llm_ie.get("entities", []):
        name = ent.get("node_data")
        label = ent.get("node_label")
        if name and label:
            nodes.add((name, label))

    # Collect relations; both endpoint entities are registered as nodes too.
    for rel in llm_ie.get("relations", []):
        parts = (
            rel.get("start_node_data"),
            rel.get("start_label"),
            rel.get("relation_type"),
            rel.get("end_node_data"),
            rel.get("end_label"),
        )
        if not all(parts):
            continue  # skip incomplete relation records
        s_name, s_label, _rel_type, e_name, e_label = parts
        nodes.add((s_name, s_label))
        nodes.add((e_name, e_label))
        triples.add(parts)

    def _esc(text):
        # Escape single quotes so names can't break the Cypher string literal.
        return text.replace("'", "\\'")

    # Insert entity nodes into the master (and, when enabled, replica) DB.
    for node_data, node_label in nodes:
        node_data_escaped = _esc(node_data)
        cypher = f"MERGE (a:{node_label} {{name: '{node_data_escaped}'}})"
        g_master.run(cypher)
        #g_from.run(cypher)

    # Insert relationships into the master (and, when enabled, replica) DB.
    for start_data, start_label, rel_type, end_data, end_label in triples:
        start_data_escaped = _esc(start_data)
        end_data_escaped = _esc(end_data)
        cypher = f"""
            MERGE (a:{start_label} {{name: '{start_data_escaped}'}})
            MERGE (b:{end_label} {{name: '{end_data_escaped}'}})
            MERGE (a)-[r:{rel_type}]->(b)
        """
        g_master.run(cypher)
        #g_from.run(cypher)

    print("LLM抽取结果入库成功")

# def handle_txt_to_db(file_path, id, current_app):
#     try:
#         uie_process = UIEUtil()
#         uie_process.create_ie_instance()
#         lines = []
#         # 打开文本文件
#         encoding = detect_encoding(file_path)
#         with open(file_path, 'r', encoding=encoding) as file:
#             # 读取文件内容并按行分割为数组
#             lines = file.readlines()
#         sentences = []
#         for line in lines:
#             rst = SentenceSplitter.split(line)
#             sentences += rst
#         #for sentence in sentences:
#         for sentence in tqdm(sentences):
#             predict = uie_process.predict(sentence)
#             handle_predict(predict)
#         # 文件入知识库
#         kb_service.insert_doc_to_vector(file_path)
#         with current_app.app_context():
#             s = UStatus.query.get(id)
#             s.change_status()
#         return True
#     except Exception as e:
#         print("调用uie解析出错")
#         return False
#     pass

def handle_txt_to_db(file_path, id, current_app, is_llm_ie=False):
    """Parse a text file, extract entities/relations, and persist them.

    Reads the file with its detected encoding, splits it into sentences,
    runs either the LLM-based or the UIE-based extractor over each sentence
    (writing results to Neo4j), indexes the document into the Milvus
    knowledge base, and finally flips the upload-status record.

    :param file_path: path of the uploaded text file.
    :param id: primary key of the UStatus record to update (name shadows the
        builtin ``id``; kept for caller compatibility).
    :param current_app: Flask application, needed for the DB app context.
    :param is_llm_ie: when True use the LLM extractor, otherwise UIE.
    :return: True on success, False if any step raised.
    """
    try:
        uie_process = UIEUtil()
        uie_process.create_ie_instance()
        # Detect encoding first so GBK- and UTF-8-saved files both load.
        encoding = detect_encoding(file_path)
        with open(file_path, 'r', encoding=encoding) as file:
            lines = file.readlines()
        # Split every line into sentences before extraction.
        sentences = []
        for line in lines:
            sentences.extend(SentenceSplitter.split(line))
        for sentence in tqdm(sentences):
            if is_llm_ie:
                handle_ie_llm(llm_ie.predict(sentence))
            else:
                handle_predict(uie_process.predict(sentence))
        # Index the whole document into the vector knowledge base.
        kb_service.insert_doc_to_vector(file_path)
        # Mark the upload record as processed.
        with current_app.app_context():
            s = UStatus.query.get(id)
            s.change_status()
        return True
    except Exception as e:
        # Deliberate best-effort boundary: report and signal failure upward.
        print("调用uie解析出错", e)
        return False