# -*- coding: utf-8 -*-
from typing import List, Tuple
import os
import spacy
import re
from sentence_transformers import SentenceTransformer
import chromadb
import networkx as nx
from docx import Document
from tqdm import tqdm
import pickle


class ChineseKnowledgeGraphBuilder:
    """Simplified Chinese knowledge-graph builder.

    Extracts subject-verb-object triples from Chinese text with spaCy,
    accumulates them as labelled edges in a networkx graph, and stores
    each batch's triples together with the source text in a Chroma
    vector collection embedded by a SentenceTransformer model.
    """

    def __init__(self, persist_directory: str = "./storege"):
        """Load the NLP/embedding models and open the vector store.

        Args:
            persist_directory: Directory for the Chroma database and the
                pickled knowledge graph. Default kept as "./storege"
                (note the "storage" typo) for backward compatibility
                with existing on-disk data.
        """
        # Fixed: was a doubled `self.nlp = self.nlp = ...` assignment.
        self.nlp = spacy.load("zh_core_web_lg")
        self.embedding_model = SentenceTransformer("Qwen/Qwen3-Embedding-0.6B")
        self.persist_directory = persist_directory
        self.vector_db_client = chromadb.PersistentClient(path=persist_directory)
        self.vector_db = self.vector_db_client.get_or_create_collection("chinese_knowledge_base")
        self.knowledge_graph = nx.Graph()

    def _extract_chinese_triples(self, text: str) -> List[Tuple[str, str, str]]:
        """Extract simple (subject, verb, object) triples from Chinese text.

        Walks the dependency parse sentence by sentence; for each nominal
        subject attached to a verb, pairs it with that verb's first
        direct object.

        Args:
            text: Raw Chinese text to parse.

        Returns:
            List of (subject, verb, object) string triples; empty when no
            subject-verb-object pattern is found.
        """
        doc = self.nlp(text)
        triples: List[Tuple[str, str, str]] = []
        for sent in doc.sents:
            for token in sent:
                if token.dep_ == "nsubj" and token.head.pos_ == "VERB":
                    verb = token.head
                    # Take only the first direct object of this verb.
                    obj = next(
                        (child.text for child in verb.children if child.dep_ == "dobj"),
                        None,
                    )
                    if obj is not None:
                        triples.append((token.text, verb.text, obj))
        return triples

    def _embed_and_store(self, chunks: List[str]):
        """Embed text chunks and append them to the vector collection.

        Args:
            chunks: Text chunks to store, e.g. ["text1", "text2", ...].
        """
        embeddings = self.embedding_model.encode(chunks)
        # Offset IDs by the current collection size. The original code
        # generated ids "0", "1", ... on every call, so each call after
        # the first collided with already-stored documents.
        start = self.vector_db.count()
        self.vector_db.add(
            documents=chunks,
            embeddings=embeddings.tolist(),  # [[emb1], [emb2], ...]
            ids=[str(start + i) for i in range(len(chunks))],
        )

    def build_from_docx(self, file_path: str, batch_size: int = 16):
        """Build the knowledge graph and vector store from a DOCX file.

        Non-empty paragraphs are processed in batches of ``batch_size``:
        each batch is concatenated into one text block, mined for triples
        (which become labelled graph edges), and the triples plus the
        source text are embedded and stored as a single chunk.

        Args:
            file_path: Path to the .docx document.
            batch_size: Number of paragraphs merged per batch.

        Returns:
            Tuple of (vector collection, networkx knowledge graph).
        """
        doc = Document(file_path)
        paragraphs = [para.text for para in doc.paragraphs if para.text.strip()]

        for i in tqdm(range(0, len(paragraphs), batch_size), desc="处理文档:"):
            # Join the batch once instead of repeated string += concatenation.
            batch_text = "".join(paragraphs[i:i + batch_size])
            triples = self._extract_chinese_triples(batch_text)

            triple_lines = []
            for subj, pred, obj in triples:
                # The edge label carries the predicate (verb).
                self.knowledge_graph.add_edge(subj, obj, label=pred)
                triple_lines.append(f'{subj}-{pred}->{obj}')

            # Store the triples together with the original text so that
            # retrieval returns both structured relations and context.
            chunk_text = "\n".join(triple_lines + [batch_text])
            self._embed_and_store([chunk_text])

        return self.vector_db, self.knowledge_graph

    def save_knowledge_graph(self):
        """Pickle the knowledge graph into the persist directory."""
        kg_path = os.path.join(self.persist_directory, "knowledge_graph.pkl")
        with open(kg_path, "wb") as f:
            pickle.dump(self.knowledge_graph, f)
        print(f"知识图谱已保存到: {kg_path}")

    def load_knowledge_graph(self):
        """Load a previously pickled knowledge graph, if present.

        Returns:
            True when the graph file existed and was loaded, else False.
        """
        kg_path = os.path.join(self.persist_directory, "knowledge_graph.pkl")
        if os.path.exists(kg_path):
            with open(kg_path, "rb") as f:
                # NOTE: pickle.load is only safe on files this process wrote.
                self.knowledge_graph = pickle.load(f)
            print(f"知识图谱已从 {kg_path} 加载")
            return True
        else:
            print("未找到保存的知识图谱文件")
            return False

if __name__ == "__main__":
    # Guarded so importing this module no longer triggers a full build.
    builder = ChineseKnowledgeGraphBuilder()  # fixed "biuder" typo
    db, kg = builder.build_from_docx("./any_chinese.docx")
    builder.save_knowledge_graph()  # the Chroma vector DB persists itself automatically
