import os
from config import HF_ENDPOINT, PROJECT_DIR

# Log the configured Hugging Face endpoint at import time so runs record which mirror is in use.
print(HF_ENDPOINT)

from llama_index.core.embeddings import resolve_embed_model

from llama_index.core.node_parser import SimpleNodeParser, SentenceWindowNodeParser, NodeParser, \
    SemanticSplitterNodeParser, MarkdownNodeParser
from llama_index.readers.file import PyMuPDFReader


def parseNodes(node_parser: NodeParser, docs):
    """Split *docs* into nodes with *node_parser*, display each node, and return them.

    Parameters:
        node_parser: any llama_index ``NodeParser`` implementation
            (e.g. ``MarkdownNodeParser``, ``SentenceWindowNodeParser``).
        docs: the documents to split (e.g. the output of ``loadPdfdata``).

    Returns:
        The list of parsed nodes. Previously the result was discarded;
        returning it is backward-compatible and matches ``loadSimpleNode``.
    """
    # NLTK_DATA location matters for parsers that rely on NLTK tokenizers.
    print(os.environ.get("NLTK_DATA"))

    # Split the documents into nodes.
    print("parse begin")
    base_nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)

    # Lazy local import keeps module import light; utils is project-local.
    from utils import display_source_node
    print(f"nodes:{len(base_nodes)} docs:{len(docs)}")
    for n in base_nodes:
        display_source_node(n, source_length=1500)
    return base_nodes

def loadPdfdata(sectionNo: str = "A3301010060280271001231"):
    """Load the PDF named ``<sectionNo>.pdf`` from the project's data/files directory."""
    pdf_path = os.path.join(PROJECT_DIR, "data", "files", sectionNo + ".pdf")
    reader = PyMuPDFReader()
    return reader.load_data(file_path=pdf_path)

def loadSimpleNode(docs):
    """Chunk *docs* with a fixed-size SimpleNodeParser (chunk_size=1024) and return the nodes."""
    # Fixed-size splitter: 1024-token chunks.
    splitter = SimpleNodeParser.from_defaults(chunk_size=1024)
    print("parse begin")
    nodes = splitter.get_nodes_from_documents(docs, show_progress=True)
    print(f"nodes:{len(nodes)} docs:{len(docs)}")
    return nodes


if __name__ == "__main__":
    # Report where NLTK will look for its data (affects some tokenizer-based parsers).
    print(os.environ.get("NLTK_DATA"))

    # Load the default section's PDF from the project data directory.
    print("pdf read")
    documents = loadPdfdata()

    # Split the PDF using markdown-structure-aware chunking and display the nodes.
    markdown_parser = MarkdownNodeParser.from_defaults()
    print("MarkdownNodeParser")
    parseNodes(markdown_parser, documents)