from llama_index.core import VectorStoreIndex
from llama_index.core import Document
from llama_index.core import SimpleDirectoryReader
from llama_index.core.extractors import TitleExtractor, KeywordExtractor
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from pprint import pprint

# 这是一个用于构建数据转换管理的类
from llama_index.core.ingestion import IngestionPipeline

# Load every document found under the ./data directory.
documents = SimpleDirectoryReader("./data").load_data()
# Plain print, not pprint: pprint echoes a str wrapped in quotes.
print(f"读到了{len(documents)}文档")

# 1. Build the ingestion pipeline: split -> enrich metadata -> embed.
pipeline = IngestionPipeline(
    transformations=[
        # Split text into 64-token chunks with a 10-token overlap.
        # (A previous comment claimed 512 tokens, contradicting the code.)
        TokenTextSplitter(chunk_size=64, chunk_overlap=10),
        # Generate a title per node into metadata key "document_title"
        # (makes an LLM call per document).
        TitleExtractor(),
        # Extract keywords per node into metadata key "excerpt_keywords".
        KeywordExtractor(),
        # Compute an embedding vector for every node via the OpenAI API.
        OpenAIEmbedding(),
    ]
)

# Run all transformations and collect the fully processed nodes.
nodes = pipeline.run(documents=documents)
print(f"生成了{len(nodes)}个节点")
for node in nodes:
    print(f"文本内容:{node.text[:10]}")
    # Single quotes inside the f-string expression: reusing double quotes
    # is a SyntaxError before Python 3.12 (PEP 701).
    print(f"标题:{node.metadata.get('document_title')}")
    print(f"关键词:{node.metadata.get('excerpt_keywords')}")
    print(f"嵌入维度:{len(node.embedding)}")
