from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_ollama import ChatOllama, OllamaEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# NOTE: Ollama models go through the Ollama SDK; the other model families
# below use the generic OpenAI-compatible SDK instead.
ollama_embedding = OllamaEmbeddings(
    base_url="http://127.0.0.1:11434",  # local Ollama server endpoint
    model="nomic-embed-text:latest",    # embedding model tag
    # streaming=True,  # whether to enable streaming responses
)


# --- 2. Load the local document ---
file_path = "d://test.txt"  # path to your document
with open(file_path, encoding="utf-8") as f:
    text = f.read()

# --- 3. Split the document into smaller passages (optional, tune as needed) ---
# chunk_size: max characters per chunk; chunk_overlap: characters shared
# between adjacent chunks to preserve context across boundaries.
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_text(text)

# --- 4. Wrap each chunk in a Document object ---
documents = [Document(page_content=piece) for piece in chunks]

# --- 5. Persist the embeddings to Chroma ---
# Passing persist_directory makes langchain_chroma write the collection to
# disk automatically; no explicit .persist() call is required.
persist_dir = "./chroma_db"
vectordb = Chroma.from_documents(
    persist_directory=persist_dir,
    embedding=ollama_embedding,
    documents=documents,
)

print(f"文档已成功存入 Chroma，路径：{persist_dir}")
