
import os

import chromadb
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

# --- Environment configuration ---------------------------------------------
# SECURITY: the API keys below are hardcoded in source and therefore already
# compromised — rotate them and move them to real environment variables /
# a secrets manager. `setdefault` (instead of direct assignment) at least
# lets an externally supplied value take precedence instead of being
# silently clobbered.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGCHAIN_PROJECT", "playground")
# FIXME(security): rotate this leaked key and remove the literal.
os.environ.setdefault("LANGCHAIN_API_KEY", "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")
# FIXME(security): rotate this leaked key and remove the literal.
os.environ.setdefault("GOOGLE_API_KEY", "AIzaSyBJoz7BvdFgWTBwzcu-0xWpJKfEJOR6vPM")

# Shared embedding function used by both demo paths below.
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

def simple_demo_write_read():
    """Build a persistent Chroma store from a local text file, then run one query.

    Loads the document, splits it into ~1000-character chunks, embeds and
    persists them to disk, and prints the top similarity-search hit for a
    fixed Chinese query string.
    """
    # Load the source document from disk.
    raw_docs = TextLoader("F:/tmp/output_遥远的救世主.txt", encoding="utf8").load()

    # Chunk into fixed-size pieces with no overlap.
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    chunks = splitter.split_documents(raw_docs)

    # Embed the chunks and persist the collection to the given directory.
    db = Chroma.from_documents(chunks, embedding=embeddings, persist_directory="F:/tmp/demo/chroma_db")

    # Query the fresh store and show the best-matching chunk.
    hits = db.similarity_search("传统文化而感到自卑")
    print(hits[0].page_content)


def simple_demo_read():
    """Open the previously persisted Chroma store and print scored matches.

    Runs a fixed query and prints each (document, score) pair returned by
    the similarity search.
    """
    store = Chroma(embedding_function=embeddings, persist_directory="F:/tmp/demo/chroma_db")
    # similarity_search_with_score yields (Document, score) tuples.
    for result in store.similarity_search_with_score("神话"):
        print(result)


def simple_chroma_db():
    """Placeholder for a raw chromadb-client demo; not implemented yet."""
    return None

if __name__ == "__main__":
    # Only the read-path demo runs by default; the write path must have been
    # executed once beforehand to create the persisted store.
    simple_demo_read()