from langchain.embeddings import OpenAIEmbeddings, AlephAlphaAsymmetricSemanticEmbedding, FakeEmbeddings, \
    CacheBackedEmbeddings
from langchain.storage import LocalFileStore


def normal_embedding():
    """Embed a single query string with OpenAI and print its first five dimensions.

    NOTE(review): presumably requires OPENAI_API_KEY in the environment — the
    client is constructed with defaults, so credentials must come from there.
    """
    embeddings_model = OpenAIEmbeddings()
    # Dead commented-out embed_documents() demo removed; embed_query is the
    # single-text counterpart and is what this demo actually exercises.
    embedded_query = embeddings_model.embed_query("What was the name mentioned in the conversation?")
    print(embedded_query[:5])


def embedding_with_aleph():
    """Demonstrate Aleph Alpha asymmetric embedding of one document and one query.

    Embeds the document and the query through their respective asymmetric
    endpoints and prints both resulting vectors.
    """
    embedder = AlephAlphaAsymmetricSemanticEmbedding()
    doc_vectors = embedder.embed_documents(["这是文档的内容"])
    query_vector = embedder.embed_query("文档的内容是什么？")
    print(doc_vectors)
    print(query_vector)


def embedding_with_fake():
    """Run the FakeEmbeddings stub (1352-dim vectors) and print its output.

    Useful for exercising embedding-consuming code without any external service.
    """
    fake = FakeEmbeddings(size=1352)
    result_for_query = fake.embed_query("foo")
    results_for_docs = fake.embed_documents(["foo"])
    print(result_for_query)
    print(results_for_docs)


def use_with_a_vector_store():
    """Build a cache-backed OpenAI embedder over a local file store.

    Wraps the OpenAI embedder so repeated embeddings of the same text are read
    back from ./files instead of re-calling the API. Prints the keys currently
    in the cache and returns the cache-backed embedder for further use.

    NOTE(review): presumably requires OPENAI_API_KEY in the environment.
    """
    underlying_embeddings = OpenAIEmbeddings()
    fs = LocalFileStore("./files")
    # Namespace cache entries by model name so different models never collide.
    cached_embedder = CacheBackedEmbeddings.from_bytes_store(
        underlying_embeddings, fs, underlying_embeddings.model
    )
    # Previously this key listing was computed and immediately discarded;
    # print it so the demo actually shows the cache contents.
    print(list(fs.yield_keys()))
    return cached_embedder


if __name__ == '__main__':
    # Only the fake-embedding demo runs by default; the other demos
    # presumably need external service credentials — confirm before enabling.
    embedding_with_fake()
