# -*- coding: utf-8 -*-

# @Project : fastapi-tutorial
# @Date    : 20240401-1850
# @Author  : robin

import sys
import os
from dotenv import load_dotenv

# Load environment variables from the .env file one directory up.
load_dotenv("../.env")

# Read the OpenAI credentials from the environment and re-export them so
# downstream libraries (langchain / openai clients) pick them up.
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")

# os.environ values must be strings; assigning None raises TypeError.
# Fail fast with a clear message when the required key is missing,
# and treat the base URL as optional.
if api_key is None:
    raise RuntimeError("OPENAI_API_KEY is not set; check ../.env")
os.environ["OPENAI_API_KEY"] = api_key
if api_base is not None:
    os.environ["OPENAI_API_BASE"] = api_base

# ! pip install --upgrade langchain
# ! pip install --upgrade openai==0.27.8
# ! pip install -U langchain-openai

# Embed a batch of documents.
from langchain_openai import OpenAIEmbeddings

e_model = OpenAIEmbeddings()
# embed_documents returns one embedding vector per input text.
# (Original variable was misspelled "ebeddings".)
embeddings = e_model.embed_documents(
    [
        "你好",
        "你好啊",
        "你叫什么名字?",
        "我叫王大锤",
        "很高兴认识你大锤",
    ]
)
print(embeddings)

# Embed a single query string.
query_vector = e_model.embed_query("这段对话中提到了什么名字?")
# Vectors like this can be cached to cut model-API requests,
# lowering cost and improving latency.
print(query_vector[:5])

# Embedding-vector cache: wrap the embedder so computed vectors are
# persisted in a local byte store and reused on subsequent calls.
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings

u_embeddings = OpenAIEmbeddings()

# Local directory backing the cache.
fs = LocalFileStore("./cache/")

# Namespacing by model name keeps vectors from different models apart.
cached_embeddings = CacheBackedEmbeddings.from_bytes_store(
    u_embeddings,
    fs,
    namespace=u_embeddings.model,
)
# The store starts empty — nothing has been embedded yet.
print(list(fs.yield_keys()))

# Load the document, split it into chunks; the chunks are embedded
# and stored in the cache when the vector store is built below.
loader = TextLoader("letter.txt")
raw_documents = loader.load()

splitter = CharacterTextSplitter(chunk_size=600, chunk_overlap=0)
documents = splitter.split_documents(raw_documents)
print(documents)

# ! pip install faiss-cpu
from langchain.vectorstores import FAISS

# %timeit -r 1 -n 1
# Building the index embeds every chunk; the vectors land in the cache.
db = FAISS.from_documents(documents, cached_embeddings)

# Inspect the cache keys that were written during indexing.
print(list(fs.yield_keys()))
