import os
from dotenv import load_dotenv, find_dotenv  # find_dotenv helps locate the .env file
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain_community.chat_models import ChatOpenAI
import httpx

# Load environment variables from .env (verbose + override for easier
# debugging: the .env file wins over any pre-existing shell values).
load_dotenv(dotenv_path=find_dotenv(usecwd=True), verbose=True, override=True)

# Read API credentials and model configuration from the environment.
api_key = os.getenv("OPENAI_API_KEY")
base_url = os.getenv("OPENAI_BASE_URL")
model = os.getenv("DEFAULT_MODEL")
max_tokens = os.getenv("MAX_TOKENS_DEFAULT")

# Fail fast with a clear message instead of a KeyError deep inside the client.
if not api_key:
    raise RuntimeError("OPENAI_API_KEY is not set; check your .env file")

chat = ChatOpenAI(
    openai_api_base=base_url,
    request_timeout=httpx.Timeout(300.0, connect=60.0),
    max_retries=1,
    # int(None) would raise when MAX_TOKENS_DEFAULT is unset, so only apply
    # the cap when the variable is actually provided (None = provider default).
    max_tokens=int(max_tokens) if max_tokens else None,
    # Reuse the value read above rather than a second, inconsistent env lookup.
    openai_api_key=api_key,
    model_name=model,
)

# --- Data ingestion -------------------------------------------------------
# Fetch the BaiChuan2 paper PDF and split it into per-page documents.
loader = PyPDFLoader("https://arxiv.org/pdf/2309.10305.pdf")
pages = loader.load_and_split()

# Re-chunk the pages into overlapping ~500-character slices so every piece
# is small enough to embed and retrieve on its own.
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = splitter.split_documents(pages)

# Embed each chunk with the OpenAI embedding model and index the vectors
# in an in-memory Chroma collection.
embedder = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(
    documents=docs,
    embedding=embedder,
    collection_name="openai_embed",
)

query = "How large is the baichuan2 vocabulary?"

def augment_prompt(query: str, store=None, k: int = 3) -> str:
    """Build a RAG prompt: the query plus the top-k most similar chunks.

    Args:
        query: The user question to answer.
        store: Vector store exposing ``similarity_search``; defaults to the
            module-level ``vectorstore`` for backward compatibility.
        k: Number of most-similar documents to retrieve (default 3, as before).

    Returns:
        A prompt string embedding the retrieved context and the query.
    """
    if store is None:
        store = vectorstore
    # Retrieve the k documents most similar to the question.
    results = store.similarity_search(query, k=k)
    source_knowledge = "\n".join(doc.page_content for doc in results)
    # NOTE: local renamed from `augment_prompt`, which shadowed the function.
    prompt = f"""
        Using the contexts below, answer the query.
        context: {source_knowledge}
        query: {query}
    """
    return prompt

from langchain.schema import HumanMessage

# Single-turn conversation: the user question augmented with retrieved context.
messages = [
    HumanMessage(content=augment_prompt(query)),
]

# `chat(messages)` (direct __call__) is deprecated in modern LangChain;
# `.invoke()` is the supported entry point and returns the same AIMessage.
res = chat.invoke(messages)
print("模型回复：", res.content)