from langchain_openai import ChatOpenAI, AzureChatOpenAI, AzureOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
from dotenv import load_dotenv
from langchain import hub
from langchain_openai import OpenAIEmbeddings, AzureOpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import PyPDFLoader


import os

from oauthlib.uri_validate import query

# Load configuration from a local .env file first so deployments can supply
# real credentials without editing this script.
load_dotenv()

# SECURITY: these credentials were previously hardcoded (and unconditionally
# overwrote whatever load_dotenv() provided) — a key committed to source
# control should be considered leaked and rotated. setdefault keeps any value
# already present in the environment/.env and only falls back to the
# development defaults below.
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://chatgpt-test-0001.openai.azure.com/")
os.environ.setdefault("AZURE_OPENAI_API_KEY", "90643b9c4bee421bba17d72b748ad446")
os.environ.setdefault("AZURE_OPENAI_API_VERSION", "2024-06-01")
os.environ.setdefault("OPENAI_API_VERSION", "2023-05-15")

# --- Load the source document --------------------------------------------
loader = PyPDFLoader("pdf/datastruct.pdf")
pages = loader.load_and_split()

# --- Split into overlapping chunks for embedding -------------------------
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,        # max characters per chunk
    chunk_overlap=50,      # overlap preserves context across chunk boundaries
    length_function=len,   # measure chunk size in characters
    add_start_index=True,  # record each chunk's offset in its metadata
)
# Only the first two pages are indexed (demo-sized corpus). Slicing with
# pages[:2] instead of indexing pages[0]/pages[1] avoids an IndexError when
# the PDF yields fewer than two pages.
texts = text_splitter.create_documents([page.page_content for page in pages[:2]])
print(texts)
# Manual smoke test for the embedding deployment (key redacted — read it
# from the environment instead of pasting it into source):
# curl https://chatgpt-test-0001.openai.azure.com/openai/deployments/testEmbedding/embeddings?api-version=2023-05-15 \
#   -H 'Content-Type: application/json' \
#   -H "api-key: $AZURE_OPENAI_API_KEY" \
#   -d '{"input": "Sample Document goes here"}'
# --- Build and persist the vector store ----------------------------------
# Azure deployment "testEmbedding" backs the text-embedding-3-large model.
embeddings = AzureOpenAIEmbeddings(
    model="text-embedding-3-large",
    azure_deployment="testEmbedding",
    openai_api_version="2023-05-15",
)
# Embed every chunk and write the collection to the local Chroma directory.
db = Chroma.from_documents(
    documents=texts,
    embedding=embeddings,
    persist_directory="chromadb",
    collection_name="pdf",
)
# To reopen the persisted store later without re-embedding:
# db = Chroma(
#     collection_name="pdf",
#     embedding_function=embeddings,
#     persist_directory="chromadb",
# )
# print(db.search(query="课程编号", search_type='similarity'))
#LangChain内置的 RAG 实现
# --- Retrieval-augmented QA via LangChain's built-in RetrievalQA chain ----
# Completion-style Azure model used to answer over retrieved chunks.
completion_llm = AzureOpenAI(
    model_name='gpt-35-turbo-instruct',
    azure_deployment='gpt-35-turbo-instruct',
    api_version="2023-03-15-preview",
    # other params...
)
# Community RAG prompt pulled from the LangChain Hub (requires network).
rag_prompt = hub.pull("rlm/rag-prompt")
qa_chain = RetrievalQA.from_llm(
    llm=completion_llm,
    retriever=db.as_retriever(),
    prompt=rag_prompt,
)

query = "课程编号是多少?"
response = qa_chain.invoke(query)
print(response)

# Streaming output (uncomment to print tokens as they arrive):
# for chunk in qa_chain.stream(query):
#     print(chunk,end="", flush=True)