import dotenv
import os
import weaviate
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_weaviate import WeaviateVectorStore
from weaviate.auth import AuthApiKey

# Load environment variables (e.g. WAEVIATE_URL, WEAVIATE_KEY) from a local .env file
# so the Weaviate/OpenAI credentials below can be read with os.getenv.
dotenv.load_dotenv()
class HydeRetriever(BaseRetriever):
    """HyDE (Hypothetical Document Embeddings) retriever.

    First asks the LLM to write a hypothetical document that would answer
    the query, then retrieves real documents similar to that hypothetical
    text via the wrapped retriever.
    """

    # Underlying vector-store retriever used for the final similarity lookup.
    retriever: BaseRetriever
    # Language model that drafts the hypothetical answer document.
    llm: BaseLanguageModel

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Prompt instructing the model to write a scientific article
        # answering the question (prompt text intentionally left as-is).
        hyde_prompt = ChatPromptTemplate.from_template(
            "请写一篇科学论文来回答这个问题。\n"
            "问题:{question}\n"
            "文章，"
        )

        # Stage 1: generate the hypothetical document text.
        generate_stage = hyde_prompt | self.llm | StrOutputParser()
        hypothetical_doc = generate_stage.invoke({"question": query})

        # Stage 2: use that text as the search query against the real corpus.
        return self.retriever.invoke(hypothetical_doc)
 # 导入数据
client = weaviate.connect_to_weaviate_cloud(
    skip_init_checks=True,
    cluster_url=os.getenv("WAEVIATE_URL"),
    auth_credentials=AuthApiKey(os.getenv("WEAVIATE_KEY"))
    )
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
db = WeaviateVectorStore(client=client,
                        index_name="DataSetTest",
                        text_key="text",
                        embedding=embedding)

# 创建Hyde检索器
hyde_retriver = HydeRetriever(
    retriever=db.as_retriever(),
    llm=ChatOpenAI(model="gpt-4o-mini")
)

# 文档检索
docs = hyde_retriver.invoke("关于LLMOps的应用配置文档有哪些")
for doc in docs:
    print(doc)