import os
from typing import Optional, List

from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings
from langchain_openai.chat_models.base import BaseChatOpenAI
from pydantic.v1 import BaseModel, Field

# A local clash proxy is running; route traffic through it so dependency
# downloads (e.g. chroma) succeed. Disable the proxy instead if it is off.
os.environ['http_proxy'] = '127.0.0.1:7890'
os.environ['https_proxy'] = '127.0.0.1:7890'

# BUG FIX: LangSmith tracing is switched on with LANGCHAIN_TRACING_V2 — there
# is no "V3" variable, so the previous spelling silently disabled tracing.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY NOTE(review): API keys are hard-coded below. They should be loaded
# from the environment or a secrets manager and the committed keys rotated.
os.environ["LANGCHAIN_API_KEY"] = "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"
os.environ["LANGCHAIN_PROJECT"] = "langchain-community-demo"

os.environ["OPENAI_API_KEY"] = "sk-1dd16a258a73428d910d38c782e1c94f"

# Model aliases on the DeepSeek API:
#   deepseek-reasoner : DeepSeek-R1
#   deepseek-chat     : DeepSeek-V3
model_name = "deepseek-reasoner"
deepseek_api_key = "sk-1dd16a258a73428d910d38c782e1c94f"

# DeepSeek exposes an OpenAI-compatible endpoint, so the generic OpenAI chat
# base class is pointed at api.deepseek.com.
model = BaseChatOpenAI(
    model=model_name,
    openai_api_key=deepseek_api_key,
    openai_api_base='https://api.deepseek.com',
    max_tokens=1024,
    streaming=True
)

# Embedding model used both for indexing and for query-time similarity search.
embeddings = OpenAIEmbeddings(model='text-embedding-3-small')

# Smoke test for the raw chat model (kept for reference):
# response = llm.invoke("现在市面上都有哪些不错的基础模型，我在项目中推荐使用哪个？")
# print(response.content)

# On-disk location of the persisted Chroma vector store.
persist_directory = 'chroma_data_dir'

# Source video links; only the first is currently active.
urls = [
    "https://www.youtube.com/watch?v=O1s7ZqNe_XA",
    # "https://www.youtube.com/watch?v=dA1cHGACXCo",
    # "https://www.youtube.com/watch?v=ZcEMLz27sL4"
    # "https://www.bilibili.com/video/BV1QGHMepE7z/?spm_id_from=333.337.search-card.all.click&vd_source=0775c3e59510d942e0897cfffea682c0",
    # "https://www.bilibili.com/video/BV1d64y1j7xj/?spm_id_from=333.337.search-card.all.click&vd_source=0775c3e59510d942e0897cfffea682c0"
]

# Re-open the vector store that was previously persisted to disk.
vectorstore = Chroma(persist_directory=persist_directory, embedding_function=embeddings)

# Quick similarity-search sanity check (kept for reference):
# result = vectorstore.similarity_search_with_score("你好")
# print(result[0])
# print(result[0][0].metadata['publish_year'])


# System instruction: the model acts as an expert that converts a user
# question into a list of optimized database queries.
system = """
您是将用户问题转换为数据库查询的专家，给定一个问题，返回一个数据库查询优化列表，以检索最相关的结果。
"""

# Two-turn template: fixed system role plus the user's question slot.
messages = [
    ("system", system),
    ("human", "{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)


# pydantic
class Search(BaseModel):
    """Structured search request extracted from a user question.

    Produced by ``model.with_structured_output(Search)``: the LLM fills in
    the rewritten query text and, when the question mentions one, a
    publication year used as a metadata filter.
    """
    # BUG FIX: previously declared as ``str`` with a ``None`` default, which
    # pydantic v1 accepts — a missing query then flowed as ``None`` into
    # ``vectorstore.similarity_search``. The query is now required.
    query: str = Field(..., description="搜索的用户问题")
    # Optional: the field may be absent / None when no year is mentioned.
    publish_year: Optional[int] = Field(None, description="发布年份")


# RunnablePassthrough reserves a slot: the question value is supplied later,
# at invoke time, and flows through the prompt into the structured-output model.
chain = (
    {'question': RunnablePassthrough()}
    | prompt
    | model.with_structured_output(Search)
)

# Sample invocations (kept for reference):
# resp1 = chain.invoke({"question": "你好"})
# print(resp1)
# resp2 = chain.invoke("帮我找一个2024年的电影")
# print(resp2)


def retrieval(search: Search) -> List[Document]:
    """Run a similarity search in the vector store for a structured query.

    When the extracted request carries a publish year, the search is
    restricted with a Chroma metadata filter ("$eq" is Chroma's equality
    operator); otherwise no filter is applied.
    """
    year = search.publish_year
    metadata_filter = {"publish_year": {"$eq": year}} if year else None
    return vectorstore.similarity_search(search.query, filter=metadata_filter)


# Full pipeline: question -> structured Search extraction -> vector retrieval.
new_chain = chain | retrieval

# End-to-end run and result dump.
result = new_chain.invoke({"question": "2025年关于RAG的论文"})
print(result)
