# pip install youtube-transcript-api pytube
import datetime
import os
from typing import Optional, List

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_community.document_loaders import YoutubeLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from pydantic import BaseModel, Field
from langchain_core.documents import Document

load_dotenv()


# Route outbound HTTP(S) traffic through a local proxy (needed here to reach
# YouTube/OpenAI-compatible endpoints from behind a firewall).
# NOTE(review): values lack a scheme prefix ("http://host:port") — many HTTP
# clients accept bare host:port, but confirm the proxy is actually honored.
os.environ['http_proxy'] = '127.0.0.1:7897'
os.environ['https_proxy'] = '127.0.0.1:7897'

# 1. Create the chat model (Qwen via DashScope's OpenAI-compatible endpoint).
model = ChatOpenAI(
    model='qwen-plus',
    api_key=os.getenv("DASHSCOPE_API_KEY"),  # read from .env by load_dotenv()
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Embedding model; must match the one used when the vector store was built,
# otherwise query vectors and stored vectors live in different spaces.
embeddings = DashScopeEmbeddings(model='text-embedding-v1')

persist_dir = 'chroma_data_dir'  # directory holding the persisted Chroma vector DB

# urls = [
#     "https://www.youtube.com/watch?v=HAn9vnJy6S4",
#     "https://www.youtube.com/watch?v=dA1CHGACXCo",
#     "https://www.youtube.com/watch?v=ZCEMLz27SL4",
#     "https://www.youtube.com/watch?v=hvAPnpSfSGo",
#     "https://www.youtube.com/watch?v=EhLPDL4QrWY",
#     "https://www.youtube.com/watch?v=mmBo8nliu2j0",
#     "https://www.youtube.com/watch?v=rQdibosL1ps",
#     "https://www.youtube.com/watch?v=28lC4fqukoc",
#     "https://www.youtube.com/watch?v=es-9MgxB-Uc",
#     "https://www.youtube.com/watch?v=WLRHWKUKVOE",
#     "https://www.youtube.com/watch?v=ObILtMaRJVY",
#     "https://www.youtube.com/watch?v=DjiXACWYkkU",
#     "https://www.youtube.com/watch?v=o7C9ld6Ln-M",
# ]

# docs = []
# for url in urls:
#     # .load() returns a list of Documents, so extend — append would nest
#     # lists and break the doc.metadata loop below.
#     docs.extend(YoutubeLoader.from_youtube_url(url, add_video_info=True).load())
#
# print(len(docs))
# print(docs[0])
#
# # Add extra metadata to each doc: the year the video was published.
# for doc in docs:
#     doc.metadata['publish_year'] = int(
#         datetime.datetime.strptime(doc.metadata['publish_date'], '%Y-%m-%d %H-%M-%S').strftime('%Y'))
#
# print(docs[0].metadata)
# print(docs[0].page_content[:500])
#
# # 根据多个doc构建向量数据库
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=30)
# split_doc = text_splitter.split_documents(docs)
#
# # 数据向量化 & 向量数据的持久化
# vector_store = Chroma.from_documents(split_doc, embedding=embeddings, persist_directory=persist_dir)

# Load the previously persisted vector data from disk (built by the
# commented-out ingestion code above).
vector_store = Chroma(persist_directory=persist_dir, embedding_function=embeddings)

# Content-level similarity search; returns a list of (Document, score) tuples.
result = vector_store.similarity_search_with_score('How do I build a RAG agent?')
print(result[0])
print(result[0][0].metadata['publish_year'])

# System instructions for the query-analysis step: the model converts a raw
# user question into optimized database queries. The original last sentence
# was truncated mid-word ("do not try to reph"); completed here.
system = """You are an expert at converting user questions into database queries.
You have access to a database of tutorial videos about a software library for building LLM-powered applications.
Given a question, return a list of database queries optimized to retrieve the most relevant results.

If there are acronyms or words you are not familiar with, do not try to rephrase them.
"""

# Two-message prompt: fixed system instructions plus the raw user question,
# filled in via the {question} placeholder.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "{question}")
    ]
)


# pydantic
class Search(BaseModel):
    """Structured representation of a video-search request.

    Filled by the LLM via structured output: a free-text similarity query
    plus an optional publish-year metadata filter. Field descriptions are
    part of the schema sent to the model, so they must be accurate
    (fixed the "transcrlipts" typo in the query description).
    """
    # Text used for similarity search over video transcripts.
    # NOTE(review): default is None despite the plain `str` annotation —
    # consider Field(...) to make the query required; kept as-is for
    # backward compatibility.
    query: str = Field(None, description="Similarity search query applied to video transcripts.")
    # Restrict results to videos published in this year; None = no filter.
    publish_year: Optional[int] = Field(None, description="Year video was published.")


chain = {'question': RunnablePassthrough()} | prompt | model.with_structured_output(Search)

resp1 = chain.invoke("How do I build a RAG agent?")
print(resp1)
# query='build RAG agent' publish_year=None
resp2 = chain.invoke("videos on RAG published in 2023")
print(resp2)


# query='RAG' publish_year=2023

# 还未真正搜索，只是生成指令模型

def retrieval(search: Search) -> List[Document]:
    """Execute the similarity search described by a structured Search.

    When the model extracted a publish year from the question, apply it as a
    Chroma metadata filter; otherwise search the whole store unfiltered.
    """
    year_filter = (
        {'publish_year': {"$eq": search.publish_year}}
        if search.publish_year
        else None
    )
    return vector_store.similarity_search(query=search.query, filter=year_filter)


# Full pipeline: question -> structured Search -> (optionally filtered)
# similarity search over the vector store.
new_chain = chain | retrieval

# result = new_chain.invoke('videos on RAG published in 2023')
result = new_chain.invoke('RAG tutorial')
print([(doc.metadata['title'], doc.metadata['publish_year']) for doc in result])