import datetime
import os 
from operator import itemgetter
from typing import List, Optional
import urllib.parse
import random
from pytube import YouTube

from langchain.chains.sql_database.query import create_sql_query_chain
from langchain_chroma import Chroma
from langchain_community.document_loaders import YoutubeLoader
from langchain_core.documents.base import Document
from langchain_community.utilities import SQLDatabase
from langchain_community.tools import QuerySQLDatabaseTool
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.messages import SystemMessage, HumanMessage
from langgraph.prebuilt import chat_agent_executor
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic.v1 import BaseModel, Field

# Proxy + API credentials for this demo script.
# SECURITY NOTE(review): real-looking API keys are hard-coded below. They are
# now effectively public — rotate them and load from the environment or a
# .env file instead of committing them to source.
os.environ['http_proxy'] = 'http://127.0.0.1:7890'
os.environ['https_proxy'] = 'http://127.0.0.1:7890'
# Fix: LangSmith reads LANGSMITH_TRACING (or the legacy LANGCHAIN_TRACING_V2);
# the previous "LANGSMITH_TRACING_V2" is not a recognized variable, so tracing
# was silently never enabled.
os.environ["LANGSMITH_TRACING"] = "true"
os.environ["LANGSMITH_API_KEY"] = "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"
os.environ["OPENAI_BASE_URL"] = "https://api.chatanywhere.tech/v1"
os.environ["OPENAI_API_KEY"] = "sk-pbXvhNj37SZ5SUBzC1Kx4LeXrsnT9EJNDL6mT2Lj2IbgohKa"
os.environ["TAVILY_API_KEY"] = "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"


# Chat model used by main() to turn user questions into structured queries.
model = ChatOpenAI(model='gpt-4o-mini')
# Embedding model shared by indexing (generative_model) and retrieval().
embeddings = OpenAIEmbeddings(model='text-embedding-3-small')
# On-disk location of the persisted Chroma vector database.
persist_dir = "F:\\projects\\langchainDemo\\chroma_data_dir"


def generative_model():
    """Build the on-disk vector store from a fixed list of YouTube tutorials.

    For each video: download the transcript via ``YoutubeLoader``, look up the
    publish date via ``pytube``, tag the transcript with ``publish_year``
    metadata (used as a Chroma filter in :func:`retrieval`), split everything
    into chunks, and persist the embeddings to ``persist_dir``.
    """
    urls = [
        "https://www.youtube.com/watch?v=HAn9vnJy6S4",
        "https://www.youtube.com/watch?v=dA1cHGACXCo",
        "https://www.youtube.com/watch?v=ZcEMLz27sL4",
        "https://www.youtube.com/watch?v=hvAPnpSfSGo",
        "https://www.youtube.com/watch?v=EhlPDL4QrWY",
        "https://www.youtube.com/watch?v=mmBo8nlu2j0",
        "https://www.youtube.com/watch?v=rQdibOsL1ps",
        "https://www.youtube.com/watch?v=28lC4fqukoc",
        "https://www.youtube.com/watch?v=es-9MgxB-uc",
        "https://www.youtube.com/watch?v=wLRHwKuKvOE",
        "https://www.youtube.com/watch?v=ObIltMaRJvY",
        "https://www.youtube.com/watch?v=DjuXACWYkkU",
        "https://www.youtube.com/watch?v=o7C9ld6Ln-M",
    ]
    docs = []
    for url in urls:
        loader = YoutubeLoader.from_youtube_url(url)
        # The original rebuilt the URL from loader.video_id — we already have
        # `url`, so query pytube with it directly.
        publish_date = YouTube(url).publish_date
        if publish_date is None:
            # pytube returns None when it cannot parse the upload date;
            # skip the video rather than crash on the attribute access.
            continue
        transcript = loader.load()
        if not transcript:
            # No transcript available for this video — nothing to index.
            continue
        docs.append(
            Document(
                page_content=transcript[0].page_content,
                # `.year` replaces the roundabout int(strftime("%Y")).
                metadata={"publish_year": publish_date.year},
            )
        )
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=30)
    split_doc = text_splitter.split_documents(docs)
    # Persist the embedded chunks so retrieval() can reload them from disk.
    Chroma.from_documents(
        split_doc,
        embeddings,
        persist_directory=persist_dir,
    )


class Search(BaseModel):
    """Structured search request the LLM extracts from a user question.

    Used as the schema for ``model.with_structured_output(Search)`` in
    :func:`main` and consumed by :func:`retrieval`.
    """
    # Text used for similarity search over transcript chunks.
    # NOTE(review): Field(None, ...) makes this effectively optional despite
    # the plain `str` annotation — confirm the LLM always fills it in, or
    # mark the field required with Field(...).
    query: str = Field(None, description='Similarity search query applied to video transcripts.')
    # Optional exact-match filter on the video's publish year; None = no filter.
    publish_year: Optional[int] = Field(None, description='Year video was published')

def retrieval(search: Search) -> List[Document]:
    """Similarity-search the persisted vector store for ``search.query``.

    When ``search.publish_year`` is set, results are restricted to documents
    whose ``publish_year`` metadata matches exactly ("$eq" is Chroma's
    equality-filter operator).
    """
    # Reload the vector store that generative_model() persisted to disk.
    store = Chroma(persist_directory=persist_dir, embedding_function=embeddings)
    year_filter = (
        {'publish_year': {"$eq": search.publish_year}}
        if search.publish_year
        else None
    )
    return store.similarity_search(search.query, filter=year_filter)

def main():
    """Answer a hard-coded question about the video library end to end.

    Pipeline: question -> LLM structured-output query analysis (Search) ->
    metadata-filtered similarity search -> print each hit's year and text.
    """
    system = """
    You are an expert at converting user questions into database queries. \
    You have access to a database of tutorial videos about a software library for building LLM-powered applications. \
    Given a question, return a list of database queries optimized to retrieve the most relevant results.
    If there are acronyms or words you are not familiar with, do not try to rephrase them.
    """
    query_prompt = ChatPromptTemplate.from_messages(
        [('system', system), ('human', "{question}")]
    )
    # The LLM fills in a Search instance (query text + optional year filter).
    query_analyzer = (
        {'question': RunnablePassthrough()}
        | query_prompt
        | model.with_structured_output(Search)
    )
    retrieval_chain = query_analyzer | retrieval
    hits = retrieval_chain.invoke('videos on RAG')
    for hit in hits:
        print(hit.metadata['publish_year'])
        print(hit.page_content)
        print("-----------------------------------------------------")

if __name__ == "__main__":
    # One-time bootstrap: uncomment generative_model() to (re)build the vector
    # store on disk before querying it via main().
    # generative_model()
    main()
    # Ad-hoc debugging of the persisted store:
    # vectorstore = Chroma(persist_directory=persist_dir, embedding_function=embeddings)
    # result = vectorstore.similarity_search_with_score("videos on RAG published in 2024")
    # print(result[0][0].metadata)
