# TODO: some requests fail with HTTP 400 / timeout — investigate.
# Crawl YouTube subtitles with LangChain and build a vector database.
# pip install youtube-transcript-api pytube
import datetime
import os

from langchain_community.document_loaders import YoutubeLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_openai.chat_models.base import BaseChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from youtube_transcript_api import YouTubeTranscriptApi

# Route traffic through the local Clash proxy so that dependency downloads
# (e.g. chroma) and YouTube requests succeed; alternatively disable the proxy
# and download directly.
# NOTE: proxy URLs need an explicit scheme — without "http://" some HTTP
# clients reject or misinterpret the value.
os.environ['http_proxy'] = 'http://127.0.0.1:7890'
os.environ['https_proxy'] = 'http://127.0.0.1:7890'

# LangSmith tracing. The variable LangChain actually reads is
# LANGCHAIN_TRACING_V2 (there is no "V3" flag), so the original setting
# silently disabled tracing.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY: hard-coded API keys — move these to environment variables or a
# secrets manager (and rotate them) before sharing/committing this script.
os.environ["LANGCHAIN_API_KEY"] = "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"
os.environ["LANGCHAIN_PROJECT"] = "langchain-community-demo"

os.environ["OPENAI_API_KEY"] = "sk-1dd16a258a73428d910d38c782e1c94f"

# deepseek-reasoner : DeepSeek-R1
# deepseek-chat     : DeepSeek-V3
model_name = "deepseek-reasoner"
deepseek_api_key = "sk-1dd16a258a73428d910d38c782e1c94f"

# DeepSeek exposes an OpenAI-compatible API, so the generic OpenAI chat client
# works by pointing openai_api_base at DeepSeek's endpoint.
model = BaseChatOpenAI(
    model=model_name,
    openai_api_key=deepseek_api_key,
    openai_api_base='https://api.deepseek.com',
    max_tokens=1024,
    streaming=True,
)

# Embedding model used to vectorise the transcript chunks.
embeddings = OpenAIEmbeddings(model='text-embedding-3-small')

# Directory where the Chroma vector store is persisted on disk.
persist_directory = 'chroma_data_dir'

# YouTube videos whose transcripts will be indexed.
urls = [
    "https://www.youtube.com/watch?v=O1s7ZqNe_XA",
]


# ---- Load transcripts -------------------------------------------------------
# Build Document objects; add_video_info=True also fetches the video's
# metadata (title, author, publish date, ...) via pytube.
docs = []
for url in urls:
    try:
        docs.extend(YoutubeLoader.from_youtube_url(url, add_video_info=True).load())
    except Exception as e:
        # Best effort: a single unreachable/blocked video (e.g. the HTTP 400 /
        # timeout mentioned in the file-level TODO) must not abort the run.
        print(f"Failed to load video {url}: {e}")

print(len(docs))
if not docs:
    # Fail with a clear message instead of an IndexError on docs[0] below.
    raise SystemExit("No transcripts could be loaded; nothing to index.")
print(docs[0])

# Enrich each document with the publish year so it can be used as a metadata
# filter at query time.
for doc in docs:
    doc.metadata["publish_year"] = datetime.datetime.strptime(
        doc.metadata["publish_date"], "%Y-%m-%d %H:%M:%S"
    ).year


print(docs[0].metadata)
# First 500 characters of the first video's transcript.
print(docs[0].page_content[:500])

# ---- Build the vector store -------------------------------------------------
# chunk_overlap keeps a little context shared across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=30)
split_doc = text_splitter.split_documents(docs)

# Embed the chunks and persist the Chroma database to disk.
vectorstore = Chroma.from_documents(split_doc, embeddings, persist_directory=persist_directory)