#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：python_learning 
@File ：获取youtube主页链接.py
@IDE  ：PyCharm 
@Author ：李涵彬
@Date ：2025/2/13 下午1:21 
"""

import time

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager


def fetch_video_links(url):
	"""
	Collect all video links from a YouTube channel's videos page.

	Launches Chrome via Selenium, dismisses the cookie-consent dialog if
	present, scrolls to the bottom until no further content loads, then
	parses the rendered HTML with BeautifulSoup.

	Args:
		url (str): URL of the YouTube channel's videos page.

	Returns:
		list: Absolute video URLs found on the page.
	"""
	# Set up the Chrome driver (webdriver_manager downloads a matching binary).
	service = Service(ChromeDriverManager().install())
	options = webdriver.ChromeOptions()
	# Optional: headless mode (no visible browser window).
	# options.add_argument("--headless")
	driver = webdriver.Chrome(service=service, options=options)

	try:
		# Open the target page and give it time to render.
		driver.get(url)
		time.sleep(5)

		# Click the consent button if the cookie dialog appears.
		# Best-effort by design, but only swallow "element not found" —
		# the original bare `except:` hid every error, including
		# KeyboardInterrupt and WebDriver failures.
		try:
			agree_button = driver.find_element(
				By.XPATH, '//button[@aria-label="同意所有选项"]')
			agree_button.click()
			time.sleep(2)
		except NoSuchElementException:
			pass

		# Scroll to the bottom repeatedly so lazy-loaded videos appear;
		# stop once the document height no longer grows.
		scroll_pause_time = 2  # seconds to wait after each scroll
		last_height = driver.execute_script(
			"return document.documentElement.scrollHeight")
		while True:
			driver.execute_script(
				"window.scrollTo(0, document.documentElement.scrollHeight);")
			time.sleep(scroll_pause_time)
			new_height = driver.execute_script(
				"return document.documentElement.scrollHeight")
			if new_height == last_height:
				break
			last_height = new_height

		# Parse the fully rendered page.
		soup = BeautifulSoup(driver.page_source, 'html.parser')

		# Extract video anchors; hrefs are site-relative, so prefix the
		# YouTube origin to make them absolute.
		video_links = [
			f"https://www.youtube.com{video['href']}"
			for video in soup.find_all('a', {'id': 'video-title-link'})
			if 'href' in video.attrs
		]

		return video_links

	finally:
		# Always release the browser, even when an error occurred above.
		driver.quit()


# Mapping of channel display name (used as the output filename) to its
# videos-page URL.
youtube_dict = {
	"罗晟Criss的交易工作室": "https://www.youtube.com/@luoshengcriss/videos"
}

# Fetch each channel's video links and persist them to a text file.
for key, url in youtube_dict.items():
	# Skip anything that is not a YouTube URL.
	if not url.startswith("https://www.youtube.com/"):
		print(f"非法的URL地址：{url}")
		continue

	# Scrape all video links from the channel page.
	video_links = fetch_video_links(url)

	# Save one link per line, in a file named after the channel.
	filename = f"{key}.txt"
	with open(filename, "w", encoding="utf-8") as file:
		file.writelines(f"{link}\n" for link in video_links)

	# Bug fix: the original printed the literal text "(unknown)" instead
	# of interpolating the actual filename.
	print(f"已保存文件：{filename}")
