#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：spider_learning 
@File ：bilibili_video_url_spider.py
@IDE  ：PyCharm 
@Author ：李涵彬
@Date ：2024/4/29 下午8:28 
"""
import json
import os
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import WebDriverException, TimeoutException
from tqdm import tqdm
from typing import List, Set

# Configure root logging: timestamped INFO-level messages for crawl progress.
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Launch one shared Chrome instance used by every function in this module.
# NOTE(review): the chromedriver path is machine-specific (absolute Windows
# path under a user profile) — consider an env var or webdriver-manager so
# the script runs on other machines; confirm the pinned 131.x driver matches
# the installed Chrome version.
chrome_driver_path = r'C:\Users\luohe\.wdm\drivers\chromedriver\win64\131.0.6778.69\chromedriver-win64\chromedriver.exe'
service = Service(executable_path=chrome_driver_path)
driver = webdriver.Chrome(service=service)


def get_total_pages(up_main_page: str) -> int:
	"""
	Return the number of pages in the uploader's video list.

	Navigates the module-level ``driver`` to *up_main_page* and reads the
	pager element (class ``be-pager-total``) whose text contains "共N页".
	Falls back to 1 on timeout, driver failure, or an unexpected pager
	text, so the caller can still crawl at least the first page.
	"""
	try:
		driver.get(up_main_page)
		wait = WebDriverWait(driver, 10)  # wait at most 10s for the pager to render
		# presence_of_element_located raises TimeoutException rather than
		# returning a falsy value, so no extra None-check is needed here
		# (the original `if total_pages_span:` else-branch was unreachable).
		total_pages_span = wait.until(
			EC.presence_of_element_located((By.CLASS_NAME, "be-pager-total"))
		)
		# Pager text looks like "共N页": take the number between 共 and 页.
		return int(total_pages_span.text.split('共')[1].split('页')[0])
	except (WebDriverException, ValueError, IndexError) as e:
		# TimeoutException is a WebDriverException subclass; ValueError /
		# IndexError cover an unexpected pager-text format.
		logging.error("获取总页数时发生错误：%s", e)
		return 1


def get_video_page_urls(up_main_page: str, total_pages: int) -> List[str]:
	"""
	Build the URL of every paginated video-list page in an uploader's space.

	Pages are numbered 1..total_pages; each URL sorts by publish date
	(``order=pubdate``) with no category or keyword filter.
	"""
	return [
		f"{up_main_page}?tid=0&pn={n}&keyword=&order=pubdate"
		for n in range(1, total_pages + 1)
	]


def get_video_links(video_page_url: str) -> List[str]:
	"""
	Scrape every video link shown on one video-list page.

	Each list item's ``data-aid`` attribute is turned into a canonical
	``https://www.bilibili.com/video/<aid>/`` URL.  Returns an empty list
	on timeout or any other scraping error so the caller can continue.
	"""
	try:
		driver.get(video_page_url)
		waiter = WebDriverWait(driver, 10)  # up to 10s for the list to render
		locator = (By.CSS_SELECTOR, 'li.small-item.fakeDanmu-item')
		items = waiter.until(EC.presence_of_all_elements_located(locator))
		links = []
		for item in items:
			aid = item.get_attribute('data-aid')
			links.append(f"https://www.bilibili.com/video/{aid}/")
		return links
	except TimeoutException:
		logging.error("等待视频加载超时")
		return []
	except Exception as e:
		logging.error("获取视频链接时发生错误：%s", e)
		return []


def save_links_to_file(links: List[str], filename: str) -> bool:
	"""
	Append links to *filename*, skipping any already stored there.

	Files are read and written as UTF-8 (consistent with the rest of the
	script) instead of the platform default encoding.  New links are
	written in their original order, de-duplicated.

	Returns True when at least one new link was written, False when every
	link was already present or an I/O error occurred.
	"""
	try:
		existing_links: Set[str] = set()
		if os.path.exists(filename):
			with open(filename, 'r', encoding='utf-8') as f:
				existing_links = set(f.read().splitlines())

		# dict.fromkeys de-duplicates while preserving input order, unlike
		# the set difference it replaces (which wrote in arbitrary order).
		new_links = [link for link in dict.fromkeys(links) if link not in existing_links]
		if not new_links:
			logging.info("没有发现新的链接。")
			return False

		with open(filename, 'a', encoding='utf-8') as f:
			f.writelines(link + '\n' for link in new_links)
		logging.info("发现%d个新链接，已保存到%s文件中。", len(new_links), filename)
		return True
	except OSError as e:
		# Narrowed from bare Exception: only file-system errors are expected.
		logging.error("保存链接时发生错误：%s", e)
		return False


def crawl_up_video_links(up_main_page: str, filename: str) -> None:
	"""
	Crawl all video links of one uploader and persist them to *filename*.

	Stops early as soon as a page contributes no new links — an incremental
	crawl: once saved links reappear, the remaining (older) pages are
	assumed to already be on disk.
	"""
	try:
		total_pages = get_total_pages(up_main_page)
		for page_url in get_video_page_urls(up_main_page, total_pages):
			video_links = get_video_links(page_url)
			# Save only this page's links instead of re-saving an
			# ever-growing accumulator: earlier pages are already in the
			# file, so the result is identical but avoids O(n^2) rework.
			if not save_links_to_file(video_links, filename):
				break
	except Exception as e:
		# Top-level boundary for one uploader: log and move on to the next.
		logging.error("爬取视频链接时发生错误：%s", e)


if __name__ == '__main__':
	try:
		# up_details.json maps uploader display name -> numeric uid
		# (inferred from how user_name/uid are used below — confirm schema).
		with open("up_details.json", 'r', encoding='utf-8') as file:
			up_details = json.load(file)
		# One progress-bar step per uploader; each uploader gets its own
		# output file of video links.
		for user_name, uid in tqdm(up_details.items(), total=len(up_details)):
			logging.info("正在爬取up主：%s", user_name)
			up_main_page = f"https://space.bilibili.com/{uid}/video"
			# NOTE(review): user_name is interpolated directly into the
			# filename — a name containing path separators or characters
			# reserved on Windows would break open(); verify inputs.
			filename = f"{user_name}_video_links.txt"
			crawl_up_video_links(up_main_page, filename)
	finally:
		# Always release the shared browser, even if crawling raised.
		driver.quit()
