from bs4 import BeautifulSoup
import requests
from tqdm import tqdm
import logging

# Configure the logger to write error-level messages to a file.
logging.basicConfig(filename='errorlog.log', level=logging.ERROR)

# A web-scraping class that fetches the HTML of a given URL and extracts specific elements.
class WebPageScraper:
    """Fetch a URL's HTML once and extract specific elements from it.

    The HTML is downloaded eagerly in ``__init__``; if the download fails,
    ``html_content`` is ``None`` and all extraction methods return an empty
    result instead of raising.
    """

    # Seconds to wait for the HTTP request — without a timeout, requests.get
    # can hang forever on an unresponsive server.
    REQUEST_TIMEOUT = 10

    def __init__(self, url):
        # Remember the target URL (also used in error-log messages).
        self.url = url
        # Download the page immediately; may be None on failure.
        self.html_content = self.fetch_page()

    def fetch_page(self):
        """Return the page's HTML text, or None if the request fails."""
        try:
            response = requests.get(self.url, timeout=self.REQUEST_TIMEOUT)
            # Raise for non-2xx status codes so they are logged below.
            response.raise_for_status()
            return response.text
        except requests.exceptions.RequestException as e:
            # Covers HTTP errors, connection failures, and timeouts alike.
            logging.error(f"Error fetching page {self.url}: {e}")
            return None
        except Exception as e:
            logging.error(f"An unexpected error occurred fetching page {self.url}: {e}")
            return None

    def find_ul_with_classes(self):
        """Return the <ul> element carrying the summary-video classes, or None."""
        # Guard: fetch_page() may have failed, leaving html_content as None;
        # passing None to BeautifulSoup would raise a misleading TypeError.
        if self.html_content is None:
            return None
        try:
            soup = BeautifulSoup(self.html_content, 'html.parser')
            return soup.find('ul', class_='summary-verti-list summary-verti-list--summaryVideo')
        except Exception as e:
            logging.error(f"Error finding ul with classes {self.url}: {e}")
            return None

    def get_a_tags_href(self):
        """Return the href of every <a> inside the target <ul>'s <li> items.

        Returns an empty list when the <ul> is absent or the page fetch failed.
        Note: hrefs may be None for <a> tags without an href attribute.
        """
        ul_element = self.find_ul_with_classes()
        if ul_element is None:
            return []
        # Collect hrefs from all anchors nested inside the list items.
        return [
            a_tag.get('href')
            for item in ul_element.find_all('li')
            for a_tag in item.find_all('a')
        ]

    def find_first_video_src(self):
        """Return the src of the first <video> tag on the page, or None."""
        # Guard against a failed fetch (html_content is None).
        if self.html_content is None:
            return None
        try:
            soup = BeautifulSoup(self.html_content, 'html.parser')
            video_tag = soup.find('video')
            # No <video> element on the page → None.
            return video_tag.get('src') if video_tag else None
        except Exception as e:
            logging.error(f"Error finding first video src {self.url}: {e}")
            return None

# Script entry point: run the scraping workflow using the class above.
if __name__ == "__main__":
    # Scrape the channel overview page and collect the item links from it.
    channel_page = WebPageScraper('https://bbs.mihoyo.com/ys/obc/channel/map/80/212?bbs_presentation_style=no_header')
    links = channel_page.get_a_tags_href()

    # Accumulate every video source URL discovered on the detail pages.
    videosrc_list = []
    # Visit each detail page, showing progress with tqdm.
    for link in tqdm(links, total=len(links)):
        # Turn the relative link into an absolute URL.
        link = 'https://bbs.mihoyo.com' + link
        detail_page = WebPageScraper(link)
        videosrc = detail_page.find_first_video_src()
        if videosrc is None:
            # No <video> element found (or the fetch failed) — log and continue.
            logging.error(f"Element not found for link {link}")
        else:
            videosrc_list.append(videosrc)

    # Report how many video sources were collected in total.
    print(f"Total video sources found: {len(videosrc_list)}")