import json
import os
import re
import time
from urllib.parse import quote, urljoin

import requests
from bs4 import BeautifulSoup


class VideoCrawler:
    """Crawl Bilibili search results and download the matching videos.

    Downloads are written to ``self.output_dir`` (created on construction).
    Note: scraping search pages is fragile; the official API is preferred
    for real projects (see comment in ``get_video_links``).
    """

    # Per-request timeouts (seconds): a hung connection must not stall the crawl.
    SEARCH_TIMEOUT = 10
    DOWNLOAD_TIMEOUT = 30

    def __init__(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            # Bilibili rejects media/search requests without a Referer header.
            'Referer': 'https://www.bilibili.com/'
        }
        self.output_dir = 'videos'
        os.makedirs(self.output_dir, exist_ok=True)

    def get_video_links(self, search_keyword, max_pages=3):
        """Collect video-page URLs from up to *max_pages* of search results.

        Args:
            search_keyword: free-text search term; URL-quoted before use so
                characters like ``&`` or spaces cannot corrupt the query.
            max_pages: number of result pages to scan (1-based).

        Returns:
            list[str]: absolute video-page URLs. Pages that fail to load are
            skipped (logged) rather than aborting the whole crawl.
        """
        video_links = []
        # Bilibili search endpoint (for real projects prefer the official API
        # over scraping — the page structure changes without notice).
        for page in range(1, max_pages + 1):
            url = (f'https://search.bilibili.com/all'
                   f'?keyword={quote(search_keyword)}&page={page}')
            try:
                response = requests.get(url, headers=self.headers,
                                        timeout=self.SEARCH_TIMEOUT)
                response.raise_for_status()
            except Exception as e:
                # Best-effort: one bad page should not kill the crawl.
                print(f"搜索页请求失败: {e}")
                continue

            soup = BeautifulSoup(response.text, 'lxml')
            # Selector matches the current search-card markup; adjust if the
            # page structure changes.
            for item in soup.select('a.bili-video-card__info--title'):
                href = item.get('href')
                if href:  # anchors can legitimately lack an href — skip them
                    video_links.append(urljoin('https://www.bilibili.com', href))
            time.sleep(1)  # throttle request rate
        return video_links

    def get_video_source(self, video_url):
        """Extract the actual media URL from a video detail page.

        The address lives in an embedded JavaScript/JSON blob as the first
        ``"baseUrl"`` entry.

        Returns:
            str | None: the decoded media URL, or None on any failure
            (network error, HTTP error, or no match).
        """
        try:
            response = requests.get(video_url, headers=self.headers,
                                    timeout=self.SEARCH_TIMEOUT)
            response.raise_for_status()
            match = re.search(r'"baseUrl":"(.*?)"', response.text)
            if match:
                # The captured span is a JSON string body, so decode it as
                # JSON to resolve escapes (\/ -> /, \u0026 -> &).  The naive
                # .replace('\\', '') mangled \uXXXX into literal 'uXXXX'.
                return json.loads(f'"{match.group(1)}"')
        except Exception as e:
            print(f"获取视频源失败: {e}")
        return None

    def download_video(self, video_source, video_title):
        """Stream *video_source* to ``<output_dir>/<sanitized title>.mp4``.

        Args:
            video_source: direct media URL, or falsy to skip.
            video_title: raw title; characters illegal in Windows file names
                are replaced with ``_``.
        """
        if not video_source:
            print("无视频源地址")
            return

        # Replace characters that are illegal in (Windows) file names.
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', video_title)
        file_path = os.path.join(self.output_dir, f"{safe_title}.mp4")

        try:
            print(f"开始下载: {safe_title}")
            with requests.get(video_source, headers=self.headers, stream=True,
                              timeout=self.DOWNLOAD_TIMEOUT) as r:
                r.raise_for_status()
                with open(file_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        if chunk:  # skip keep-alive chunks
                            f.write(chunk)
            print(f"下载完成: {file_path}")
        except Exception as e:
            print(f"下载失败: {e}")

    def run(self, search_keyword, max_pages=3):
        """Main pipeline: search, resolve each source URL, download.

        Args:
            search_keyword: term passed to ``get_video_links``.
            max_pages: result pages to scan.
        """
        video_links = self.get_video_links(search_keyword, max_pages)
        print(f"共找到 {len(video_links)} 个视频")

        for link in video_links:
            # Use the last path segment (typically the BV id) as the title,
            # with any query string stripped so it stays out of the file name.
            title = link.split('/')[-1].split('?')[0]
            video_source = self.get_video_source(link)
            self.download_video(video_source, title)
            time.sleep(2)  # throttle request rate


if __name__ == "__main__":
    # Demo run: crawl two pages of search results for the given keyword.
    VideoCrawler().run("Python教程", max_pages=2)