import re
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from config import Config

class VideoLinkCrawler:
    """Crawl video links from Bilibili search-result pages.

    Drives a Selenium WebDriver through paginated search results,
    harvesting video URLs (identified by their BV ids) until
    ``Config.TARGET_VIDEOS`` links are collected or a page cap is hit.
    Links are kept in first-seen order, which matches the search
    ranking, so truncating to the target count yields the true top N.
    """

    # BV id format: "BV" followed by exactly 10 alphanumeric characters.
    # Compiled once at class level instead of on every page scrape.
    _BV_PATTERN = re.compile(r'BV[0-9A-Za-z]{10}')

    def __init__(self, driver):
        """
        Args:
            driver: a ready Selenium WebDriver used for all page access.
        """
        self.driver = driver
        self.video_links = set()  # deduplicated set of collected links

    def get_top300_video_links(self):
        """Crawl search pages and collect up to ``Config.TARGET_VIDEOS`` links.

        Returns:
            list[str]: video URLs in first-seen (ranking) order, truncated
            to the target count. The list is also written to a text file
            and mirrored into ``self.video_links``.
        """
        print(f"正在获取综合排序前{Config.TARGET_VIDEOS}的视频...")

        # Insertion-ordered dict used as an ordered set: dedupes while
        # preserving the order links were first seen. A plain set would
        # lose ranking order, making the final [:TARGET_VIDEOS] slice
        # an arbitrary sample instead of the top N.
        ordered_links = {}
        page = 1
        max_pages = 15  # hard cap so the crawl always terminates

        while len(ordered_links) < Config.TARGET_VIDEOS and page <= max_pages:
            print(f"正在爬取第 {page} 页...")

            search_url = f"{Config.BASE_SEARCH_URL}&page={page}"
            print(f"访问搜索页面: {search_url}")

            try:
                self.driver.get(search_url)
                time.sleep(5)  # allow the initial page load to settle

                wait = WebDriverWait(self.driver, 15)

                # Multiple selectors because Bilibili's markup differs
                # across page versions / A-B tests.
                selectors = [
                    ".video-item",
                    ".bili-video-card",
                    "[class*='video']",
                    ".container .content",
                    ".video-list .video-item"
                ]

                element_found = self._wait_for_elements(wait, selectors)

                # Fallback: even if no known element matched, the raw HTML
                # may still contain harvestable BV ids.
                if not element_found:
                    if 'BV' in self.driver.page_source:
                        print("页面中包含BV号，继续处理")
                        element_found = True

                # Still nothing usable on this page — skip it.
                if not element_found:
                    print(f"第{page}页无法找到视频元素")
                    page += 1
                    continue

                # Trigger lazy loading before scraping.
                self._scroll_page()

                previous_count = len(ordered_links)
                for link in self._extract_links_from_page():
                    ordered_links.setdefault(link, None)
                new_links_count = len(ordered_links) - previous_count

                print(f"第{page}页新增 {new_links_count} 个链接，总共 {len(ordered_links)} 个链接")

                # No new links usually means the results are exhausted.
                if new_links_count == 0:
                    print("没有新链接，停止爬取")
                    break

                page += 1
                time.sleep(2)  # polite delay between pages

            except Exception as e:
                # A single bad page should not abort the whole crawl.
                print(f"第{page}页获取视频链接时出错: {e}")
                page += 1
                continue

        # Truncate to the target count; order is first-seen (ranking) order.
        links_list = list(ordered_links)[:Config.TARGET_VIDEOS]
        print(f"最终找到 {len(links_list)} 个视频链接")

        self._save_links_to_file(links_list)

        self.video_links = set(links_list)
        return links_list

    def _wait_for_elements(self, wait, selectors):
        """Try each CSS selector in turn; return True once one is present.

        Args:
            wait: a ``WebDriverWait`` bound to ``self.driver``.
            selectors: iterable of CSS selector strings to try in order.

        Returns:
            bool: True if any selector located an element, else False.
        """
        for selector in selectors:
            try:
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
                print(f"使用选择器找到元素: {selector}")
                return True
            except Exception:
                # Typically TimeoutException — just try the next selector.
                continue
        return False

    def _scroll_page(self):
        """Scroll down in stages so lazily-loaded content gets rendered."""
        # Three incremental scrolls to 1/3, 2/3, 3/3 of the page height.
        for i in range(3):
            self.driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight * {}/3);".format(i + 1))
            time.sleep(2)  # wait after each scroll

        # Final scroll to the very bottom.
        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(3)  # give bottom content time to load

    @staticmethod
    def _bv_ids_from_html(html):
        """Return BV ids found in *html*, de-duplicated, in first-seen order.

        Args:
            html: raw page source to scan.

        Returns:
            list[str]: unique BV ids in the order they first appear.
        """
        # dict.fromkeys dedupes while preserving insertion order.
        return list(dict.fromkeys(VideoLinkCrawler._BV_PATTERN.findall(html)))

    def _extract_links_from_page(self):
        """Extract video URLs from the currently loaded page.

        Returns:
            list[str]: de-duplicated video URLs in first-seen order.
        """
        page_source = self.driver.page_source

        # Primary method: regex-match BV ids straight out of the HTML.
        bv_ids = self._bv_ids_from_html(page_source)
        print(f"正则匹配找到 {len(bv_ids)} 个BV号")

        links = [f"https://www.bilibili.com/video/{bvid}" for bvid in bv_ids]
        seen = set(links)

        # Backup method: harvest anchor hrefs pointing at /video/BV...
        try:
            video_elements = self.driver.find_elements(By.CSS_SELECTOR, 'a[href*="/video/BV"]')
            print(f"从元素中找到 {len(video_elements)} 个视频链接")
            for element in video_elements:
                href = element.get_attribute('href')
                if href and 'BV' in href and href not in seen:
                    seen.add(href)
                    links.append(href)
        except Exception as e:
            # Best-effort fallback only — regex results are still returned.
            print(f"从元素提取链接时出错: {e}")

        return links

    def _save_links_to_file(self, links_list):
        """Write the numbered link list to a text file under the output dir.

        Args:
            links_list: ordered list of video URLs to persist.
        """
        # Ensure the output directory exists before writing.
        Config.create_output_dir()

        links_path = f'{Config.OUTPUT_DIR}/top300_video_links.txt'
        with open(links_path, 'w', encoding='utf-8') as f:
            for i, link in enumerate(links_list, 1):
                f.write(f"{i}. {link}\n")

        print(f"链接已保存到: {links_path}")