import re
import time
import platform
import subprocess
import sys
import os
import argparse
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException, WebDriverException
from urllib.parse import urlparse

def configure_chrome_options():
    """Build and return a pre-configured headless Chrome ``Options`` object.

    The flags run Chrome headless in constrained environments (no sandbox,
    no /dev/shm reliance), fix a desktop viewport, silence logging, and
    reduce automation fingerprinting (custom UA, blink automation flag off).
    """
    opts = Options()
    cli_flags = (
        "--headless",
        "--no-sandbox",
        "--disable-dev-shm-usage",
        "--disable-gpu",
        "--window-size=1920,1080",
        "--disable-blink-features=AutomationControlled",
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36",
        "accept-language=zh-CN,zh;q=0.9",
        "--disable-software-rasterizer",
        "--disable-logging",
        "--log-level=3",
    )
    for flag in cli_flags:
        opts.add_argument(flag)
    # Hide the "Chrome is being controlled by automated software" hints.
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    opts.add_experimental_option("useAutomationExtension", False)
    return opts

def dynamic_scroll(driver, scroll_attempts=3):
    """Scroll to the page bottom up to ``scroll_attempts`` times to trigger
    lazy-loaded content, stopping early once the page height stops growing.
    """
    def page_height():
        return driver.execute_script("return document.body.scrollHeight")

    previous_height = page_height()
    for _ in range(scroll_attempts):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(1.5)  # give lazy-load requests time to complete
        current_height = page_height()
        if current_height == previous_height:
            break  # no new content appeared; stop scrolling
        previous_height = current_height

def extract_links_from_soup(soup, base_url):
    """Collect unique, validated absolute video URLs from a parsed page.

    Anchors inside ``section.video-list`` are tried first; if none match,
    every anchor on the page whose href fits ``/videos/<slug>/`` is scanned
    as a fallback. Each hit is normalized to ``{base_url}/videos/<slug>/``
    and passed through a final slug-format check before being returned.
    """
    slug_pattern = re.compile(r'/videos/([^/]+)/?$')

    def collect(anchors):
        """Return the set of normalized URLs for anchors matching the slug pattern."""
        found = set()
        for anchor in anchors:
            href = anchor.get('href')
            if not href:
                continue
            match = slug_pattern.search(href)
            if match:
                found.add(f"{base_url}/videos/{match.group(1)}/")
        return found

    unique_urls = collect(soup.select('section.video-list a[href*="/videos/"]'))
    if not unique_urls:
        # Fallback: the expected container was absent; scan the whole page.
        unique_urls = collect(soup.find_all('a', href=slug_pattern))

    # Final sanity filter on the slug format before returning.
    return [url for url in unique_urls
            if re.search(r'/videos/(\d+|[a-zA-Z0-9_-]+)/$', url)]

def movieLinks(url):
    """Fetch all video links from the given listing page.

    Launches a headless Chrome instance, applies anti-detection overrides,
    waits for the video list to render, then hands the page source to
    :func:`extract_links_from_soup`.

    Args:
        url: Absolute URL of the listing page to scrape.

    Returns:
        A non-empty list of absolute video URLs.

    Raises:
        ValueError: If no valid video links could be extracted.
        WebDriverException: On browser startup/navigation failure.
    """
    driver = None
    try:
        driver = webdriver.Chrome(options=configure_chrome_options())

        # Spoof the UA and hide navigator.webdriver to reduce bot detection.
        driver.execute_cdp_cmd('Network.setUserAgentOverride', {
            "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
        })
        driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
            'source': '''
                Object.defineProperty(navigator, 'webdriver', {
                    get: () => undefined
                })
            '''
        })

        driver.set_page_load_timeout(30)
        driver.implicitly_wait(5)

        print(f"正在访问目标URL: {url}")
        driver.get(url)

        parsed_uri = urlparse(url)
        base_url = f"{parsed_uri.scheme}://{parsed_uri.netloc}"

        try:
            WebDriverWait(driver, 15).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "section.video-list"))
            )
            WebDriverWait(driver, 10).until(
                lambda d: d.execute_script('return document.readyState') == 'complete'
            )
        except TimeoutException:
            # Partial load is tolerated: extraction may still succeed.
            print("警告：部分内容加载超时")

        page_source = driver.page_source
    finally:
        # Fix: original duplicated cleanup on success/except paths and used
        # `raise e`; if quit() itself failed in the except path it would mask
        # the real error. A finally block guarantees exactly one cleanup.
        if driver is not None:
            try:
                driver.quit()
            except WebDriverException:
                pass  # best-effort cleanup; never mask the original exception

    # Parsing happens after the browser has been released (as in the original).
    soup = BeautifulSoup(page_source, 'html.parser')
    links = extract_links_from_soup(soup, base_url)

    if not links:
        raise ValueError("未提取到有效视频链接")

    print(f"成功获取视频URL数量: {len(links)}")
    return links

def _resolve_start_page(requested):
    """Determine the first page to crawl.

    Priority: an explicit ``--page`` argument (if >= 1), then a page number
    recovered from ``error.log`` (which is deleted after reading), else 1.

    Args:
        requested: The ``--page`` CLI value, or ``None`` if not supplied.

    Returns:
        The 1-based page number to start from.
    """
    if requested is not None:
        if requested >= 1:
            print(f"用户指定起始页码: {requested}")
            return requested
        print("错误：页码必须≥1，已重置为默认值1")
        return 1

    start_page = 1
    if os.path.exists('error.log'):
        try:
            with open('error.log', 'r') as f:
                start_page = int(f.read().strip())
                print(f"检测到错误日志，从页码 {start_page} 开始")
        except (OSError, ValueError):
            # Fix: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit. Only a missing/unreadable or
            # non-numeric recovery file should fall back to page 1.
            pass
        os.remove('error.log')
    return start_page


def _load_seen_links(output_file):
    """Return the set of links already recorded in *output_file* (empty if absent)."""
    if not os.path.exists(output_file):
        return set()
    with open(output_file, 'r') as f:
        return {line.strip() for line in f}


def main():
    """CLI entry point: crawl listing pages sequentially, retrying failures,
    de-duplicating against output.txt, and recording the failing page number
    to error.log before exiting when retries are exhausted.
    """
    parser = argparse.ArgumentParser(description='jable.tv视频链接爬虫')
    parser.add_argument('--page', type=int, default=None,
                        help='指定起始页码（覆盖error.log）')
    args = parser.parse_args()

    # Listing URL template; {} is the 1-based page number.
    base_url = "https://jable.tv/categories/bdsm/{}/"

    start_page = _resolve_start_page(args.page)

    output_file = 'output.txt'
    seen_links = _load_seen_links(output_file)

    current_page = start_page
    while True:
        max_retries = 50  # maximum attempts per page
        retry_delay = 5   # seconds between attempts
        success = False

        for retry in range(max_retries):
            try:
                url = base_url.format(current_page)
                print(f"\n正在处理页码: {current_page} (尝试 {retry+1}/{max_retries})")
                print("="*50)

                links = movieLinks(url)

                # Keep only links not already persisted.
                new_links = [link for link in links if link not in seen_links]
                if not new_links:
                    print("没有新增链接，跳过本页")
                    current_page += 1
                    success = True
                    break

                # Append new links; output.txt doubles as the dedup store.
                with open(output_file, 'a') as f:
                    for link in new_links:
                        f.write(link + '\n')
                seen_links.update(new_links)
                print(f"新增 {len(new_links)} 个链接，已保存到 {output_file}")

                current_page += 1
                success = True
                break  # page handled; leave the retry loop

            except Exception as e:
                print(f"\n第 {retry+1} 次尝试失败: {str(e)}")
                if retry < max_retries - 1:
                    print(f"{retry_delay}秒后重试...")
                    time.sleep(retry_delay)
                else:
                    # Persist the failing page so the next run can resume.
                    print(f"已达到最大重试次数({max_retries}次)")
                    with open('error.log', 'w') as f:
                        f.write(str(current_page))
                    print(f"错误页码 {current_page} 已保存到 error.log")
                    sys.exit(1)

        if not success:
            break

if __name__ == "__main__":
    main()