import os
import time
import random
import re
import requests
from bs4 import BeautifulSoup
import urllib3
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Handles flaky-network conditions and waits for resources to finish loading.
# Replace sample_url with the direct link to the first page of an xchina
# photo-album category; the script then collects the photo-id direct link of
# every album, to be downloaded with the companion downloader script.

# Suppress InsecureRequestWarning spam caused by the verify=False requests below
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# 配置Selenium（建议使用）
def setup_selenium():
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36")
    driver = webdriver.Chrome(options=chrome_options)
    wait = WebDriverWait(driver, 15)
    return driver, wait

# --- Basic scraper configuration ---
BASE_URL = 'https://xchina.store'  # mirror domain used to build absolute links
HEADERS = {
    # Desktop Chrome UA plus a same-site Referer so requests look like browsing
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Referer': BASE_URL
}
PROXIES = {
    # NOTE(review): placeholder values — every plain-requests fetch will fail
    # until these are replaced with a real proxy endpoint.
    'http': 'http://your_proxy:port',  # replace with an actual proxy
    'https': 'http://your_proxy:port'
}

def safe_request(url, max_retries=3, use_selenium=False):
    """Fetch *url* with retries and return its HTML, or ``None`` on failure.

    Args:
        url: Absolute URL to fetch.
        max_retries: Number of attempts before giving up.
        use_selenium: When True, render the page via the module-level
            Selenium ``driver``; otherwise do a plain ``requests`` GET.

    Returns:
        str | None: The page source, or ``None`` if every attempt failed.
    """
    for attempt in range(max_retries):
        try:
            if use_selenium:
                # NOTE(review): relies on the global `driver` created in main();
                # calling with use_selenium=True before main() raises NameError.
                driver.get(url)
                # Random pause lets slow resources finish loading and makes
                # the request cadence look less bot-like.
                time.sleep(random.uniform(2, 5))
                return driver.page_source
            response = requests.get(
                url,
                headers=HEADERS,
                proxies=PROXIES,
                verify=False,  # cert problems on mirrors; warnings silenced at import
                timeout=15,
            )
            response.raise_for_status()
            return response.text
        except Exception as e:  # broad on purpose: retry any network/driver error
            print(f"请求失败 (尝试 {attempt+1}/{max_retries}): {str(e)}")
            # Back off before the next try — but not after the final one,
            # where the original wasted an extra 3-8 s for nothing.
            if attempt + 1 < max_retries:
                time.sleep(random.uniform(3, 8))
    return None

def process_page(original_url, f_photo, f_link, max_pages=200):
    """Walk the numbered pages of an album listing and record photo links.

    Args:
        original_url: Listing-page URL, expected to end in ``<digits>.html``.
        f_photo: Open text file; one absolute album-page URL written per line.
        f_link: Open text file; one photoShow download URL written per line.
        max_pages: Upper bound on listing pages to visit (default 200,
            matching the previous hard-coded limit).
    """
    # Turn ".../4.html" into ".../{}.html" so each page number can be slotted in.
    template_url = re.sub(r'\d+\.html$', '{}.html', original_url)
    paginated = '{}' in template_url
    if not paginated:
        # URL didn't match the pagination pattern; fetching the identical URL
        # max_pages times would be pointless, so visit it exactly once.
        max_pages = 1

    for page in range(1, max_pages + 1):
        current_url = template_url.format(page) if paginated else original_url
        print(f"正在处理: {current_url}")

        html = safe_request(current_url, use_selenium=True)
        if not html:
            print(f"跳过无法访问的页面: {current_url}")
            continue

        soup = BeautifulSoup(html, 'html.parser')
        # Two selectors to survive the site's alternating markup variants.
        photo_items = soup.find_all('div', class_='item photo') or \
                     soup.find_all('div', class_=re.compile(r'photo-item'))

        for item in photo_items:
            a_tag = item.find('a')
            if not a_tag or not a_tag.get('href'):
                continue

            href = a_tag['href']
            # Site-relative hrefs get the base domain prepended.
            full_url = f"{BASE_URL}{href}" if href.startswith('/') else href
            f_photo.write(full_url + '\n')

            # Hex album id embedded in the href drives the photoShow endpoint.
            id_match = re.search(r'id-([a-f0-9]+)\.html$', href)
            if id_match:
                photo_show_url = f"{BASE_URL}/photoShow.php?target=photo&server=1&id={id_match.group(1)}&pageSize=18"
                f_link.write(photo_show_url + '\n')

        # Polite pause between listing pages.
        time.sleep(random.uniform(1, 3))

def main():
    """Entry point: start the headless browser and harvest album links.

    Writes album-page URLs to ``photo_page.txt`` and photoShow download URLs
    to ``photo_page_link.txt``; both files are overwritten on every run.
    """
    # safe_request() reaches for this module-level driver, hence the global.
    global driver
    driver, _wait = setup_selenium()  # the WebDriverWait is currently unused

    try:
        with open('photo_page.txt', 'w', encoding='utf-8') as f_photo, \
             open('photo_page_link.txt', 'w', encoding='utf-8') as f_link:

            # Sample URL (could be replaced by reading from a file).
            sample_url = "https://xchina.co/photos/album-3/4.html"
            process_page(sample_url, f_photo, f_link)

        # Announce success only when no exception escaped the with-block;
        # the original printed this from `finally` even after a failure.
        print("所有链接已成功提取并保存！")
    finally:
        driver.quit()

if __name__ == "__main__":
    main()
