import requests
from bs4 import BeautifulSoup
import urllib3
import re

# Suppress the InsecureRequestWarning that urllib3 emits for every request
# made with verify=False (see the requests.get call below).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Browser-like request headers so the site serves the normal HTML pages.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/120.0.0.0 Safari/537.36'
    ),
}

# Site root, used to absolutize relative hrefs found on listing pages.
base_url = 'https://xchina.store'

# Seed listing URL; its trailing "<page>.html" segment is replaced with a
# "{}" placeholder so any page number can be formatted in.
original_url = "https://xchina.store/photos/album-8/2.html"
_page_suffix = re.compile(r'\d+\.html$')
template_url = _page_suffix.sub('{}.html', original_url)

# Open both output files for the duration of the crawl:
#   photo_page.txt      - absolute album page URLs
#   photo_page_link.txt - derived photoShow.php pagination URLs
with open('photo_page.txt', 'w', encoding='utf-8') as f_photo, \
     open('photo_page_link.txt', 'w', encoding='utf-8') as f_link:

    # Walk listing pages 1..200.
    for page in range(1, 201):
        current_url = template_url.format(page)

        try:
            # SSL verification is disabled (see urllib3.disable_warnings above);
            # presumably the site's certificate chain is broken — TODO confirm.
            response = requests.get(current_url, headers=headers, verify=False, timeout=10)
            response.raise_for_status()  # raise on non-2xx status codes
        except requests.RequestException as e:
            # FIX: the original called exit(1) here, so a single transient
            # timeout — or the guaranteed 404 once the album has fewer than
            # 200 pages — aborted the whole crawl. Log the failure and keep
            # going with the next page instead. The exception is also
            # narrowed to requests.RequestException, which covers connection
            # errors, timeouts and the HTTPError from raise_for_status().
            print(f"访问失败: {current_url}，错误信息: {str(e)}")
            continue

        # Parse the listing page and collect every photo card.
        soup = BeautifulSoup(response.text, 'html.parser')
        photo_items = soup.find_all('div', class_='item photo')

        for item in photo_items:
            a_tag = item.find('a')
            if not a_tag or not a_tag.get('href'):
                continue

            # Absolutize site-relative hrefs against the site root.
            href = a_tag['href']
            full_url = f"{base_url}{href}" if href.startswith('/') else href
            f_photo.write(full_url + '\n')

            # Album hrefs end in "id-<hex>.html"; reuse the hex id to build
            # the paginated photoShow endpoint for the same album.
            id_match = re.search(r'id-([a-f0-9]+)\.html$', href)
            if id_match:
                photo_show_url = f"{base_url}/photoShow.php?target=photo&server=1&id={id_match.group(1)}&pageSize=18"
                f_link.write(photo_show_url + '\n')

print("所有链接已成功提取并保存！")
