import os
import time
import random
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse, parse_qs, urlencode, urlunparse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Handles the case where poor VPN connectivity makes image-resource detection
# fail: the program reads gallery links from a text file, then loops over each
# page, fetching every image and downloading it into a folder named after its title.
# Example link: https://xchina.co/photoShow.php?server=1&id=642fd37201a00&pageSize=18&index=481

# Build the headless Chrome driver used for page rendering.
def setup_driver():
    """Create a headless Chrome WebDriver and a 20-second explicit wait.

    Returns:
        (driver, wait): the WebDriver instance and a WebDriverWait bound to it.
    """
    flags = [
        "--headless",
        "--disable-gpu",
        "--no-sandbox",
        "--disable-dev-shm-usage",
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    ]
    opts = Options()
    for flag in flags:
        opts.add_argument(flag)
    browser = webdriver.Chrome(options=opts)
    # Explicit waits are capped at 20 seconds.
    return browser, WebDriverWait(browser, 20)

# Module-level Selenium driver and wait, shared by all page-processing functions.
# NOTE(review): this launches a headless Chrome at import time — a side effect
# that runs even when the module is merely imported; consider deferring to main().
driver, wait = setup_driver()

def get_unique_filename(directory, filename):
    """Return a filename that does not yet exist inside *directory*.

    The original name is tried first; on collision, a numeric suffix is
    appended before the extension (``name_2.ext``, ``name_3.ext``, ...).
    """
    stem, suffix = os.path.splitext(filename)
    candidate = filename
    attempt = 1
    # Probe the filesystem until we find a free name.
    while os.path.exists(os.path.join(directory, candidate)):
        attempt += 1
        candidate = f"{stem}_{attempt}{suffix}"
    return candidate

def download_image(img_url, save_dir):
    """Download a single image into *save_dir*.

    Relative URLs are resolved against the site root; the filename is taken
    from the URL path (query string stripped) and forced to a known image
    extension.

    Returns:
        (True, saved_path) on success, (False, None) on any failure.
    """
    try:
        # Resolve relative image URLs against the site root.
        if not img_url.startswith(('http://', 'https://')):
            img_url = urljoin("https://xchina.co/", img_url)

        filename = os.path.basename(img_url.split('?')[0])
        if not filename.lower().endswith(('.jpg', '.jpeg', '.png', '.gif')):
            filename += '.jpg'

        # get_unique_filename() never returns an existing name, so the old
        # "file already exists, skip" check after it was dead code — removed.
        unique_filename = get_unique_filename(save_dir, filename)
        save_path = os.path.join(save_dir, unique_filename)

        print(f"正在下载: {img_url}")
        # Bug fix: use a context manager so the streamed connection is always
        # released, even on non-200 responses or mid-download errors.
        with requests.get(img_url, headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Referer": "https://xchina.co/"
        }, stream=True, timeout=30) as response:
            if response.status_code == 200:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(8192):
                        if chunk:
                            f.write(chunk)
                print(f"保存成功: {save_path}")
                return True, save_path

            print(f"下载失败，状态码: {response.status_code}")
            return False, None

    except Exception as e:
        # Best-effort download: log and report failure instead of crashing
        # the whole crawl.
        print(f"下载异常: {str(e)}")
        return False, None

def process_single_page(page_url, save_dir):
    """Render one gallery page with Selenium and download every image on it.

    Returns:
        True when at least one image downloaded successfully; False when no
        images were found, nothing downloaded, or any exception occurred.
    """
    print(f"\n正在处理页面: {page_url}")

    try:
        driver.get(page_url)

        # Wait for the main container, then pad with a random human-like pause.
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.container")))
        time.sleep(random.uniform(2, 5))

        html = driver.page_source
        soup = BeautifulSoup(html, 'html.parser')

        # Multi-level fallback: container images, then gallery images,
        # then any <img> that has a src attribute at all.
        img_tags = soup.select('div.container img[src]') or \
                  soup.select('div.gallery img[src]') or \
                  soup.find_all('img', {'src': True})

        if not img_tags:
            print("警告：未找到任何图片标签")
            print("调试信息 - 页面内容前1000字符:")
            print(html[:1000])
            return False

        print(f"找到 {len(img_tags)} 张图片")

        success_count = 0
        for img in img_tags:
            img_url = img['src']
            # Folder name comes from the alt text (max 50 chars). Bug fix:
            # the original only replaced '/', so characters invalid on
            # Windows (\ : * ? " < > |) or a trailing dot/space left by the
            # truncation made os.makedirs() fail, which marked the page as
            # failed and eventually aborted the whole crawl.
            alt_text = img.get('alt', '未命名').strip()[:50]
            alt_text = ''.join('_' if c in '\\/:*?"<>|' else c for c in alt_text).rstrip(' .')

            # One sub-directory per alt text (title).
            img_dir = os.path.join(save_dir, alt_text)
            os.makedirs(img_dir, exist_ok=True)

            success, _ = download_image(img_url, img_dir)
            if success:
                success_count += 1

            # Random delay between images to avoid anti-scraping bans.
            time.sleep(random.uniform(0.5, 2))

        print(f"本页完成: {success_count}/{len(img_tags)}")
        return success_count > 0

    except Exception as e:
        print(f"页面处理异常: {str(e)}")
        return False

def process_gallery(original_url, save_dir):
    """Crawl a whole gallery: rebuild a canonical URL, then walk pages by index.

    Stops after 2000 indices or when a page fails three times in a row.
    """
    print(f"\n开始处理图集: {original_url}")

    # Parse the query string of the supplied link.
    parsed = urlparse(original_url)
    params = parse_qs(parsed.query)

    # The gallery id is the only mandatory query parameter.
    if 'id' not in params:
        print("错误：缺少必需的id参数")
        return

    # Rebuild a canonical base URL, filling defaults for missing parameters
    # (target=photo, server=1) and pinning pageSize to 18; any index value
    # present in the original URL is deliberately dropped here.
    base_url = urlunparse(parsed._replace(query=urlencode([
        ('target', params.get('target', ['photo'])[0]),
        ('server', params.get('server', ['1'])[0]),
        ('id', params['id'][0]),
        ('pageSize', '18')
    ])))

    # Walk the gallery by appending an increasing index parameter.
    # NOTE(review): index advances by 1 per iteration while pageSize is 18 —
    # if 'index' is an item offset, consecutive requests overlap heavily;
    # confirm the site's index semantics before changing the step size.
    index = 0
    while index < 2000:  # hard cap on how far the crawl may go
        page_url = f"{base_url}&index={index}"

        # Up to 3 attempts per page; the for/else fires only when all fail.
        for retry in range(3):
            if process_single_page(page_url, save_dir):
                break  # page succeeded — move on to the next index

            print(f"第 {retry+1} 次重试...")
            time.sleep(random.uniform(3, 5))
        else:
            print("连续3次失败，终止爬取")
            break

        index += 1
        time.sleep(random.uniform(0.5, 1.2))  # polite delay between pages

def main():
    """Entry point: read gallery links from photo_page_link.txt and crawl each."""
    # Root directory for all downloads.
    save_dir = "xchina_downloads"
    os.makedirs(save_dir, exist_ok=True)

    # Bug fix: when photo_page_link.txt was missing, `links` was never
    # assigned (the fallback branch had been commented out) and the loop
    # below raised NameError. Initialize it and report the problem instead.
    links = []
    if os.path.exists("photo_page_link.txt"):
        with open("photo_page_link.txt", 'r', encoding='utf-8') as f:
            links = [line.strip() for line in f if line.strip()]
    else:
        print("错误：未找到 photo_page_link.txt，没有可处理的链接")

    # Process every link; ensure the browser is always closed, even when a
    # gallery raises (the original leaked the Chrome process on exceptions).
    try:
        for link in links:
            print(f"\n{'='*50}")
            print(f"开始处理: {link}")
            print(f"{'='*50}")
            process_gallery(link, save_dir)
    finally:
        driver.quit()

    print("\n所有任务已完成！")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
