import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import os
import time
import urllib3

# Suppress urllib3's InsecureRequestWarning: every request below is made with
# verify=False (SSL certificate validation disabled), which would otherwise
# print a warning per request.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Browser-like headers sent with every HTTP request to avoid trivial
# bot-blocking based on the default requests User-Agent.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.9",
    "Referer": "https://www.google.com/",
}

def get_unique_filename(directory, filename):
    """
    Return a filename that does not collide with any existing file in
    *directory*.

    The original name is tried first; on collision a numeric suffix is
    appended: "name.jpg" -> "name_1.jpg" -> "name_2.jpg" -> ...
    (The previous implementation skipped "_1" and jumped straight to "_2".)

    Args:
        directory: Directory whose contents are checked for collisions.
        filename: Desired filename, possibly with an extension.

    Returns:
        A filename (not a full path) unique within *directory* at the time
        of the check.
    """
    base, ext = os.path.splitext(filename)
    candidate = filename
    counter = 1
    while os.path.exists(os.path.join(directory, candidate)):
        candidate = f"{base}_{counter}{ext}"
        counter += 1
    return candidate

def download_image(url, save_dir, max_retries=3):
    """
    Download an image and save it into *save_dir*, retrying on failure.

    Args:
        url: Absolute image URL.
        save_dir: Existing directory to save the file into.
        max_retries: Number of attempts before giving up.

    Returns:
        (True, saved_path) on success, (False, None) after all retries fail.
    """
    # Derive the filename from the URL path only, dropping any query string
    # or fragment (e.g. "img.jpg?v=2" -> "img.jpg").
    filename = os.path.basename(url.partition('?')[0].partition('#')[0])
    if not filename:
        # URL ended with '/'; fall back to a generic name so saving still works.
        filename = "image"

    retries = 0
    while retries < max_retries:
        try:
            # Re-resolve uniqueness on every attempt: earlier downloads in the
            # same run may have created new files in the meantime.
            unique_filename = get_unique_filename(save_dir, filename)
            save_path = os.path.join(save_dir, unique_filename)

            # verify=False matches the module-wide warning suppression above;
            # SSL validation is deliberately skipped for this scraper.
            response = requests.get(url, headers=HEADERS, stream=True, timeout=30, verify=False)
            try:
                if response.status_code == 200:
                    with open(save_path, 'wb') as f:
                        for chunk in response.iter_content(8192):
                            if chunk:
                                f.write(chunk)
                    return True, save_path
            finally:
                # Release the connection even when the status is not 200.
                response.close()
        except Exception as e:
            # Best-effort retry loop: report the error instead of silently
            # discarding it, then fall through to the next attempt.
            print(f"Download attempt {retries + 1}/{max_retries} failed for {url}: {e}")
        retries += 1
        time.sleep(0.1)
    return False, None

def find_images_in_container(soup, container_class, base_url):
    """
    Collect image URLs and alt texts from every <img> tag inside the first
    <div> whose class matches *container_class*.

    Args:
        soup: Parsed BeautifulSoup document.
        container_class: CSS class of the <div> to search within.
        base_url: Page URL used to resolve relative image sources.

    Returns:
        A list of {"url": absolute_url, "alt": alt_text} dicts, or None when
        the container is missing or contains no <img> tags.
    """
    container = soup.find('div', class_=container_class)
    if not container:
        return None

    images = container.find_all('img')
    if not images:
        return None

    results = []
    for tag in images:
        src = tag.get('src')
        if not src:
            continue  # skip <img> tags without a usable source
        results.append({
            "url": urljoin(base_url, src),
            # Missing alt attributes default to 'unknown'.
            "alt": tag.get('alt', 'unknown').strip(),
        })
    return results

def process_link(base_url, save_dir):
    """
    Walk the numbered pages of one collection link and download every image
    found in the page's "container" div.

    Pages are fetched as f"{base_url}{index}" for index 0, 1, 2, ... and the
    walk stops at the first page that cannot be fetched or has no images.

    Args:
        base_url: Link prefix; the page index is appended directly to it.
        save_dir: Root directory; images go into one sub-folder per alt text.
    """
    max_pages = 900  # hard upper bound on page indices to try
    index = 0
    while index < max_pages:
        page_url = f"{base_url}{index}"
        print(f"尝试访问页面：{page_url}")

        # Fetch the page with a bounded retry loop. A non-200 response must
        # also count as a failed attempt — the previous version only
        # incremented `retries` on exceptions, so a plain 404 looped forever.
        max_retries = 5
        retries = 0
        response = None
        while retries < max_retries:
            try:
                response = requests.get(page_url, headers=HEADERS, timeout=30, verify=False)
                if response.status_code == 200:
                    break
                print(f"访问页面 {index} 失败，重试 {retries + 1}/{max_retries}: HTTP {response.status_code}")
            except Exception as e:
                print(f"访问页面 {index} 失败，重试 {retries + 1}/{max_retries}: {e}")
            retries += 1
            time.sleep(0.1)
        if retries == max_retries:
            print(f"页面 {index} 访问失败，停止下载")
            break

        soup = BeautifulSoup(response.text, 'html.parser')

        # No images in the container is treated as the end of the collection.
        img_data = find_images_in_container(soup, "container", page_url)
        if not img_data:
            print(f"页面 {index} 的容器内未找到图片，停止下载")
            break

        for img in img_data:
            img_url = img["url"]
            alt_text = img["alt"]

            # One sub-folder per alt text groups images of the same collection.
            # NOTE(review): alt text is used verbatim as a directory name —
            # characters invalid on the target filesystem would fail here.
            collection_dir = os.path.join(save_dir, alt_text)
            os.makedirs(collection_dir, exist_ok=True)

            success, save_path = download_image(img_url, collection_dir)
            if success:
                print(f"下载成功：{save_path}")
            else:
                print(f"图片下载失败：{img_url}")

        index += 1

def main():
    """
    Entry point: read collection links from pic_links.txt (one per line,
    blank lines ignored) and download each collection into pic_save/.
    """
    save_dir = "pic_save"
    os.makedirs(save_dir, exist_ok=True)

    links_file = "pic_links.txt"
    if not os.path.exists(links_file):
        print(f"错误：{links_file} 文件不存在！")
        return

    with open(links_file, 'r', encoding='utf-8') as fh:
        raw_lines = fh.readlines()

    for raw in raw_lines:
        link = raw.strip()
        if not link:
            # Blank lines are allowed in the links file; just skip them.
            continue
        print(f"\n处理链接：{link}")
        process_link(link, save_dir)

    print("所有链接处理完成！")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
