import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse, parse_qs, urlencode, urlunparse
import os
import time
import urllib3
import re
import uuid
from PIL import Image

# NOTE(changelog): when a collection folder name already exists, a new folder
# with a numeric suffix is created instead of reusing the existing one.

# Suppress SSL warnings (all requests below are made with verify=False).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Browser-like request headers to reduce the chance of naive bot blocking.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.9",
    "Referer": "https://www.google.com/",
}

# Regex class of characters that are illegal in Windows file names
# (reserved punctuation plus ASCII control characters).
ILLEGAL_CHARS = r'[<>:"/\\|?*\x00-\x1f]'

def sanitize_filename(filename):
    """Strip characters that Windows forbids in file names.

    Removes reserved punctuation and ASCII control characters; everything
    else is passed through unchanged.
    """
    cleaned = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '', filename)
    return cleaned

def get_unique_filename(directory, filename):
    """Return a sanitized variant of *filename* that does not collide with
    any existing file in *directory*.

    The first candidate keeps the cleaned name as-is; subsequent candidates
    insert ``_2``, ``_3``, ... before the extension (an ``_1`` suffix is
    never produced).
    """
    # Drop characters Windows forbids in file names before splitting.
    stem, ext = os.path.splitext(re.sub(r'[<>:"/\\|?*\x00-\x1f]', '', filename))
    candidate = f"{stem}{ext}"
    attempt = 2
    while os.path.exists(os.path.join(directory, candidate)):
        candidate = f"{stem}_{attempt}{ext}"
        attempt += 1
    return candidate

def download_image(url, save_dir, max_retries=3):
    """Download an image from *url* into *save_dir*.

    The payload is written to a temporary name first, validated with PIL,
    and only then renamed to its unique final name, so a corrupt or partial
    download never occupies the real file name.

    Args:
        url: Direct URL of the image.
        save_dir: Existing directory to save the file into.
        max_retries: Number of download attempts before giving up.

    Returns:
        ``(True, final_path)`` on success, ``(False, None)`` after all
        retries are exhausted.
    """
    original_filename = os.path.basename(urlparse(url).path)
    # Guard against URLs whose path ends in '/': an empty basename would
    # otherwise produce an empty final file name.
    if not original_filename:
        original_filename = f"image_{uuid.uuid4().hex}.jpg"
    _, ext = os.path.splitext(original_filename)

    for retry in range(max_retries):
        temp_path = os.path.join(save_dir, f"temp_{uuid.uuid4()}{ext}")
        try:
            # Use the response as a context manager: with stream=True the
            # connection stays open until explicitly consumed or closed, so
            # bailing out on a non-200 status must not leak it.
            with requests.get(url, headers=HEADERS, stream=True,
                              timeout=30, verify=False) as response:
                if response.status_code != 200:
                    time.sleep(0.1)  # brief backoff before retrying
                    continue
                with open(temp_path, 'wb') as f:
                    for chunk in response.iter_content(8192):
                        if chunk:
                            f.write(chunk)

            # Validate the download; Image.verify() raises on corrupt data.
            try:
                with Image.open(temp_path) as img:
                    img.verify()
            except Exception as e:
                print(f"图片验证失败：{e}")
                os.remove(temp_path)
                continue  # validation failed, retry the download

            # Pick a collision-free final name and move the temp file over.
            final_filename = get_unique_filename(save_dir, original_filename)
            final_path = os.path.join(save_dir, final_filename)
            os.rename(temp_path, final_path)
            return True, final_path
        except Exception as e:
            print(f"下载失败：{str(e)}")
            # Clean up the partially written temp file, if any.
            if os.path.exists(temp_path):
                os.remove(temp_path)
            time.sleep(0.1)

    return False, None

def find_images_in_container(soup, container_class, base_url):
    """Collect image URLs and alt texts from the first matching container.

    Looks up the first ``<div>`` with class *container_class* in *soup* and
    gathers every ``<img>`` inside it.  Relative ``src`` values are resolved
    against *base_url*.

    Returns:
        A list of ``{"url", "alt"}`` dicts, or ``None`` when the container
        is missing/empty or holds no ``<img>`` tags.
    """
    container = soup.find('div', class_=container_class)
    if not container:
        return None

    tags = container.find_all('img')
    if not tags:
        return None

    results = []
    for tag in tags:
        src = tag.get('src')
        if not src:
            continue  # skip <img> tags without a usable source
        results.append({
            "url": urljoin(base_url, src),
            "alt": tag.get('alt', 'unknown').strip(),
        })
    return results

def _fetch_page(url, max_retries=5):
    """GET *url* with retries; return the Response on HTTP 200, else None.

    Counts an attempt for BOTH network exceptions and non-200 responses.
    The previous inline loop incremented its retry counter only in the
    ``except`` branch, so a server that kept answering with e.g. 404
    (without raising) spun forever.
    """
    for attempt in range(1, max_retries + 1):
        try:
            response = requests.get(url, headers=HEADERS, timeout=30, verify=False)
            if response.status_code == 200:
                return response
            print(f"访问失败，重试 {attempt}/{max_retries}: HTTP {response.status_code}")
        except Exception as e:
            print(f"访问失败，重试 {attempt}/{max_retries}: {e}")
        time.sleep(0.1)
    return None


def process_link(original_url, save_dir):
    """Rebuild the gallery URL from *original_url* and download its images.

    Requires an ``id`` query parameter; ``target``/``server`` default to
    ``photo``/``1`` and ``pageSize`` is pinned to 18.  Pages are walked by
    incrementing an ``index`` parameter until a page fails to load or
    contains no images.  Images are grouped into per-alt-text
    subdirectories of *save_dir*.
    """
    parsed = urlparse(original_url)
    params = parse_qs(parsed.query)

    if 'id' not in params:
        print(f"错误：URL {original_url} 缺少必需的id参数")
        return

    params.setdefault('target', ['photo'])
    params.setdefault('server', ['1'])

    # Rebuild the query in a fixed order expected by the site.
    ordered_params = [
        ('target', params['target'][0]),
        ('server', params['server'][0]),
        ('id', params['id'][0]),
        ('pageSize', '18'),
    ]
    base_url = urlunparse(parsed._replace(query=urlencode(ordered_params)))

    alt_dirs = {}  # alt text -> directory already created for it

    for index in range(6000):  # hard upper bound on pages to walk
        page_url = f"{base_url}&index={index}"
        print(f"尝试访问页面：{page_url}")

        response = _fetch_page(page_url)
        if response is None:
            print(f"页面 {index} 无法访问，终止下载")
            break

        soup = BeautifulSoup(response.text, 'html.parser')
        img_data = find_images_in_container(soup, "container", page_url)
        if not img_data:
            print(f"页面 {index} 未找到图片，终止下载")
            break

        for img in img_data:
            alt = img["alt"]
            if alt not in alt_dirs:
                # First time this alt is seen: create a fresh, uniquely
                # numbered directory (never reuse a pre-existing one).
                clean_alt = sanitize_filename(alt)
                base_collection = os.path.join(save_dir, clean_alt)
                collection_dir = base_collection
                counter = 1
                while os.path.exists(collection_dir):
                    collection_dir = f"{base_collection}_{counter}"
                    counter += 1
                os.makedirs(collection_dir, exist_ok=True)
                alt_dirs[alt] = collection_dir
            else:
                collection_dir = alt_dirs[alt]

            success, save_path = download_image(img["url"], collection_dir)
            if success:
                print(f"下载成功：{save_path}")
            else:
                print(f"下载失败：{img['url']}")

def main():
    """Entry point: read URLs from photo_page_link.txt and process each."""
    save_dir = "pic_save"
    os.makedirs(save_dir, exist_ok=True)

    link_file = "photo_page_link.txt"
    if not os.path.exists(link_file):
        print("错误：photo_page_link.txt 文件不存在！")
        return

    # Keep only non-blank lines, stripped of surrounding whitespace.
    with open(link_file, 'r', encoding='utf-8') as fh:
        links = [line.strip() for line in fh if line.strip()]

    for link in links:
        print(f"\n正在处理链接：{link}")
        process_link(link, save_dir)

    print("所有任务已完成！")

# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
