import re
import time
import os
import requests
import pygame
import json
# Request headers: pretend to be a desktop Edge browser so the site serves normal HTML.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0'
}

# Base URLs.
# NOTE(review): the site redirects between domains — update `iurl` when it changes.
iurl = 'https://buondua.com'
iurl_base = iurl + '/tag/'
# Search parameter: URL-encoded tag/model name. Adjust to the content you want.
iurl_name = '%E7%96%AF%E7%8C%ABss-11613'
url_2 = ''



url_base = iurl + url_2
print(url_base)
# Pagination suffixes for the tag listing (the site pages in steps of 20).
ipage = ''
ipage1 = '?start=20'
ipage2 = '?start=40'
response = requests.get(iurl_base + iurl_name + ipage, headers=headers)
print(iurl_base + iurl_name + ipage)
ihtml = response.text

# Extract every gallery URL from the tag listing page.
idir_name = re.findall(r'<a class="item-link popunder" href="([^"]+)"', ihtml)
print(idir_name)

# Deduplicate while preserving first-seen order (a plain set would lose the order,
# and the original code computed a set but never used it).
filtered_urls = list(dict.fromkeys(idir_name))
print('nm', filtered_urls)
# Persist the gallery list so a later run can resume without re-scraping the index.
with open(f"{iurl_name}.json", "w", encoding="utf-8") as json_file:
    json.dump(filtered_urls, json_file, indent=4)

# Photo-download worker.
def download_photos(url_name):
    """Download every photo of one gallery into a folder named after its title.

    ``url_name`` is a gallery path (relative to ``iurl``) taken from the
    scraped index.  Already-downloaded files are skipped, every network call
    is retried up to ``max_retries`` times, and a gallery whose first page
    cannot be fetched is skipped instead of crashing.
    """
    max_retries = 5  # maximum attempts per HTTP request

    def _fetch(url):
        # Retry helper: GET `url`, return its HTML, or None once retries are exhausted.
        # (The original reused one shared retry counter and zeroed max_retries on
        # failure, which silently disabled all later retry loops.)
        for attempt in range(max_retries):
            try:
                resp = requests.get(url, headers=headers, timeout=10)  # timeout so we never hang
                resp.raise_for_status()
                return resp.text
            except requests.RequestException as e:
                print(f"下载失败: {e}. 正在重试 (第 {attempt + 1} 次)...")
                time.sleep(5)  # wait 5 seconds before retrying
        print("超过最大重试次数，跳过.")
        return None

    # First page: gives us the gallery title and the photo count.
    html = _fetch(iurl + url_name)
    if html is None:
        # Original code fell through here and crashed on an unbound dir_name.
        return

    # Folder name: CJK characters and digits extracted from the last <h1> title.
    dir_name_1 = re.findall(r'<h1>(.*?)</h1>', html)[-1]
    dir_name = ''.join(re.findall(r'[\u4e00-\u9fff0-9]+', dir_name_1))
    print(f"Folder Name: {dir_name}")

    # Photo count, parsed from a "-NN-photos" slug; 0 when absent.
    photos_num = re.findall(r'-(\d+)-photos', html)
    photos_num = int(photos_num[0]) if photos_num else 0
    print(f"Total Photos: {photos_num}")

    # Create the destination folder (no-op if it already exists).
    os.makedirs(dir_name, exist_ok=True)

    # The gallery shows 20 photos per page; +3 keeps the original generous margin.
    for page_num in range(1, (photos_num - 1) // 20 + 3):
        page_suffix = f"?page={page_num}" if page_num > 1 else ""
        print(page_suffix)
        print(iurl + url_name + page_suffix)

        page_html = _fetch(iurl + url_name + page_suffix)
        if page_html is None:
            continue  # skip this page instead of crashing on raise_for_status()

        # Extract image URLs from THIS page's HTML.
        # (The original parsed the previous page's stale `html` here.)
        urls = re.findall(r'(?s)<p>.*?<img[^>]+src="(.*?)"[^>]*>.*?</p>', page_html)
        print(urls)
        print(f"Downloading URLs from page {page_num}, found {len(urls)} URLs.")

        # Save each photo.
        for url in urls:
            time.sleep(0.3)  # throttle to avoid rate limiting
            # File name: last 5 chars of the URL stem, saved as .png.
            f_name = url.split('.')[-2]
            file_name = f_name[-5:] + '.png'
            file_path = os.path.join(dir_name, file_name)

            # Skip files that already exist (resume support).
            if os.path.exists(file_path):
                print(f"{file_name} 已经存在，跳过下载.")
                continue

            print('正在下载', file_name)

            for attempt in range(max_retries):
                try:
                    resp = requests.get(url, headers=headers, timeout=10)
                    resp.raise_for_status()
                    with open(file_path, 'wb') as file:
                        file.write(resp.content)
                    print(f"{file_name} 下载成功.")
                    break
                except requests.RequestException as e:
                    print(f"下载失败: {e}. 正在重试 (第 {attempt + 1} 次)...")
                    time.sleep(5)
            else:
                # Retries exhausted for this single photo; move on to the next one.
                print(f"超过最大重试次数，跳过 {file_name}.")

# Walk the scraped gallery list and download each one in turn.
for gallery_path in filtered_urls:
    print(f"开始下载 {gallery_path} 的照片...")
    download_photos(gallery_path)