import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import time

def download_large_images(url, save_path, index: str, min_size: int = 1024):
    """Scrape the page at *url* and download every image whose URL contains
    *index* and whose reported size exceeds *min_size* bytes into *save_path*.

    Args:
        url: Page to scrape for ``<img>`` tags.
        save_path: Directory to store downloaded images. If it already
            exists, the page is treated as already fetched and skipped.
        index: Substring an image URL must contain to be considered.
        min_size: Minimum Content-Length in bytes for an image to be kept.
            Defaults to 1024. NOTE(review): the original comment said
            "1MB" but the code compared against ``1 * 1024`` (1 KB); the
            historical behavior is preserved as the default.

    Returns:
        Number of images downloaded; 0 if *save_path* already existed.

    Raises:
        requests.RequestException: If fetching the page itself fails
            (individual image failures are caught and logged instead).
    """
    # A pre-existing target directory means a previous run already
    # handled this page — skip it entirely.
    if os.path.exists(save_path):
        print(f'{save_path} already exists, we will not download {url}.')
        return 0

    # We only reach this point when the directory is missing, so create
    # it unconditionally (the original re-checked existence here — dead code).
    os.makedirs(save_path)

    # Fetch and parse the page; raise on HTTP errors. A timeout prevents
    # the scraper from hanging indefinitely on a stalled connection.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    count = 0  # images actually written to disk
    for img in soup.find_all('img'):
        src = img.get('src')
        if not src:
            continue

        # Resolve relative srcs against the page URL, then filter to
        # only the images belonging to the requested set.
        img_url = urljoin(url, src)
        if index not in img_url:
            continue

        try:
            img_response = requests.get(img_url, stream=True, timeout=30)
            img_response.raise_for_status()
            time.sleep(0.1)  # small delay to be polite to the server

            # Keep only images larger than min_size bytes, judged by the
            # Content-Length header (treated as 0 when absent).
            if int(img_response.headers.get('content-length', 0)) > min_size:
                img_name = os.path.basename(urlparse(img_url).path)
                img_path = os.path.join(save_path, img_name)
                print(f'Downloading {img_name}')
                # Stream the body to disk in chunks to bound memory use.
                with open(img_path, 'wb') as f:
                    for chunk in img_response.iter_content(chunk_size=8192):
                        f.write(chunk)
                count += 1
        except requests.RequestException as e:
            # Best-effort: log and continue with the remaining images.
            print(f"Failed to download {img_url}: {e}")

    return count

# Example listing page: https://bestgirlsexy.com/page/3/?s=tang+an+qi
# Example post page:    https://bestgirlsexy.com/xiuren%e7%a7%80%e4%ba%ba%e7%bd%91-no-7585-tang-an-qi/
if __name__ == '__main__':
    base_url = 'https://bestgirlsexy.com/xiuren秀人网-no'
    post_url = 'tang-an-qi'

    # Batches fetched in earlier runs (kept for the record):
    # [8862, 9004, 8972, 8935, 8824, 8786, 8748]
    # [8683, 8714, 8644, 8607, 8571, 8531, 8942, 8485]
    # [8492, 8485, 8433, 8396, 8357, 8326]
    # [8287, 8250, 8212, 8174, 8142, 8110, 8065, 8034]
    # [8001, 7964, 7928, 7899, 7864, 7825, 7794, 7760]
    # [7726, 7689, 7655]
    index_to_download = [7621, 7585, 7578]
    # next: 015

    # One-off single-post fetch (disabled):
    # total_url = 'https://bestgirlsexy.com/tang-an-qi-%e5%94%90%e5%ae%89%e7%90%aa-vol-0016-%e5%86%85%e8%b4%ad%e6%97%a0%e6%b0%b4%e5%8d%b0-%e7%bb%b4%e4%bf%ae%e5%86%b0%e7%ae%b1/'
    # download_large_images(total_url, f'./images/tang-an-qi', '0016')

    for idx in index_to_download:
        page_url = f'{base_url}-{idx}-{post_url}'
        target_dir = f'./images/{post_url}/{idx}'
        download_large_images(page_url, target_dir, f'{idx}')
