import os
import re
import asyncio
import aiohttp

# Folder that all downloaded images are written into.
pic_folder = 'Pic'
# exist_ok=True is race-free, unlike the exists()-then-makedirs() pattern,
# which can raise FileExistsError if the directory appears in between.
os.makedirs(pic_folder, exist_ok=True)

# Browser-like User-Agent so the target server does not reject plain
# script requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}

# Image URLs already downloaded (or currently in progress), used to skip
# duplicates across all page tasks.
downloaded_images = set()

async def download_image(session, match):
    """Download one image URL into the Pic folder, skipping duplicates.

    Args:
        session: an open aiohttp.ClientSession used for the GET request.
        match: absolute image URL extracted from a page.

    Errors are caught and printed, never raised, so one bad image does not
    abort the surrounding asyncio.gather.
    """
    if match in downloaded_images:
        return
    # Mark the URL as in-progress *before* awaiting anything. The original
    # code added it only after the download finished, so concurrent tasks
    # for the same URL all passed the membership check and downloaded the
    # same image simultaneously.
    downloaded_images.add(match)
    try:
        # Basename of the URL path; drop any query string, since characters
        # like '?' are invalid in filenames on Windows.
        pic_name = match.split('/')[-1].split('?')[0]
        print(pic_name)
        async with session.get(match, headers=headers) as response:
            response.raise_for_status()
            file_name = os.path.join(pic_folder, pic_name)
            with open(file_name, 'wb') as f:
                # Stream to disk in 1 KiB chunks instead of buffering the
                # whole image in memory.
                while True:
                    chunk = await response.content.read(1024)
                    if not chunk:
                        break
                    f.write(chunk)
    except Exception as e:
        # Forget the failed URL so a later page containing it can retry.
        downloaded_images.discard(match)
        print(f"下载图片 {match} 时发生错误: {e}")

async def fetch_page(session, url):
    """Fetch one HTML page and download every <img> it references.

    Args:
        session: an open aiohttp.ClientSession shared by all page tasks.
        url: page URL to scrape.

    Errors are caught and printed so one failing page does not abort the
    whole crawl.
    """
    try:
        async with session.get(url, headers=headers) as response:
            response.raise_for_status()
            html = await response.text()
            # Match the src attribute regardless of attribute order and of
            # quote style. The original pattern r'<img\s+src="([^"]+)"'
            # required src to be the FIRST attribute and double-quoted,
            # silently missing tags like <img class="x" src='...'>.
            pattern = r'<img\b[^>]*?\bsrc\s*=\s*["\']([^"\']+)["\']'
            matches = re.findall(pattern, html)
            # NOTE(review): src values may be relative URLs; download_image
            # assumes they are absolute — confirm against the target site.
            tasks = [download_image(session, match) for match in matches]
            await asyncio.gather(*tasks)
    except Exception as e:
        print(f"请求页面 {url} 时发生错误: {e}")

async def main():
    """Crawl pages 9978..11780 and download their images concurrently.

    Concurrency is capped with a semaphore: the original version launched
    ~1800 simultaneous requests, which can exhaust sockets/file descriptors
    and trip server-side rate limits.
    """
    sem = asyncio.Semaphore(20)

    async def bounded_fetch(session, url):
        # Limit how many fetch_page coroutines run at the same time.
        async with sem:
            await fetch_page(session, url)

    async with aiohttp.ClientSession() as session:
        tasks = [bounded_fetch(session, f'***************************************{i}')
                 for i in range(9978, 11781)]
        await asyncio.gather(*tasks)

if __name__ == "__main__":
    asyncio.run(main())
    
    
