import asyncio
import os
import time

import aiohttp
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

# Randomized User-Agent so the scraper is less likely to be blocked by the site.
headers = {"User-Agent": UserAgent().random}
# NOTE: the bare string below is a module-level note ("code optimized and
# wrapped using coroutines"); it is a no-op expression statement, not a docstring.
"""
使用协程进行优化和封装的代码
"""


async def download_image(session, image_url):
    """Download one image and save it under ./img/.

    Args:
        session: an open aiohttp.ClientSession to issue the GET with.
        image_url: absolute URL of the image to fetch.

    Side effects: creates ./img if missing, writes the response bytes to
    ./img/<last URL path segment>, and prints a success/failure message.
    """
    async with session.get(image_url, headers=headers) as response:
        if response.status == 200:  # only persist on HTTP 200
            filename = image_url.split("/")[-1]  # last URL segment as the file name
            os.makedirs("./img", exist_ok=True)  # robustness: ensure target dir exists
            # Bug fix: the path previously ignored `filename` (every download
            # clobbered the same bogus file); join the directory with it.
            file_path = os.path.join("./img", filename)
            with open(file_path, 'wb') as f:  # binary mode: raw image bytes
                f.write(await response.read())
            print(f"图片 {filename} 下载成功！")
        else:
            print(f"图片 {image_url} 下载失败！")


# 异步抓取和下载图片
# Asynchronously scrape one listing page and download every image found on it.
async def fetch_and_download_images(url):
    """Fetch *url*, collect the ``data-original`` URL of every ``<img>`` tag
    (presumably the lazy-load source used by the site — confirm against the
    page markup), and download them all concurrently via download_image.

    Args:
        url: listing-page URL to scrape.
    """
    async with aiohttp.ClientSession() as session:  # one session reused for all downloads
        async with session.get(url, headers=headers) as response:
            html = await response.text()
            soup = BeautifulSoup(html, 'html.parser')
            images = soup.find_all('img')
            # Keep only tags that actually carry the "data-original" attribute.
            image_urls = [img['data-original'] for img in images
                          if 'data-original' in img.attrs]

            # Idiom: feed one download coroutine per URL straight into gather
            # instead of building an intermediate task list with append().
            await asyncio.gather(*(download_image(session, u) for u in image_urls))


async def main():
    """Scrape listing pages 1–2 concurrently, then report the elapsed time.

    Page range is hard-coded to ``range(1, 3)``; each page gets its own
    fetch-and-download task and all run under one gather() call.
    """
    # perf_counter is monotonic — the right clock for measuring elapsed time
    # (time.time can jump if the wall clock is adjusted mid-run).
    start_time = time.perf_counter()

    # One scrape-and-download task per listing page.
    tasks = [
        fetch_and_download_images(f"https://www.pkdoutu.com/article/list/?page={page}")
        for page in range(1, 3)
    ]
    await asyncio.gather(*tasks)  # run all page tasks concurrently

    download_time = time.perf_counter() - start_time
    print(f"下载完成！总共耗时：{download_time:.2f} 秒")


if __name__ == '__main__':
    # Entry point: start the asyncio event loop and run the scraper.
    asyncio.run(main())
