import os
import os.path
import time
import asyncio
import aiohttp
from bs4 import BeautifulSoup

# HTTP headers sent with every request: a desktop-Chrome User-Agent string
# (presumably to look like a normal browser rather than a script — the site
# may serve different content to unknown clients; verify if requests fail).
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
}

def enter(base_dir=r"D:\code\python\crawl\one", dir_name="pics"):
    """Create (if needed) the picture folder and make it the working directory.

    Changes the process CWD twice: first into *base_dir*, then into
    *dir_name* beneath it, so later code can save files with bare names.

    Args:
        base_dir: Project directory to start from (default kept for
            backward compatibility with the original hard-coded path).
        dir_name: Sub-folder that will hold the downloaded pictures.
    """
    os.chdir(base_dir)
    # makedirs(exist_ok=True) is race-free, unlike exists()+mkdir().
    os.makedirs(dir_name, exist_ok=True)
    os.chdir(dir_name)

async def down(index="http://m.wufazhuce.com/one/", max_page=3061, total=100):
    """Walk article pages, extract each picture URL, and download concurrently.

    Fetches pages ``max_page, max_page-1, …`` (``total`` pages in all),
    parses out the ``<img class="item-picture-img">`` source from each,
    and schedules a save() task per picture. Page fetches are sequential;
    the downloads they spawn overlap with later fetches.

    Args:
        index: Base URL; the page number is appended to form each page URL.
        max_page: Highest (first) page number to fetch.
        total: How many consecutive pages to walk, counting down.
    """
    tasks = []
    async with aiohttp.ClientSession() as session:
        for page in range(max_page, max_page - total, -1):
            url = index + str(page)
            try:
                async with session.get(url, headers=headers) as res:
                    content = await res.read()
                    soup = BeautifulSoup(content, "lxml")
                    pic_url = soup.find("img", "item-picture-img").get("src")
                    # Schedule the download so it runs while we fetch the next page.
                    tasks.append(asyncio.create_task(save(page, session, pic_url)))
            except Exception:
                # Best-effort crawl: a missing page or parse failure (e.g. the
                # img tag absent -> AttributeError) skips that page only.
                print(f"{url} 404 NOT FOUND !!!")

        # Wait for every download to finish before the session closes.
        await asyncio.gather(*tasks)


async def save(page, session, pic_url):
    """Download the picture at *pic_url* and write it to ``<page>.jpg``.

    Streams the response body to disk in 512-byte chunks. Any failure
    (network or filesystem) is reported and swallowed so one bad picture
    does not abort the rest of the crawl.

    Args:
        page: Page number; used as the output file's stem.
        session: An open aiohttp client session to download through.
        pic_url: Direct URL of the image.
    """
    file_name = f"{page}.jpg"
    try:
        async with session.get(pic_url, headers=headers) as resp:
            with open(file_name, "wb") as out:
                chunk = await resp.content.read(512)
                while chunk:
                    out.write(chunk)
                    chunk = await resp.content.read(512)
            print(f"{file_name} Download OK (=^-ω-^=)")

    except Exception as err:
        print(err)


def main():
    """Entry point: prepare the download folder, run the crawler, report timing."""
    enter()
    started_at = time.time()
    asyncio.run(down())
    elapsed = time.time() - started_at
    print(f"Spend {elapsed:.1f}S")
    

if __name__ == "__main__":
    main()
