import requests,re,aiohttp,aiofiles,asyncio,os,time
from lxml import etree

# When testing, avoid crawling too many pages at once: there is no rate limiting or
# proxy support, so requests go out very fast.
# Crawling too many pages got my IP flagged (the site starts redirecting instead of
# returning content). If that happens to you, you will need to work around it
# yourself; this script will not be updated further.



#   TODO 下载图片
#   TODO download one image
async def aio_down(img_url, session, name):
    """Download a single image and save it under the local ``img/`` directory.

    Args:
        img_url: Absolute URL of the image file.
        session: Shared ``aiohttp.ClientSession`` used for the request.
        name: File name to write as ``img/<name>``.

    Raises:
        aiohttp.ClientResponseError: If the server answers with an HTTP error
            status (4xx/5xx).
    """
    async with session.get(img_url) as resp:
        # Fail fast on HTTP errors instead of silently saving an error page
        # to disk as if it were image data.
        resp.raise_for_status()
        async with aiofiles.open(f'img/{name}', mode='wb') as f:
            await f.write(await resp.content.read())

#   TODO 解析每一页中每张图片的链接
#   TODO parse every image link on each listing page and schedule downloads
async def main(pagenum):
    """Crawl *pagenum* listing pages and download every thumbnail image found.

    Page 1 lives at ``/new/index.html``; pages >= 2 live at
    ``/new/index_{n}.html`` (site URL scheme — confirm if the site changes).

    Args:
        pagenum: Number of listing pages to crawl, starting from page 1.
    """
    tasks = []
    async with aiohttp.ClientSession() as session:
        # BUGFIX: the original iterated range(pagenum), which is 0-based: it
        # requested the non-existent index_0.html, matched the `== 1` special
        # case against the wrong index, and skipped the last requested page.
        for page in range(1, pagenum + 1):
            if page == 1:
                url = 'https://pic.netbian.com/new/index.html'
            else:
                url = f'https://pic.netbian.com/new/index_{page}.html'
            async with session.get(url) as resp:
                # Read the body once (the original awaited resp.text() twice
                # and printed the entire HTML as debug output).
                html = await resp.text()
                tree = etree.HTML(html)
                li_list = tree.xpath('//*[@id="main"]/div[3]/ul/li')
                for li in li_list:
                    img_url = 'https://pic.netbian.com' + ''.join(li.xpath('./a/img/@src')).strip()
                    # The image file name is the 7th path segment of the URL
                    # (index 6 after splitting on '/').
                    name = img_url.split('/')[6]
                    tasks.append(asyncio.create_task(aio_down(img_url, session, name)))
        # asyncio.wait() raises ValueError on an empty set of tasks (e.g. the
        # XPath matched nothing); gather() handles the empty case gracefully
        # and propagates any download exception instead of swallowing it.
        if tasks:
            await asyncio.gather(*tasks)

if __name__ == '__main__':
    '''Number of pages to crawl; keep it small (1-2) when testing.'''
    # BUGFIX: int() instead of eval() — never eval() raw user input
    # (arbitrary-code-execution hazard). Raises ValueError on non-numeric
    # input, which is the desired fail-fast behavior.
    pagenum = int(input("请输入需要爬取的页数："))
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists()/os.mkdir() pair.
    os.makedirs('img', exist_ok=True)
    starttime = time.time()
    # asyncio.run() creates, runs, and closes the event loop — replaces the
    # manual new_event_loop/set_event_loop/run_until_complete dance.
    asyncio.run(main(pagenum))
    # Enabled the original commented-out timer (it referenced an undefined
    # `start` variable; fixed to use `starttime`).
    print(f'下载完成，一共耗时：{time.time() - starttime}秒')