import asyncio
import aiohttp
import aiofiles
from lxml import etree
from urllib.parse import urljoin

# aiohttp on Windows needs the selector event loop (the default Proactor loop
# breaks some of its transports).  WindowsSelectorEventLoopPolicy only exists
# on Windows builds of CPython, so guard the call — the original unconditional
# call raised AttributeError on Linux/macOS.
if hasattr(asyncio, 'WindowsSelectorEventLoopPolicy'):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# Base listing URL; urls[0] is also used to resolve relative hrefs.
urls = ['https://www.toopic.cn/dnbz/']
urls1 = []  # NOTE(review): never read or written anywhere in this file
# NOTE(review): defined but never passed to any request — wire into
# session.get(..., proxy=...) or delete.
proxies = {
    'http': 'http://103.61.144.206:80',
}
# Request headers; 'referer' is filled in per listing page before image downloads.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
    'referer': ''
}


# HTML processing
async def deal_html(html, query_param, query_param1=None):
    """Parse one listing page and return the absolute URLs it links to.

    :param html: page HTML text.
    :param query_param: XPath selecting the links to return (detail pages
        or image ``src`` attributes).
    :param query_param1: optional XPath selecting pagination anchors; when
        it matches, a task crawling the last anchor's href is appended to
        the module-level ``tasks`` list.
    :return: list of URLs joined against ``urls[0]``.
    """
    tree = etree.HTML(html)
    if query_param1:
        print('正在添加')
        # Hoist the repeated xpath() evaluation and guard against an empty
        # match: the original indexed [-1] before checking anything, which
        # raised IndexError whenever the pagination xpath matched no nodes.
        anchors = tree.xpath(query_param1)
        if anchors:
            last = anchors[-1]
            print(last.xpath('@href'))
            print(last.xpath('text()'))
            if last.xpath('@href'):
                print('正在添加1')
                tasks.append(asyncio.create_task(
                    page_url(
                        urljoin(urls[0], last.xpath('@href')[0]),
                        page='//ul[@class="pagination"]/a')))
                print('添加成功', urls[-1])
    # Extract the second-level links and make each one absolute.
    return [urljoin(urls[0], pic_url) for pic_url in tree.xpath(query_param)]


# Page fetching
async def get_html(url, pic_done=False):
    """Fetch *url* with the shared headers.

    Returns the body as text, or as raw bytes when *pic_done* is true.
    On any non-200 status the failure is logged and the int ``403`` is
    returned as a sentinel (callers must check before parsing).
    """
    print(f'正在获取：{url}')
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers, timeout=10) as response:
            # Guard clause: bail out early on failure instead of nesting.
            if response.status != 200:
                print(f"Failed to retrieve {url}: {response.status}")
                return 403
            if pic_done:
                return await response.read()
            return await response.text()


# Image downloading.
# Running count of images written to disk; bumped by detail_url_download
# and reported once at program exit.
abc = 0


async def detail_url_download(url):
    """Download one image into the current directory.

    The file is named after the URL's last path segment.  Increments the
    module-level counter ``abc`` and sleeps 2 s afterwards as crude
    rate limiting.
    """
    global abc
    filename = url.rsplit('/', 1)[-1]
    payload = await get_html(url, True)
    print(f'正在下载：{filename}')
    async with aiofiles.open(filename, 'wb') as fh:
        await fh.write(payload)
    print(f'下载完成：{filename}')
    abc += 1
    await asyncio.sleep(2)


async def page_url(url, detail=False, paramas='//a[@class="pic"]/@href', page=None):
    """Crawl one page of the site.

    :param url: page to fetch.
    :param detail: False → *url* is a listing page and each matched link is
        recursed into as a detail page; True → each matched link is an image
        URL to download.
    :param paramas: XPath passed to deal_html selecting the links to follow.
    :param page: optional pagination XPath forwarded to deal_html.
    """
    resp = await get_html(url)
    # get_html returns the int 403 on any non-200 response; the original
    # passed that straight to deal_html, where etree.HTML(403) raises.
    # Skip failed pages instead of crashing the whole crawl.
    if not isinstance(resp, str):
        return
    print('diyi')
    pic_lists = await deal_html(resp, paramas, page)
    for pic_urls in pic_lists:
        if detail:
            # The image host checks the referer; point it at the page we came from.
            headers['referer'] = url
            await detail_url_download(pic_urls)
        else:
            await page_url(pic_urls, True, '/html/body/div[4]/div[1]/div/div[1]/table/tr/td/img/@src')


tasks = []


async def main():
    """Seed the crawl and wait for every task, including late arrivals.

    ``await asyncio.gather(*tasks)`` snapshots the list exactly once, so
    pagination tasks that deal_html appends to ``tasks`` *while* the crawl
    runs were never awaited and could be destroyed while still pending.
    Drain the list in batches until no new tasks appear instead.
    """
    tasks.append(asyncio.create_task(page_url(urls[0], page='//ul[@class="pagination"]/a')))
    awaited = 0
    while awaited < len(tasks):
        batch = tasks[awaited:]
        awaited = len(tasks)
        await asyncio.gather(*batch)


if __name__ == '__main__':
    # Run the crawl to completion, then report the seed URL list and the
    # total number of images downloaded (counter ``abc``).
    asyncio.run(main())
    print(urls)
    print('下载完成', abc)
