import asyncio
import os
from threading import Thread

import aiofiles
import aiohttp
import requests
from lxml import etree
from tqdm import tqdm

# VoaCrawler: concurrently downloads the pages behind scraped 51voa links.
class VoaCrawler:
    """Concurrently fetch a list of site-relative 51voa URLs and save each body.

    NOTE(review): the hrefs scraped by the caller look like HTML article
    pages, yet the bodies are saved with a ``.mp3`` extension — confirm the
    links really point at audio, otherwise this stores HTML bytes as .mp3.
    """

    def __init__(self, urls, base_url="https://www.51voa.com",
                 out_dir="C:/Users/User/Desktop/code/pycode/lab13/MP3/"):
        """
        Args:
            urls: site-relative hrefs scraped from the listing pages
                  (e.g. "/VOA_Standard_English/xxx.html").
            base_url: site root prepended to each relative href.
            out_dir: directory the downloads are written into
                     (created on demand; default keeps the original path).
        """
        self.urls = urls
        self.base_url = base_url
        self.out_dir = out_dir
        # Original code assumed the folder existed; create it so the first
        # run does not die with FileNotFoundError inside aiofiles.open().
        os.makedirs(out_dir, exist_ok=True)

    async def download(self, url, session):
        """Fetch one relative URL and write the response body to out_dir."""
        # The scraped hrefs are site-relative, so the domain must be re-added.
        async with session.get(self.base_url + url) as resp:
            # Fail loudly on 404/5xx instead of silently saving an error page.
            resp.raise_for_status()
            # splitext drops the original extension, so "foo.html" becomes
            # "foo.mp3" rather than the old "foo.html.mp3" double extension.
            stem = os.path.splitext(url.split('/')[-1])[0]
            fname = os.path.join(self.out_dir, stem + ".mp3")
            async with aiofiles.open(fname, mode='wb') as f:
                await f.write(await resp.read())
            print(f"{url} downloaded")

    async def crawl(self):
        """Download every URL concurrently, sharing one HTTP session."""
        async with aiohttp.ClientSession() as session:
            tasks = [asyncio.create_task(self.download(url, session))
                     for url in self.urls]
            await asyncio.gather(*tasks)

if __name__ == '__main__':
    # Prompt asks for a start page and a *stop* page, so the range below
    # is inclusive of the stop page (the original range(start, end) cut it off).
    print("请输入开始页与停止页：",end="")
    start, end = map(int, input().split())
    links = []
    for page in tqdm(range(start, end + 1)):
        # Paginated listing of VOA Standard English articles.
        page_url = f'https://www.51voa.com/VOA_Standard_{page}.html'
        response = requests.get(page_url)
        response.raise_for_status()  # do not parse an error page
        html = etree.HTML(response.text)
        # One positionless XPath grabs every <li> href at once. The old
        # li[0]..li[49] loop was quadratic (links = links + ...) and
        # off-by-one: XPath positions are 1-based, so li[0] matched nothing.
        links.extend(html.xpath('//*[@id="righter"]/div[3]/ul/li/a/@href'))
    crawler = VoaCrawler(links)
    asyncio.run(crawler.crawl())
