import requests
from bs4 import BeautifulSoup
from time import sleep
import os
from win32com.client import Dispatch

# Desktop-Chrome User-Agent sent with every request (used by get_url),
# so the site treats the scraper like an ordinary browser.
Headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}


def get_url(url, timeout=10):
    """Download *url* and return its HTML parsed into a BeautifulSoup tree.

    Args:
        url: Absolute URL of the page to fetch.
        timeout: Seconds to wait for the server. The original call had no
            timeout, so a stalled connection could hang the script forever;
            the default keeps existing call sites working unchanged.

    Returns:
        BeautifulSoup: the response body parsed with the stdlib html.parser.
    """
    response = requests.get(url, headers=Headers, timeout=timeout)
    return BeautifulSoup(response.text, 'html.parser')


def get_mp3(url):
    """Return the ``src`` of the first <audio> element on *url*'s page.

    Args:
        url: Absolute URL of a story page expected to embed one audio player.

    Returns:
        str: the audio source URL as found in the page.

    Raises:
        IndexError: if the page contains no <audio> element (same exception
            type as the original bare ``audios[0]`` lookup, but with a
            message that names the offending page).
    """
    soup = get_url(url)
    audio = soup.select_one('audio')
    if audio is None:
        raise IndexError('no <audio> element found on page: ' + url)
    mp3 = audio['src']
    print("mp3",mp3)
    return mp3


def get_links(url):
    """Queue every /story/ article's mp3 on index page *url* for download.

    Scrapes all anchor tags on the index page, keeps those whose href
    points at a /story/ article, resolves each article's mp3 URL via
    get_mp3, and batches all of them as tasks into the local Thunder
    download agent (Windows-only COM interface).

    Args:
        url: Absolute URL of one audio index page.

    Side effects:
        One HTTP request per matching article, progress printed to stdout,
        and a batch of tasks committed to ThunderAgent.
    """
    soup = get_url(url)
    # Collect links and titles first; downloads are committed in one batch.
    audios = []
    titles = []
    for link in soup.select('a'):
        href = link.get('href')
        # Only relative story links belong to downloadable articles.
        if href and href.startswith('/story/'):
            story_url = 'https://paper.i21st.cn' + href
            audios.append(get_mp3(story_url))
            titles.append(link.get_text())

    thunder = Dispatch("ThunderAgent.Agent64.1")
    for mp3_url, title in zip(audios, titles):
        # File name = the mp3's parent directory on the server + article
        # title. NOTE(review): the title may contain characters invalid in
        # Windows filenames — consider sanitizing before AddTask.
        mp3_name = os.path.basename(os.path.dirname(mp3_url)) + "_" + title + ".mp3"
        print(str(mp3_url) + "  " + str(mp3_name))
        thunder.AddTask(str(mp3_url), str(mp3_name))
    thunder.CommitTasks()


if __name__ == '__main__':
    # Walk the paginated audio index (currently only page 1), pausing
    # between pages to avoid hammering the server.
    for page_number in range(1, 2):
        page_url = 'https://paper.i21st.cn/audio_21je2_{}.html'.format(page_number)
        get_links(page_url)
        sleep(2)
