import requests
import re
from bs4 import BeautifulSoup
import urllib.parse as urlparse

a_urls = []  # URLs waiting to be crawled
old_urls = []  # URLs already crawled


def download(url, timeout=10):
    """Download the page at *url* and return its HTML source.

    Args:
        url: Page URL to fetch; ``None`` is tolerated and yields ``None``.
        timeout: Seconds to wait for the server before giving up
            (default 10), so an unresponsive host cannot hang the crawler.

    Returns:
        The decoded HTML text on HTTP 200, otherwise ``None`` (bad
        status, network error, or timeout).
    """
    if url is None:
        return None
    # Spoof a browser User-Agent so the site serves the normal page.
    user_agent = 'Mozilla/4.0(compatible;MSIE5.5;Windows NT)'
    headers = {'User-Agent': user_agent}
    try:
        r = requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException:
        # Connection error / timeout: same contract as a bad status code.
        return None
    if r.status_code == 200:
        # Force UTF-8 decoding; the site serves UTF-8 content.
        r.encoding = 'utf-8'
        return r.text
    return None


def get_new_urls(idx_url):
    """Collect playlist URLs from the index page at *idx_url*.

    Downloads and parses the index page, extracts every anchor whose
    ``href`` matches a playlist id, resolves it to an absolute URL, and
    appends any URL not yet seen to the module-level ``a_urls`` queue.
    Returns ``None``; results accumulate in ``a_urls``.
    """
    html = download(idx_url)
    if html is None:
        # Download failed (bad status / network error); nothing to parse.
        return
    # Parse the index page's HTML.
    soup = BeautifulSoup(html, 'html.parser')
    # Match anchors whose href contains a playlist id, e.g. '/playlist?id=123'.
    pattern = r'playlist\?id=(\d+?)'
    links = soup.find_all('a', href=re.compile(pattern))
    for link in links:
        new_url = link['href']
        # Resolve the (possibly relative) href against the site root.
        new_full_url = urlparse.urljoin('https://music.163.com/', new_url)
        # Queue the URL only if it has not been seen or crawled before.
        if new_full_url not in a_urls and new_full_url not in old_urls:
            a_urls.append(new_full_url)
