import urllib.request
import urllib.parse
import requests
from lxml import etree
import re
import threading
from pprint import pprint
import time


# 获取数据
# Fetch a URL and return the raw response object.
def get_response(html_url):
    """Send a GET request to *html_url* with a browser User-Agent.

    The User-Agent header makes the request look like a normal Firefox
    visit so the site does not reject the crawler outright.

    A timeout is set because requests has no default timeout — without
    one a stalled server would hang the whole crawl indefinitely.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:101.0) Gecko/20100101 Firefox/101.0'
    }
    response = requests.get(url=html_url, headers=headers, timeout=10)
    return response


# 获取音乐列表的榜单的数据
# Scrape the chart sidebar for the individual ranking lists.
def get_music_list(html_url):
    """Return an iterator of (list_title, list_url) pairs.

    Parses the chart page at *html_url* and pulls every ranking link
    out of the "pc_temp_side" sidebar.
    """
    page = etree.HTML(get_response(html_url).text)
    anchor = '//*/div[@class="pc_temp_side"]//li/a'
    links = page.xpath(anchor + '/@href')
    titles = page.xpath(anchor + '/@title')
    # Pair each list title with its page URL.
    return zip(titles, links)


def get_music_id(url):
    """Return (Hash, album_id) pairs for every song on a ranking page.

    The page embeds its track metadata as JSON inside the HTML, so the
    identifiers are extracted with regular expressions rather than XPath.
    """
    page_text = get_response(url).text
    hashes = re.findall('"Hash":"(.*?)"', page_text)
    albums = re.findall('"album_id":(.*?),', page_text)
    # Zip the two parallel lists into per-song identifier pairs.
    return zip(hashes, albums)


def get_music_info(Hash, album_id):
    """Query Kugou's play API for one track.

    Parameters
    ----------
    Hash : str
        Track hash scraped by get_music_id.
    album_id : str
        Album id scraped by get_music_id.

    Returns
    -------
    list
        [audio_name, play_url] for the track.
    """
    link_url = f'https://wwwapi.kugou.com/yy/index.php?r=play/getdata&hash={Hash}&dfid=1bn7gr0nKIVe0tJ9QV1GXIVi&appid=1014&mid=26f6eb25aa4d391db0b30ce6421edc60&platid=4&album_id={album_id}&_=1655558577938'
    res = get_response(link_url)
    # Parse the JSON body once instead of once per field.
    data = res.json()['data']
    music_info = [data['audio_name'], data['play_url']]
    return music_info


# 保存数据
def save(title, play_url):
    """保存数据"""
    music_content = get_response(html_url=play_url).content
    with open('D:\\deleteAnyTime\\爬虫获取的数据\\酷狗music\\' + title + '.mp3', mode='wb') as f:
        f.write(music_content)
        print(title, '保存成功')


def multi_thread(items):
    """Download every item concurrently — one thread per song.

    Starts all downloads, then blocks until the last one finishes.
    Each item is a dict with 'title' and 'url' keys.
    """
    print('进程开始')
    workers = [
        threading.Thread(target=save, args=(entry['title'], entry['url']))
        for entry in items
    ]

    # Launch everything first so the downloads overlap...
    for worker in workers:
        worker.start()

    # ...then wait for each one to complete.
    for worker in workers:
        worker.join()

    print('进程结束')


def single_thread(items):
    """Download the items sequentially on the calling thread."""
    for entry in items:
        save(entry['title'], entry['url'])


# 定义业务逻辑
# Business logic: crawl every ranking list and download its songs.
def main(url):
    """Crawl the chart page at *url* and download each song per list.

    For every ranking list in the sidebar: resolve each track's
    (Hash, album_id) into a title and play URL, then download the
    whole list while timing how long it takes.
    """
    for list_name, link in get_music_list(html_url=url):
        print(f'正在爬取{list_name}')
        songs = []
        for track_hash, album in get_music_id(url=link):
            title, play_url = get_music_info(track_hash, album)
            songs.append({'title': title, 'url': play_url})
        print(songs)
        start_time = time.time()
        # multi_thread(songs)  # optional threaded mode: ~8s for 22 songs
        single_thread(songs)  # ~17s for 22 songs
        end_time = time.time()
        print('花费时间是', end_time - start_time)


# Script entry point: crawl the Kugou TOP ranking chart.
if __name__ == '__main__':
    target = 'https://www.kugou.com/yy/rank/home/1-52144.html?from=rank'
    main(url=target)
