import requests
import time
from lxml import etree

# Known bug: if the last page contains exactly 35 entries, the crawl
# enters an infinite loop and never exits.

# Request headers: a desktop-browser User-Agent so music.163.com serves
# the regular HTML page instead of blocking the request.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
    "content-type": "application/x-www-form-urlencoded",
}
# Playlist discovery page on music.163.com.
url = "https://music.163.com/discover/playlist/"
params = {
    "order": "hot",  # sort by popularity
    "cat": "全部",  # category ("all"); overwritten per category while crawling
    "limit": "35",  # playlists per page
    "offset": "0",  # paging offset; updated as page * 35 while crawling
}

# Fetch the discovery page once and collect every playlist category name.

response = requests.get(url, params=params, headers=headers)
# Decode the raw response body into text.
html_str = response.content.decode()
# Parse the HTML into an element tree so it can be queried with XPath.
root = etree.HTML(html_str)
music_list = root.xpath("//div[@class='bd']/dl[@class='f-cb']/dd")
# Every category name found on the page, flattened into one list.
result_type_list = [
    category_name
    for category_node in music_list
    for category_name in category_node.xpath("./a/text()")
]
print(result_type_list)

# Pages that were skipped after repeated request failures.
error_list = []

# Running total of playlist entries seen across all categories.
# NOTE(review): `sum` shadows the builtin of the same name for the rest
# of the module; the crawl loop below depends on this module-level name.
sum = 0

# Crawl every category, paging through its playlists 35 at a time.
for data in result_type_list:
    params["cat"] = data  # category is invariant for the whole inner loop
    page = 0
    # Number of playlist entries collected for the current category.
    count = 0
    while True:
        # Retry counter for the current page.
        refresh_count = 0
        # Reset before each page so a failed page cannot reuse the previous
        # page's results. (Stale data here caused an infinite loop whenever
        # the last page held exactly 35 entries: every following empty page
        # failed its retries but `music_list` still looked full, so the
        # `len(music_list) < limit` exit below never fired.)
        music_list = []

        while True:  # per-page retry loop
            try:
                # Advance the paging offset (35 playlists per page).
                params["offset"] = str(page * 35)
                response = requests.get(url, params=params, headers=headers)
                # Decode the raw response body into text.
                html_str = response.content.decode()
                # Parse the HTML so it can be queried with XPath.
                root = etree.HTML(html_str)
                music_list = root.xpath("//div[@class='g-wrap p-pl f-pr']/ul/li")
                if not music_list:
                    raise ValueError("该页为空")
                print(f"当前类型是:{params['cat']},开始爬取第{page + 1}页")
                for music in music_list:
                    count += 1
                    music_title = "".join(music.xpath(".//p[@class='dec']/a/text()"))
                    music_url = "https://music.163.com/#" + "".join(music.xpath(".//p[@class='dec']/a/@href"))
                    music_author = "".join(music.xpath(".//a[@class='nm nm-icn f-thide s-fc3']/text()"))
                    music_plays = "".join(music.xpath(".//div[@class='bottom']/span[@class='nb']/text()"))
                    print(f"歌单名:{music_title},作者:{music_author},播放量:{music_plays},封面链接:{music_url}")
                break
            # Narrowed from a bare `except:` so KeyboardInterrupt still exits.
            except Exception:
                # Discard any partial/stale results from the failed attempt.
                music_list = []
                refresh_count += 1
                print(f"重试{refresh_count}")
                if refresh_count > 5:
                    print(f"跳出{page + 1}页循环，进行下一页循环")
                    # Remember which page was skipped.
                    error_list.append(page)
                    break
        page += 1
        print(f"__________第{page}页结束,本页长度为{len(music_list)}__________")
        print(f"当前类型{params['cat']}目前共{count}条数据")
        sum += len(music_list)
        print(f"总共{sum}条数据")
        # A short page — fewer than `limit` entries, which now includes a
        # failed/empty page (len 0) — means the category is exhausted.
        if len(music_list) < int(params["limit"]):
            print(f"类型:{params['cat']}已经全部取完")
            print(f"出问题的页面:{error_list}")
            print(f"共爬取{sum}条数据")
            break

        time.sleep(2)