# -*- coding: utf-8 -*-
# @Time : 2022/3/1 14:23
# @Author : Liuqing
# @File : kugouTop100.py
# @Software : PyCharm

# 爬取酷狗音乐内地榜前100
import requests
import time
import lxml
from bs4 import BeautifulSoup

# Request headers sent with every page fetch: a desktop-Chrome User-Agent so
# kugou.com serves the normal HTML page, plus a session cookie captured from a
# browser visit.
# NOTE(review): the cookie values are session-specific and will eventually
# expire — refresh them from the browser if requests start being rejected.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/98.0.4758.102 Safari/537.36",
    "cookie": "kg_mid=630606a263276125b47403d674123777; kg_dfid=1tyVuT1ePHta1rQWHP01dFoh; "
              "KuGooRandom=6651644920409534; kg_dfid_collect=d41d8cd98f00b204e9800998ecf8427e; "
              "Hm_lvt_aedee6983d4cfc62f509129360d6bb3d=1645924955,1645925158,1646041215,1646097192; "
              "ACK_SERVER_10016=%7B%22list%22%3A%5B%5B%22bjreg-user.kugou.com%22%5D%5D%7D; "
              "ACK_SERVER_10015=%7B%22list%22%3A%5B%5B%22bjlogin-user.kugou.com%22%5D%5D%7D; "
              "ACK_SERVER_10017=%7B%22list%22%3A%5B%5B%22bjverifycode.service.kugou.com%22%5D%5D%7D; "
              "kg_mid_temp=630606a263276125b47403d674123777; Hm_lpvt_aedee6983d4cfc62f509129360d6bb3d=1646115922 "
}


def get_info(url_link):
    """Fetch one page of the Kugou mainland chart and print each entry.

    Args:
        url_link: URL of one ranking page (the chart spans several pages).

    For every song on the page, prints a dict with the rank, singer, song
    name and duration.  A single malformed entry (no '-' separator in its
    title) is reported and skipped instead of aborting the rest of the page.

    Raises:
        requests.HTTPError: if the server answers with an error status.
    """
    # timeout keeps a stalled connection from hanging the whole script
    web_data = requests.get(url_link, headers=headers, timeout=10)
    web_data.raise_for_status()
    soup = BeautifulSoup(web_data.text, 'lxml')
    ranks = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_num')
    titles = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > a')
    times = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_tips_r > span')
    for rank, title, length_of_time in zip(ranks, titles, times):
        # Title text is "singer - song"; split on the FIRST '-' only so song
        # names that themselves contain '-' are kept intact (plain split('-')
        # would silently truncate them).
        parts = title.get_text().split('-', 1)
        try:
            data = {
                'rank': rank.get_text().strip(),
                'singer': parts[0].strip(),
                'song': parts[1].strip(),
                'time': length_of_time.get_text().strip()
            }
        except IndexError as e:
            # Entry without a separator: report it and continue with the rest
            # of the page (the old page-wide try/except dropped everything
            # after the first bad entry).
            print(e)
            continue
        print(data)
        print('                                                                      ')


if __name__ == '__main__':
    # The mainland chart is paginated; pages 1-5 together cover the top 100.
    page_url_template = 'https://www.kugou.com/yy/rank/home/{}-31308.html?from=rank'
    for page in range(1, 6):
        get_info(page_url_template.format(page))
        print('--------------------------这是一页的分割线-------------------------------')
        # Brief pause between pages so we do not hammer the server.
        time.sleep(1)
