import requests
from bs4 import BeautifulSoup
import time
#import re

# Request headers with a desktop-Chrome User-Agent (grab your own from the
# browser devtools Network tab). Pretending to be a browser keeps the
# scraper from being served a blocked/altered page.
headers = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'),
}

def get_info(url):
    """Fetch one Kugou ranking page and print each song entry as a dict.

    Each printed dict contains: 'rank' (chart position), 'singer', 'song',
    'time' (track duration text) and 'link' (the song's detail-page URL).

    :param url: URL of a Kugou ranking page to scrape.
    """
    # Pass the browser-like headers so the site serves the normal page.
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'html.parser')
    # NOTE: :nth-child() is not supported by BeautifulSoup's select();
    # the selectors below avoid it (nth-of-type would be the workaround).
    ranks = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_num > strong')
    links = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > a')
    times = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_tips_r > span')

    # Loop variable renamed from `time` so it no longer shadows the stdlib
    # `time` module imported at file level.
    for rank, link, duration in zip(ranks, links, times):
        # The link text looks like "singer - song". partition() splits only
        # at the FIRST dash, so song titles that contain '-' themselves are
        # kept whole (split('-')[1] would truncate them), and entries with
        # no dash yield an empty song instead of raising IndexError.
        singer, _, song = link.get_text().partition('-')
        data = {
            'rank': rank.get_text().strip(),    # strip() trims surrounding whitespace
            'singer': singer.strip(),
            'song': song.strip(),
            'time': duration.get_text().strip(),
            'link': link.get('href'),
        }
        print(data)

if __name__ == '__main__':  # script entry point
    # Scrape ranking pages 1 through 9, pausing briefly between requests
    # so we don't hammer the server.
    url_template = 'http://www.kugou.com/yy/rank/home/{}-8888.html?from=rank'
    for page in range(1, 10):
        get_info(url_template.format(page))
        time.sleep(2)
