# import requests
# from lxml import etree
#
# url = "http://www.baidu.com"
# head = {'User-Agent':
# 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'}
# r = requests.get(url=url, headers=head)
# r.encoding = r.apparent_encoding
#
# neir = r.text
#
# html_tree = etree.HTML(neir)
# doc = html_tree.xpath('//input[@id="su"]/@value')[0]
# print(doc)



import requests
from lxml import etree
import csv
import pandas
def get_html(url):
    """Fetch *url* and return the decoded response body, or None on failure.

    Sends a browser-like User-Agent so the target site serves the normal
    page. On any request error (network failure, bad HTTP status) the
    exception is printed and None is returned.
    """
    head = {'User-Agent':
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'}
    try:
        r = requests.get(url=url, headers=head)
        # Check the HTTP status before touching the body — a 4xx/5xx page
        # is not worth decoding.
        r.raise_for_status()
        # Use the encoding detected from the content; the site's declared
        # charset may be wrong for Chinese pages.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        print(e)
        return None
def parser(heml):
    """Extract song titles and links from the ranking page HTML.

    Returns a dict with keys "歌曲" (titles) and "地址" (hrefs), each
    mapping to a list of strings pulled from the rank list anchors.
    """
    tree = etree.HTML(heml)
    anchor = '//div[@id="rankWrap"]/div[2]/ul/li/a/@'
    titles = tree.xpath(anchor + 'title')
    links = tree.xpath(anchor + 'href')
    return {"歌曲": titles, "地址": links}
def save_csv(path, content):
    """Save *content* (a dict of equal-length columns) to a CSV at *path*.

    Writes without the pandas row index, and with a UTF-8 BOM so Excel
    opens the Chinese column headers correctly.
    """
    df = pandas.DataFrame(content)
    # index=False drops the meaningless 0..n-1 index column;
    # utf-8-sig adds a BOM so Excel detects the encoding.
    df.to_csv(path, index=False, encoding='utf-8-sig')

if __name__ == '__main__':
    # Scrape the Kugou ranking mirror and dump titles + links to CSV.
    url = "http://www.bspider.top/kugou/"
    html = get_html(url)
    # get_html returns None on failure; parsing None would raise a
    # confusing lxml error, so bail out explicitly.
    if html is None:
        print("failed to fetch page, nothing saved")
    else:
        data = parser(html)
        out_path = 'd:\\kugou.csv'
        save_csv(out_path, data)