#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 24 13:42:35 2022

@author: cythnia
"""

##爬虫课
##1.爬虫工具包介绍
#-------#
#hotsearch-content-wrapper > li:nth-child(1) > a > span.title-content-title

#读取工具包
import requests #访问网页
from bs4 import BeautifulSoup #转换为好处理的数据格式
#请求头：模拟浏览器
# Request headers: impersonate a desktop Chrome browser so Baidu serves the
# normal page instead of a bot-detection stub.
request_headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
}
target_url = 'https://www.baidu.com/'
# Fetch the raw HTML and dump it so the page structure can be inspected.
response = requests.get(target_url, headers=request_headers)
page_text = response.text
print(page_text)
# Parse the HTML (lxml backend) and pull the 3rd hot-search title.
parsed_page = BeautifulSoup(page_text, 'lxml')
hot_titles = parsed_page.select('#hotsearch-content-wrapper > li:nth-child(3) > a > span.title-content-title')
for title_tag in hot_titles:
    print(title_tag.get_text())
 #---------------------------------------------------------------------------------#
#爬虫第二课
##QQ音乐热歌排行榜数据爬取（下方代码抓取的是QQ音乐榜单，并非百度热搜）
#------#
#导入工具包
import requests
from bs4 import BeautifulSoup
#爬取链接
# Target page: QQ Music "hot songs" toplist (list id 4).
url='https://y.qq.com/n/ryqq/toplist/4'
# Request headers: impersonate a desktop Chrome browser.
headers={
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
    }
# Fetch the page.
html=requests.get(url,headers=headers)
html.encoding=html.apparent_encoding # align decoding with the detected charset so Chinese text is not garbled
data=html.text
# Parse with the stdlib html.parser backend.
soup=BeautifulSoup(data,'html.parser')
# 1. Song names: the second <a> inside each song-name cell of the list.
name=soup.select('#app > div > div.main > div.mod_toplist > div.topList_mod_songlist > ul.songlist__list > li > div > div.songlist__songname > span > a:nth-child(2)')

# BUG FIX: the original loop iterated `redu`, which is only assigned further
# down the file, so this line raised NameError. The intent — per the section
# comment above — is to print the song names just selected into `name`.
for i in name:
    #print(i)
    #print('-----------------')
    print(i.get_text())

## 2. Artists: the <a> tag(s) in each row's artist cell.
# NOTE(review): the original comment labelled this "hot-search index" (热搜指数),
# but the selector returns the artist links — they become the 歌手 column below.
redu=soup.select('#app > div > div.main > div.mod_toplist > div.topList_mod_songlist > ul.songlist__list > li > div > div.songlist__artist > a')

## 3. Song links: same anchors as the song names; only the href attribute is
## read later, so reusing the song-name selector is intentional.
lianjie=soup.select('#app > div > div.main > div.mod_toplist > div.topList_mod_songlist > ul.songlist__list > li > div > div.songlist__songname > span > a:nth-child(2)' )
# (Removed several blocks of commented-out selector experiments that were dead code.)
#创建list
import pandas as pd
import numpy as np

# Build one row per song: (song-name text, artist text, relative href).
# A comprehension replaces the original append loop; the original also had
# bare no-op expression statements (`lis`, `result`) that did nothing in a
# script and were dropped.
lis = [[song.get_text(), artist.get_text(), link['href']]
       for song, artist, link in zip(name, redu, lianjie)]

# Assemble the table; a 1-based index reads better in the exported sheet.
result = pd.DataFrame(lis,
                      columns=['歌曲名称','歌手','歌曲地址'],
                      )
result.index = result.index + 1
# The scraped hrefs are site-relative; prefix the host to get full URLs.
result['歌曲地址'] = 'https://y.qq.com' + result['歌曲地址']

# NOTE(review): hard-coded personal path — adjust for your machine.
result.to_excel('/Users/cythnia/Desktop/qq音乐热歌排行榜.xlsx')
