import requests
from bs4 import BeautifulSoup

url = 'https://www.baidu.com/'

# Request headers: send a real browser User-Agent so Baidu serves the
# normal page instead of a blocked/stripped response for the default
# python-requests UA.
h = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
}

# GET the page. A timeout keeps the script from hanging forever on a
# stalled connection; raise_for_status() fails fast on HTTP errors
# instead of silently parsing an error page.
r = requests.get(url, headers=h, timeout=10)
r.raise_for_status()

# Decode explicitly as UTF-8 (Baidu's page encoding) rather than relying
# on the platform/default codec, then parse with lxml.
soup = BeautifulSoup(r.content.decode('utf-8'), 'lxml')

"""
1.查找标签
2.查找标签的文本内容
3.查找标签的属性值
4.查找某标签内的标签
"""

#1.查找标签:查找div标签，且该标签的类名为title-text c-font-medium c-color-t
#注意:属性 是字典结构(key:value)
r1=soup.find('div',{'class':'title-text c-font-medium c-color-t'})
print(r1)

#2.查找标签的文本内容：text string

r2=soup.find('div',{'class':'title-text c-font-medium c-color-t'}).string
print(r2)

# r2=soup.find('div',{'class':'title-text c-font-medium c-color-t'})
# print(r2.get_text())

# 3. Read attribute values: collect the hot-search entries (title text
# and detail URL) from the <li class="hotsearch-item"> elements.
r3 = soup.find_all('li', {'class': 'hotsearch-item'})

# Accumulate one dict per hot-search entry.
list_data = []
for item in r3:
    spans = item.select('span')
    link_tag = item.find('a')
    # Skip malformed items instead of crashing: the original indexing
    # raised IndexError/TypeError when a <span> or <a href> was missing.
    if len(spans) < 2 or link_tag is None or not link_tag.has_attr('href'):
        continue
    name = spans[1].string
    # Use a distinct name for the per-item link so we don't shadow the
    # module-level `url` defined at the top of the script.
    link = link_tag['href']
    # Pair the title text with its detail link.
    data = {'标题': name, '详情链接': link}
    list_data.append(data)

# Persist the scraped records; str(...) is the idiomatic spelling of
# list_data.__str__() and produces byte-identical output.
with open('data.txt', 'w', encoding='utf-8') as f:
    f.write(str(list_data))