'''
爬虫实例
'''

# 引用库
import requests,re
from bs4 import  BeautifulSoup
import traceback

# Browser-impersonation request headers so the target site serves the
# normal desktop page instead of blocking the script as a bot.
headers = {
    # NOTE(review): session-specific cookie copied from a browser —
    # confirm it is still required / valid for the target site.
    'Cookie': 'clientid=3;did=web_83d5b11d08e996c017936d1b9b',
    'accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'Connection': 'keep-alive',
    # Declares a JSON request body; the GET below does not actually send one.
    'content-type': 'application/json',
    'sec-ch-ua': '" Not;A Brand";v="99", "Microsoft Edge";v="103", "Chromium";v="103"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': "Windows",
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.114 Safari/537.36 Edg/103.0.1264.62',
}

 
# Target page to scrape.
# NOTE(review): the original assigned a CSDN url and immediately overwrote
# it with this one; the dead assignment is kept here only as a reference.
# url = 'https://blog.csdn.net/baidu_17201253/article/details/118699532'
url = 'https://www.runoob.com/w3cnote/python-spider-intro.html'

# Fetch the page. A GET request should not carry a JSON body, so the
# original `json='请求包含数据'` argument was dropped. The timeout keeps
# the script from hanging forever on an unresponsive host.
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()  # fail fast on 4xx/5xx instead of parsing an error page
# If the endpoint returned JSON instead of HTML, use:
# json_data = response.json()

soup = BeautifulSoup(response.text, 'html.parser')
pagetitle = soup.find("title")
# Guard: pages without a <title> make find() return None.
if pagetitle is not None:
    print("title：", pagetitle.get_text())

# Print every hyperlink on the page: tag name, target URL, anchor text.
links = soup.find_all('a')
print("所有的链接")
for link in links:
    try:
        # link['href'] raises KeyError for <a> tags without an href
        # attribute — that is the only expected failure here, so catch
        # it specifically instead of the original blanket BaseException.
        print(link.name, link['href'], link.get_text())
    except KeyError as exception:
        print("异常", exception)
        traceback.print_exc()
 
print("获取特定的URL地址")
# Look up the single anchor whose href matches this example URL exactly;
# find() yields None when no such anchor exists.
link_node = soup.find('a', href="http://example.com/elsie")
if link_node:
    details = (link_node.name, link_node['href'], link_node['class'], link_node.get_text())
    print(*details)
 
print("正则表达式匹配")
# First anchor whose href contains the substring "ti" (regex match).
link_node = soup.find('a', href=re.compile(r"ti"))
if link_node:
    try:
        # link_node['class'] raises KeyError on anchors without a class
        # attribute — catch only that, not the original BaseException,
        # which would also swallow KeyboardInterrupt/SystemExit.
        print(link_node.name, link_node['href'], link_node['class'], link_node.get_text())
    except KeyError as exception:
        print("异常", exception)
        traceback.print_exc()
 
print("获取P段落的文字")
# First <p> element carrying the CSS class "story", if the page has one.
p_node = soup.find('p', class_='story')
if p_node:
    tag_name = p_node.name
    css_classes = p_node['class']
    text = p_node.get_text()
    print(tag_name, css_classes, text)