from bs4 import BeautifulSoup
import re
# Demo document: a small HTML page with a title, a heading paragraph,
# and three <a class="sister"> links used by the find_all() examples below.
html_doc = """<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; 
and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# Build a BeautifulSoup object from html_doc, parsing with the lxml parser
soup = BeautifulSoup(html_doc, features='lxml')

# —————————— name parameter ——————————
# Find all nodes whose tag name is 'title'
# print(soup.find_all('title'))

# # Find all nodes whose id attribute matches the regex "link1"
# print(soup.find_all(id=re.compile("link1")))

# Find all nodes whose tag name is either 'title' or 'a'
# print(soup.find_all(["title", "a"]))

# —————————— attrs parameter ——————————
# Find nodes whose 'id' attribute equals 'link1'
print(soup.find_all(attrs={'id':'link1'}))

# —————————— recursive parameter ——————————
# Find <head> only among direct children (no recursive descent)
# print(soup.html.find_all("head", recursive=False))

# —————————— text parameter ——————————
# Find all text nodes whose content is exactly "Elsie"
# NOTE(review): newer bs4 releases prefer string= over the text= alias — confirm against installed version
# print(soup.find_all(text="Elsie"))

# —————————— limit parameter ——————————
# Find at most 1 <a> node
# print(soup.find_all("a", limit=1))

# —————————— **kwargs parameter ——————————
# Find nodes whose 'id' attribute equals 'link3'
# print(soup.find_all(id='link3'))

# Find <p> nodes whose class attribute is 'title' (class_ avoids the reserved word)
# print(soup.find_all("p", class_="title"))