# beautifulSoup Demo
import requests

import re
import io
import sys
# Re-wrap stdout as UTF-8 so printed HTML renders correctly on consoles
# whose default encoding cannot represent it (e.g. GBK on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')

# Fetch the demo page and parse it into a BeautifulSoup tree.
r = requests.get('http://python123.io/ws/demo.html', timeout=10)
r.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
r.encoding = r.apparent_encoding  # trust content sniffing over the header's charset guess
demo = r.text

# Import the parser (kept here to match the original script flow)
from bs4 import BeautifulSoup

soup = BeautifulSoup(demo, 'html.parser')
# Pretty-print the parsed HTML with one tag per line and indentation.
print(soup.prettify())

# 基础元素
print(soup.title)


tag = soup.a
print(tag)
print(tag.string)
print(tag.attrs)
print(tag.attrs['class'])
print(tag.attrs['href'])
print(type(tag))

print(soup.a.parent.name)
print(soup.a.parent.parent.name)

print(soup.p)
print(soup.p.string)

# A Comment node's .string looks like ordinary text; only its type
# distinguishes it from a NavigableString.
markup = "<b><!--This is comment--></b><p>this is not a comment</p>"
newsoup = BeautifulSoup(markup, "html.parser")
for value in (newsoup.b.string, newsoup.p.string, type(newsoup.b.string)):
    print(value)

print("=================children========================")
# .contents is a list of direct children; .children is the lazy equivalent.
print(soup.head)
print(soup.head.contents)

body_contents = soup.body.contents
print(body_contents)
print(len(body_contents))

for node in soup.body.children:
    print(node)



print("====================parent=====================")
print(soup.title.parent)
print(soup.html.parent)  # <html>'s parent is the BeautifulSoup document itself

# Climb from the first <a> up to the document root, printing each ancestor.
for ancestor in soup.a.parents:
    print(ancestor.name if ancestor is not None else ancestor)


print("====================平行遍历=====================")
anchor = soup.a
print(anchor.next_sibling)      # next node at the same tree level
print(anchor.next_sibling.next_sibling)
print(anchor.previous_sibling)  # previous node at the same tree level
print(anchor.previous_sibling.previous_sibling)

# Iterate over all following siblings.
for node in soup.a.next_siblings:
    print(node)

# Iterate over all preceding siblings.
for node in soup.a.previous_siblings:
    print(node)


print("====================找出所有a标签=====================")
# Print the href attribute of every <a> tag.
for link in soup.find_all("a"):
    print(link.get("href"))

# find_all also accepts a list of tag names ...
print(soup.find_all(['a','b']))

# ... or a compiled regex matched against tag names.
for element in soup.find_all(re.compile('b')):
    print(element)

print(soup.find_all('p','course'))          # <p> tags with class "course"
print(soup.find_all(id='link1'))            # exact attribute match
print(soup.find_all(id=re.compile('link'))) # regex attribute match
print(soup.find_all('a',recursive=False))   # direct children of soup only — empty here
print(soup.find_all(string = 'Basic Python'))
print(soup.find_all(string = re.compile('Python')))

# Calling the soup object is shorthand for find_all.
print(soup('p','course'))
