from bs4 import BeautifulSoup
from lxml import etree
import requests
import chardet
import re


def get_content():
    """Fetch the Baidu homepage and demo the core BeautifulSoup APIs.

    Prints: the prettified document, the first <a> tag and its attributes,
    string iteration, parent/sibling navigation, and several find_all
    query styles (limit, id, class, regex, custom attributes).

    Side effects only (network request + prints); returns None.
    """
    r = requests.get("https://www.baidu.com")
    # Guess the real encoding from the raw bytes so r.text decodes correctly.
    r.encoding = chardet.detect(r.content)['encoding']
    # r.text is already a decoded str, so no from_encoding argument is
    # needed (bs4 ignores it for unicode input and warns).
    soup = BeautifulSoup(r.text, 'lxml')
    # Dump the whole parsed document.
    print(soup.prettify())
    # First <a> tag in the document.
    print(soup.a)
    # Its class attribute (a list of class names).
    print(soup.a['class'])
    # Its text content.
    print(soup.a.string)
    # Navigate by node path.
    print(soup.html.head.title.string)
    print("循环")
    # Iterate text nodes with surrounding whitespace stripped
    # (plain .strings would include the whitespace-only nodes).
    for i in soup.html.body.stripped_strings:
        print(repr(i))
    # Walk every ancestor of the first <a> tag.
    for i in soup.a.parents:
        print(i.name)
    print("兄弟节点")
    print(soup.a.next_sibling)
    print("兄弟节点")
    # BUGFIX: the attribute is previous_sibling; "prev_sibling" does not
    # exist, and bs4 treats unknown attributes as child-tag lookups,
    # so the old code silently printed None.
    print(soup.a.previous_sibling)
    print("兄弟的兄弟节点")
    print(soup.a.next_sibling.next_sibling)
    # All <a> tags (a list like ['a', 'b'] also works as the name arg).
    # limit caps the number of results.
    # find_all is the modern spelling; findAll is a deprecated BS3 alias.
    arr = soup.find_all('a', limit=2)
    for a in arr:
        print(a['href'])
    # Search by id.
    print(soup.find_all(id="lh"))
    # Search <a> tags by CSS class (class_ avoids the keyword clash).
    print(soup.find_all('a', class_='lb'))
    # Regex match on an attribute value.
    print(soup.find_all(href=re.compile("jianyi")))
    # Attributes that are not valid keyword names (e.g. data-foo)
    # go through the attrs dict.
    print(soup.find_all(attrs={"data-foo": "value"}))


def lxml_test():
    """Parse the Baidu homepage with lxml and print the re-serialized HTML.

    etree.HTML builds a tree from possibly-incomplete markup, filling in
    any missing tags; tostring shows the repaired document.
    """
    response = requests.get("https://www.baidu.com")
    # Detect the true byte encoding so response.text decodes properly.
    detected = chardet.detect(response.content)
    response.encoding = detected['encoding']
    # Parse (repairing missing tags) and serialize back to bytes.
    tree = etree.HTML(response.text)
    print(etree.tostring(tree))


# lxml使用xpath
def get_content_xpath():
    """Fetch the Baidu homepage and print the text of the first link
    inside the div with id "u1", selected via an XPath query."""
    response = requests.get("https://www.baidu.com")
    # Fix the encoding from the raw bytes before decoding.
    response.encoding = chardet.detect(response.content)['encoding']
    tree = etree.HTML(response.text)
    # XPath: every <a> directly under <div id="u1">.
    links = tree.xpath("//div[@id='u1']/a")
    print(links[0].text)
