import requests
from lxml import etree, html


def base_use(path="../datas/example.xml"):
    """Parse a book-catalog XML file and print each book's fields.

    Args:
        path: Path to the XML file.  Each ``<book>`` child of the root
            is expected to carry a ``category`` attribute and contain
            ``<title>``, ``<author>``, ``<year>`` and ``<price>``.

    Returns:
        A list of ``(category, title, author, year, price)`` tuples, one
        per ``<book>`` element, so callers can consume the data instead
        of only reading stdout.  Missing children yield ``None`` fields.
    """
    tree = etree.parse(path)
    root = tree.getroot()
    records = []
    for book in root.findall("book"):
        category = book.attrib.get("category")
        # findtext() returns None for a missing child instead of the
        # AttributeError that book.find(...).text would raise.
        title = book.findtext("title")
        author = book.findtext("author")
        year = book.findtext("year")
        price = book.findtext("price")
        print(title, author, year, price)
        records.append((category, title, author, year, price))
    return records


# base_use()

def create_xml():
    """Build a small three-child demo XML tree and print it.

    All children are created with ``etree.SubElement`` (the original
    mixed ``Element()`` + ``append()`` for child1 with ``SubElement``
    for the others; one API is used consistently and the triplicated
    code is collapsed into a loop).

    Returns:
        The serialized document as bytes (also printed), e.g.
        ``b'<root><child1 id="child1">...</child1>...</root>'``.
    """
    root = etree.Element("root")

    for idx in ("1", "2", "3"):
        child = etree.SubElement(root, f"child{idx}", attrib={"id": f"child{idx}"})
        # Same text values as the original hand-written children.
        child.text = f"我是子元素{idx}"

    data = etree.tostring(root)
    print(data)
    return data


# create_xml()

def base_index_use():
    """Fetch the local demo page and print elements selected via CSS.

    Requires a server listening on http://127.0.0.1:5000 and lxml's
    cssselect support.  Prints the ``<title>`` match list, each link in
    the container list, and the footer paragraphs.
    """
    res = requests.get("http://127.0.0.1:5000")
    html_tree = etree.HTML(res.text)

    # Page title (cssselect returns a list of matching elements).
    title = html_tree.cssselect("title")
    print(title)

    # Links inside the main container list: text plus href attribute.
    links = html_tree.cssselect(".container ul li a")
    for link in links:
        print(link.text, link.attrib.get("href"))

    # Footer paragraphs.  Query once: the original ran ".footer p"
    # twice and also built unused "[0]"-indexed locals (footer_p,
    # footer_h1) that could raise IndexError on a page with no footer.
    for footer_p in html_tree.cssselect(".footer p"):
        print(footer_p.text)


def parse_html_by_xpath():
    """Fetch the local demo page and print the parsed lxml element.

    XPath cheat sheet (translated from the original notes):
      - Ctrl+Shift+X toggles the browser XPath helper; holding Shift
        while moving the mouse reveals the selector path of an element.
      - Absolute path (from the html root, starts with /): /html/head/title
      - Anywhere in the document (starts with //): //title
      - Relative to the current node (starts with ./): ./li
      - Positional selection: //ul/li[7]/a
      - Attribute filters: //*[@attr] or //*[@attr=value]
    """
    response = requests.get("http://127.0.0.1:5000")
    document = etree.HTML(response.text)
    print(document, dir(document))


# Module-level demo call: performs an HTTP request against the local
# test server the moment this module is imported.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so
# importing this module has no network side effect — confirm no caller
# relies on the import-time run first.
parse_html_by_xpath()
