"""
pyquery 类似于jQuery
pyquery 依赖 cssselect (主要使用css选择器)
pyquery 依赖 lxml (主要处理xml与html格式数据)

lxml：解析、生成和操作XML和HTML文档
xml：一个可扩展的标记语言，严格的语法要求（标签嵌套）
html：超文本语言 扩展了xml
"""

from lxml import etree, html
import requests


def base_fun(path="../datas/example.xml"):
    """Parse an XML product catalog and print each product's fields.

    Args:
        path: Filesystem path to the XML file. The default keeps the
              original hard-coded location, so existing callers still work.

    Returns:
        list[tuple]: one (name, price, description) tuple per <product>
        element, so callers can consume the data instead of parsing stdout.
    """
    tree = etree.parse(path)
    root = tree.getroot()

    records = []
    for product in root.findall("product"):
        # NOTE(review): the original also read product.attrib["category"]
        # but never used it; the unused lookup has been dropped.
        name = product.find("name").text
        price = product.find("price").text
        description = product.find("description").text
        print(name, price, description)
        records.append((name, price, description))
    return records


# base_fun()

def create_xml():
    """Build a small XML tree (<root> with three numbered children) and print it.

    Returns:
        bytes: the serialized XML document (also printed, preserving the
        original stdout behavior).
    """
    root = etree.Element("root")

    # The three children differ only by their index, so build them in a
    # loop instead of repeating the same three-line stanza three times.
    for i in range(1, 4):
        tag = f"child{i}"
        child = etree.Element(tag, attrib={"id": tag})
        child.text = f"我是子元素{i}"
        root.append(child)

    serialized = etree.tostring(root)
    print(serialized)
    return serialized


# create_xml()

def parse_html():
    """Fetch the local demo page and print each section's <h1> and <p> text.

    Uses ElementTree-style find/findall path expressions on an lxml.html
    tree (assumes the demo server at 127.0.0.1:5000 is running).
    """
    response = requests.get("http://127.0.0.1:5000")
    document = html.fromstring(response.text)

    # The page content lives two <div> levels under <body>.
    container = document.find(".//body/div/div")
    for block in container.findall("./div"):
        heading = block.find("./h1")
        paragraph = block.find("./p")
        print(heading.text, paragraph.text)


# parse_html()

# lxml 主要使用解析方法 cssselect xpath

def parse_html_by_css():
    """Fetch the local demo page and extract its content via CSS selectors.

    Prints, in order: each main section's heading and paragraph, every
    sidebar link (text plus href), and the stripped footer text.
    """
    page = requests.get("http://127.0.0.1:5000")
    tree = etree.HTML(page.text)

    # Left-hand column: first <h1> and first <p> inside each .section.
    for block in tree.cssselect(".main .section"):
        heading = block.cssselect("h1")[0]
        paragraph = block.cssselect("p")[0]
        print(heading.text, paragraph.text)

    # Sidebar hyperlinks: anchor text plus its href attribute.
    for anchor in tree.cssselect(".aside ul li a"):
        print(anchor.text, anchor.attrib.get("href"))

    # Footer bar: a single container element; strip surrounding whitespace.
    bottom = tree.cssselect(".footer .container")[0]
    print(bottom.text.strip())


# parse_html_by_css()

def parse_html_by_xpath():
    """Fetch the local demo page and extract its content via XPath.

    Demonstrates two equivalent access styles on the same anchors: node
    properties (.text / .attrib) versus XPath sub-queries (text() / @href).
    """
    page = requests.get("http://127.0.0.1:5000")
    tree = etree.HTML(page.text)

    for anchor in tree.xpath("//a"):
        # Same data fetched both ways, printed side by side.
        print(
            anchor.text,
            anchor.attrib.get("href"),
            anchor.xpath("./text()")[0],
            anchor.xpath("./@href")[0],
        )

    for block in tree.xpath("//div[@class='section']"):
        heading = block.xpath("./h1/text()")[0]
        paragraph = block.xpath("./p/text()")[0]
        print(heading, paragraph)

    bottom = tree.xpath("//div[@class='footer']/div/text()")[0]
    print(bottom.strip())

# 安装xpath-helper 谷歌商店直接装，或安装包
# ctrl-shift-x 打开
# 按住shift键鼠标滑动就可以找到对应元素的选择路径
# xpath-helper只是一个辅助

# 根路径 （从html开始的绝对路径 以/开头） /html/head/title
# 任意路径 （从任意路径开始 以//开头） //title
# 当前路径 （从当前路径向内部标签查找 以./开头） //body
# 带有顺序的写法 //ul/li[7]/a //li[5] | //li[6] | //li[7]  //li[position() > 5]
# 带有属性
#   //div[@class="aside"]   //*[@属性名]
#   //div[@class]   //*[@属性名=值]
#   //*[contains(@href,"7")]    包含属性href中有7
#   //*[starts-with(@href,"http")]  有href属性以http开头
#   //li[last() - 2]    从后往前找
#   //li[last() - 2] | //li[3]  或者

if __name__ == "__main__":
    # Run the demo only when executed as a script, so importing this
    # module no longer triggers an HTTP request as a side effect.
    parse_html_by_xpath()
