import re

import requests
from bs4 import BeautifulSoup
from lxml import etree

# 1. lxml parsing =================================
# To parse an in-memory string instead of a file, use etree.HTML(htmltext).
html_parser = etree.HTMLParser()
htmlObj = etree.parse('./indextest.html', html_parser)  # ElementTree over the document
result = etree.tostring(htmlObj)  # serialized bytes, reused by the BeautifulSoup section
print(result.decode("utf-8"))


# Basic XPath path selectors, demonstrated one expression at a time.
#   //   descendants at any depth      /    direct children only
#   .    the current node itself       ..   the parent node
for xpath_expr in (
    '//*',                  # every descendant node
    '//input',              # every <input> descendant
    '/*',                   # direct children of the document root
    '/input',               # direct <input> children of the root (empty for HTML)
    '/*/body/form/input',   # <input> children of the form
    '/*/body/form/.',       # the form node itself
    '/*/body/form/..',      # the form's parent
):
    print(htmlObj.xpath(xpath_expr))

# [@attr] predicates: match elements by attribute value.
single_attr_match = htmlObj.xpath('//input[@value = "submit"]')         # one attribute
print(single_attr_match)
multi_attr_match = htmlObj.xpath('//p[@class = "www" and @id = "p2"]')  # several attributes
print(multi_attr_match)
# contains() performs a substring match on the attribute value.
print(htmlObj.xpath('//p[contains(@class ," ")]'),"匹配属性中部分关键字")

# @attr after the path extracts the attribute values themselves.
first_p_id = htmlObj.xpath('//p/@id')[0]
print(first_p_id)

# text() selects the text child nodes of the matched element.
print(htmlObj.xpath('//p[@id="p1"]/text()'))

# Positional predicates pick nodes by index (XPath indices are 1-based).
print(htmlObj.xpath('//p[1]/text()'),"选取指定的节点")
print(htmlObj.xpath('//p[position()<3]/text()'))  # first two
print(htmlObj.xpath('//p[last()]/text()'))        # last
print(htmlObj.xpath('//p[last()-1]/text()'))      # second to last

# XPath axes: select nodes relative to the current one.
# following:: walks everything after the element's closing tag in document order.
axis_demos = (
    ('//p[1]/ancestor::*', "祖先标签"),                 # all ancestors
    ('//p[1]/ancestor::div', None),                     # only <div> ancestors
    ('//p[1]/attribute::*', "返回所有属性"),            # every attribute node
    ('//div[1]/child::*', "返回所直接子标签"),          # direct children
    ('//div[1]/child::p[@id="p1"]', "返回指定直接子标签"),
    ('//div[1]/descendant::*', "返回子孙标签"),         # descendants, any depth
    ('//div[1]/descendant::p[@id="p1"]', "返回指定子孙标签"),
    ('//div[1]/following::*', "返回兄弟同级标签"),
    ('//div[1]/following::*[3]', "返回兄弟同级标签"),
)
for axis_expr, label in axis_demos:
    matched = htmlObj.xpath(axis_expr)
    if label is None:
        print(matched)
    else:
        print(matched, label)





# 2. BeautifulSoup parsing =================================
soup = BeautifulSoup(result,'lxml')
print(soup.prettify() ,'BeautifulSoup解析===========')

# --- Tag access: soup.<tag> always returns the FIRST matching element ---
first_p = soup.p
print(first_p.string,'节点的内容')       # text content of the first <p>
print(soup.title.string)                 # likewise, first <title>
print(type(first_p))                     # a bs4 Tag object

print(soup.title.name ,'节点标签的名称')  # the tag's own name

print(first_p.attrs ,"节点属性")          # all attributes as a dict
print(first_p.attrs['class'])

print(soup.div.p.attrs,'嵌套节点选择')    # tag access chains through children

# --- Relational navigation from a node ---
first_div = soup.div
print(first_div.contents,"关联选择 子节点列表")    # direct children as a list
print(first_div.contents[1].attrs['class'])
print(first_div.children,"直接子节点元素集合")     # lazy iterator of direct children
# Iterate to see the items, e.g.: for child in first_div.children: print(child)
print(first_div.descendants,"子孙节点")            # lazy iterator, any depth
print(first_p.parent,"父节点")
print(first_p.parents,"祖先节点")                  # generator of all ancestors
print(first_p.next_sibling,"兄弟节点")
print(first_p.previous_sibling)
print(first_p.next_siblings)                       # generators; iterate to consume
print(first_p.previous_siblings)
print(list(first_p.parents)[0].attrs['id'],"提取元素")  # nearest ancestor's id


# Method selectors
# find_all(name, attrs, recursive, string, **kwargs)
print(soup.find(name='div'),"根据标签名获取元素") # first match only
print(soup.find_all(name='div')) # every match

print(soup.find_all(attrs={"id":"p3"}),"根据属性获取元素")
print(soup.find_all(attrs={"class":"www"}))
print(soup.find_all(id = "p3")) # keyword-argument form
print(soup.find_all(class_ = "www")) # class_ because "class" is a Python keyword

# The text= keyword was renamed to string= in Beautiful Soup 4.4; the old
# spelling is deprecated but behaves identically.
print(soup.find(string=re.compile("2")),"根据文本获取元素")


# CSS selectors via select()
print(soup.select('div'),"标签择器=======")   # tag selector
print(soup.select('div p'))                   # descendant combinator
print(soup.select('.www'),"类选择器")         # class selector
print(soup.select('#p4'),"id选择器")          # id selector

print(soup.select("div")[0].select("p") ,"嵌套选取")  # select() nests on a Tag

p1_tag = soup.select("#p1")[0]
print(p1_tag['id'],"获取属性")   # subscript access
print(p1_tag.attrs['id'])        # equivalent: via the attrs dict

print(p1_tag.string,"获取文本")  # direct text node
print(p1_tag.get_text())         # equivalent: recursive text extraction




from pyquery import PyQuery as pq
# 3. pyquery parsing — jQuery-style selectors.
# Alternative input sources:
#   doc = pq(url='http://www.baidu.com')
#   doc = pq(requests.get('http://www.baidu.com').text)
doc = pq(filename='./indextest.html')
print(doc)

print(doc('#div1 .www' ))          # elements with class www inside #div1
print(doc('#div1' ).find('#p3'))   # find() searches within the current selection
print(doc('#p3' ).parent())        # the matched element's parent













