
from pprint import pprint

import requests
# from lxml import etree
from lxml import etree

# Fetch the target web page.
def get_page():
    """Download the Douban group-explore page.

    Returns:
        The page body decoded as UTF-8 on HTTP 200, otherwise None.
    """
    url = 'https://www.douban.com/group/explore'
    # Spoof a browser User-Agent so the site serves the normal page.
    request_headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    response = requests.get(url, headers=request_headers)
    if response.status_code != 200:
        return None
    # .content is a raw byte string; decode it to str explicitly.
    return response.content.decode('utf-8')

# Parse the page with XPath.
def parse_page(html):
    """Run a series of XPath selection demos against the page HTML.

    Args:
        html: the page source as a string, or None (e.g. when the
            download in get_page() failed) — None is a no-op.
    """
    # Guard: get_page() returns None on a non-200 response; etree.HTML(None)
    # would raise, so bail out early instead.
    if html is None:
        return

    # Convert the HTML text into an lxml element tree.
    etree_html = etree.HTML(html)

    # // matches descendants at any depth; * matches any element type,
    # so //* selects every element in the document.
    result = etree_html.xpath('//*')

    # All <img> elements.
    result = etree_html.xpath('//img')

    # Text content of every <a> element.
    result = etree_html.xpath('//a/text()')

    # / selects direct children only.
    result = etree_html.xpath('//div/h3/a/text()')

    # Filter nodes by attribute value: [@class="likes"].
    result = etree_html.xpath('//div[@class="likes"]/text()')

    # Keep every other text node — an idiomatic slice replaces the
    # original index loop (for i in range(len(result)) with i % 2 == 0).
    likes_list = result[::2]

    # Use @attr to extract an attribute's value.
    result = etree_html.xpath('//div[@class="pic"]/div[@class="pic-wrap"]/img/@src')

    # Multi-valued attribute matching: when an attribute holds several
    # values and you only know one, use contains(@class, ...).
    # result = etree_html.xpath('//div[contains(@class,"grid-16-8")]//div[@class="likes"]/text()')

    # Predicates can combine conditions with `and` / `or`.
    result = etree_html.xpath('//span[@class="pubtime" and contains(text(),"昨天")]/text()')

    # .. walks up to the parent node.
    result = etree_html.xpath('//span[@class="pubtime" and contains(text(),"昨天")]/../../h3/a/text()')

    # Entries published yesterday between 16:00 and 18:59.
    # BUG FIX: the original checked only "16:" and "18:", silently
    # skipping every 17:xx timestamp inside the advertised range.
    result = etree_html.xpath(
        '//span[@class="pubtime" and contains(text(), "昨天")'
        ' and (starts-with(substring-after(text(),"昨天"), "16:")'
        ' or starts-with(substring-after(text(),"昨天"), "17:")'
        ' or starts-with(substring-after(text(),"昨天"), "18:"))]/text()')

    # Positional predicates are 1-based, not 0-based:
    # [1] [last()] [position() < 4]
    result = etree_html.xpath('//div[@class="channel-item"][1]/div/h3/a/text()')

    # Titles 3 through 5.
    result = etree_html.xpath('//div[@class="channel-item"][position() >= 3 and position() <= 5]/div/h3/a/text()')

    # following::* selects every node after this node's closing tag.
    result = etree_html.xpath('//div[@class="channel-item"][2]/following::*')

    # following-sibling::* restricts that to same-level siblings.
    result = etree_html.xpath('//div[@class="channel-item"][2]/following-sibling::*')

def main():
    """Entry point: download the page, then run the XPath parsing demos."""
    page_html = get_page()
    parse_page(page_html)


if __name__ == '__main__':
    main()

