"""__author__=晏"""
# https://www.douban.com/group/explore

import requests



# Fetch the page HTML
from lxml import etree


def get_page(timeout=10):
    """Fetch the Douban group explore page.

    Args:
        timeout: Seconds to wait for the server before giving up.
            (Without a timeout, requests can block indefinitely on a
            stalled connection.)

    Returns:
        The page HTML decoded as a UTF-8 string, or None when the
        server responds with a non-200 status code.
    """
    url = 'https://www.douban.com/group/explore'
    # A browser-like User-Agent so the site serves the normal page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:
        # response.content is raw bytes; decode turns it into a str
        return response.content.decode('utf-8')
    return None


# Parse the page with XPath
def parse_page(html):
    """Parse the explore page HTML with XPath and print each entry title.

    Args:
        html: The page source as a string (as returned by get_page()).

    The commented examples below are a reference of common XPath idioms
    tried against this page while developing the scraper.
    """
    # Convert the HTML text into an etree node object
    etree_html = etree.HTML(html)
    # print(etree_html)

    # // matches all descendants   * matches any node   //* any descendant node
    # result = etree_html.xpath('//*')
    # print(result)
    # Match every img tag
    # result = etree_html.xpath('//img')
    # print(result)

    # Extract the text inside every a tag
    # result = etree_html.xpath('//a/text()')
    # print(result)

    # / matches a direct child node
    # result = etree_html.xpath('//div/h3/a/text()')

    # Select nodes by attribute value: [@class="likes"]
    # results = etree_html.xpath('//div[@class="likes"]/text()')
    # likes_list = []
    # for i in range(len(results)):
    #     if i % 2 == 0:
    #         likes_list.append(results[i])
    # print(likes_list)

    # Read an attribute value with @attr
    # result = etree_html.xpath('//div[@class="pic"]/div[@class="pic-wrap"]/img/@src')
    # print(result)

    # When an attribute holds several values but you only know one, use contains
    # results = etree_html.xpath('//div[contains(@class, "grid-16-8")]//div[@class="likes"]/text()')
    # print(results)

    # contains and friends can be combined with and/or conditions
    # results = etree_html.xpath('//span[@class="pubtime" and contains(text(), "昨天")]/text()')
    # print(results)

    # .. refers to the parent node
    # results = etree_html.xpath('//span[@class="pubtime" and contains(text(), "昨天")]/../../h3/a/text()')
    # print(results)

    # Entries published yesterday between 16:00 and 18:00
    # results = etree_html.xpath('//span[@class="pubtime" and contains(text(), "昨天") and (starts-with (substring-after( text(), "昨天"), "16:") or starts-with (substring-after( text(), "昨天"), "17:"))]/text()')
    # print(results)

    # Select by position: indexes start at 1, not 0
    # [1] [first()] [last()] [position() < 4]
    # The 2nd title
    # results = etree_html.xpath('//div[@class="channel-item"][2]/div/h3/a/text()')
    # print(results)
    # Titles 3 through 5
    # results = etree_html.xpath('//div[@class="channel-item"][position() >=3 and position() <= 5]/div/h3/a/text()')
    # print(results)

    # following::* — every node after the current node's closing tag
    # results = etree_html.xpath('//div[@class="channel-item"][2]/following::*')
    # print(results)

    # following-sibling::* — every later sibling of the current node
    # results = etree_html.xpath('//div[@class="channel-item"][2]/following-sibling::*')
    # print(results)

    # Grab each whole record first, then extract fields within it, so the
    # per-field lists cannot get out of sync (avoids index-out-of-range).
    channel_items = etree_html.xpath('//div[@class="channel-item"]')
    for item in channel_items:
        # Key point: the leading './' keeps the search relative to this item.
        titles = item.xpath('./div[@class="bd"]/h3/a/text()')
        # Guard: an item without a title link would otherwise raise IndexError.
        if titles:
            print(titles[0])


def main():
    """Fetch the explore page and print the title of every entry."""
    html = get_page()
    # print(html)
    # get_page() returns None on a non-200 response; skip parsing then,
    # since etree.HTML(None) would raise a ValueError.
    if html is not None:
        parse_page(html)


if __name__ == '__main__':
    main()
