from pprint import pprint

import requests
from lxml import etree
import re


# Download a page and return its body as text.
def get_page(url):
    """Fetch *url* with a desktop-browser User-Agent.

    Returns the response body decoded as UTF-8 on HTTP 200,
    otherwise None.
    """
    request_headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    resp = requests.get(url, headers=request_headers)
    if resp.status_code != 200:
        return None
    # resp.content is a raw byte string; decode it into text explicitly.
    return resp.content.decode('utf-8')

# Parse one daily-report detail page.
def parse_detail_page(html):
    """Extract the headline and the new-confirmed-case counts from a detail page.

    Parameters
    ----------
    html : str
        Raw HTML of a daily epidemic-report detail page.

    Returns
    -------
    tuple
        ``(title, xzs)`` where *title* is the page headline text (``None``
        when the expected ``<h1>`` is missing) and *xzs* is the list of
        digit strings captured after each "新增…确诊病例" phrase.
    """
    etree_html = etree.HTML(html)
    # BUG FIX: the original predicate //div["wy_contMain fontSt"] is a truthy
    # string literal, so it matched EVERY div; it must compare @class.
    result = etree_html.xpath('//div[@class="wy_contMain fontSt"]//span//text()')
    # Join the scattered text fragments into one string for the regex scan.
    result_str = ''.join(result)

    # Report title, which carries the date the figures refer to.
    titles = etree_html.xpath('//h1[@class="blue fontNb"]/text()')
    print(titles)
    # Guard against pages without the headline (the original titles[0]
    # raised IndexError on such pages).
    title = titles[0] if titles else None
    print(title)

    # Daily new confirmed-case numbers; raw string avoids the invalid
    # escape-sequence warning for \d.
    pattern = re.compile(r'新增.*?确诊病例(\d+)例', re.S)
    xzs = pattern.findall(result_str)
    print(xzs)
    return title, xzs


# Walk the listing page and scrape every daily report linked from it.
def parse_page(html):
    """Find all report links whose text starts with "截至" and parse each one."""
    # Build an lxml element tree so we can run XPath queries on the page.
    root = etree.HTML(html)
    site_root = 'http://wsjkw.sc.gov.cn'

    anchors = root.xpath('//div[@class="wy_contMain fontSt"]/ul/li/a[starts-with(text(),"截至")]')
    for anchor in anchors:
        href = anchor.xpath('./@href')[0]
        title = anchor.xpath('./text()')[0]
        # The listing stores site-relative paths; prepend the site root.
        detail_url = site_root + href
        # Download the detail page and pull the figures out of it.
        parse_detail_page(get_page(detail_url))

def main():
    """Entry point: download the report listing page and scrape every report."""
    listing_url = 'http://wsjkw.sc.gov.cn/scwsjkw/gggs/tygl.shtml'
    listing_html = get_page(listing_url)
    parse_page(listing_html)


# Run the scraper only when executed as a script (not when imported).
if __name__ == '__main__':
    main()