import re
from pprint import pprint
from urllib.parse import urljoin

import requests
from lxml import etree
# import mysql_helper

# con = mysql_helper.get_connection()
# cursor = mysql_helper.get_cursor(con)


# 获取网页
def get_page(url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        # response.content返回的是字节流
        return response.content.decode('utf-8')
    return response.status_code


# 解析详情页
def parse_detail_page(html):
    etree_html = etree.HTML(html)
    res = etree_html.xpath('string(//div[@class="wy_contMain fontSt"])')
    title = etree_html.xpath('//div[@class="wy_dis_main"]/h1/text()')
    # 把列表转换成字符串
    result_str = ''.join(res)
    title_str = ''.join(title)
    # print(result_str)
    # 1.获取时间
    pattern = re.compile(r'截至(.*?)24时', re.S)
    date = re.findall(pattern, title_str)[0]
    if len(date) == 3:
        date = '1月' + date
    print(f'日期：{date}')
    # 2.获取每日新增病例数
    pattern = re.compile(r'新增[\u4e00-\u9fa5]*?确诊病例(\d+)例', re.S)
    xzs = re.findall(pattern, result_str)
    if xzs:
        print('新增病例:', xzs[0])
    else:
        xzs = 0
        print('新增病例未统计', xzs)
    # 3.获取每日新增治愈病例数
    pattern = re.compile(r'新增治愈出院病例(\d+)例', re.S)
    new_cure = re.findall(pattern, result_str)
    if new_cure:
        print('新增治愈病例:', new_cure[0])
    else:
        new_cure = 0
        print('无新增治愈病例', new_cure)
    # 4.获取死亡病例数
    pattern = re.compile(r'死亡(\d+)人', re.S)
    death = re.findall(pattern, result_str)
    if death:
        print('死亡病例数:', death[0])
    else:
        death = 0
        print('无死亡病例', death)


# 使用xpath解析网页
def parse_page(html):
    # 把html文本对象转换成etree节点对象
    etree_html = etree.HTML(html)
    items = etree_html.xpath('//div[@class="contMain fontSt"]/ul/li/a')
    url = 'http://wsjkw.sc.gov.cn'  # 前域名
    for item in items:
        link = item.xpath('./@href')[0]
        full_link = url + link
        # print(full_link, title)
        # 爬取详情页信息
        detail_html = get_page(full_link)
        # 解析详情页信息
        parse_detail_page(detail_html)


def main():
    """Entry point: download the report index page and scrape it."""
    index_url = 'http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl.shtml'
    index_html = get_page(index_url)
    parse_page(index_html)


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
