from pprint import pprint

import requests
import re
from lxml import etree



# Fetch a web page
def get_page(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns None when the server answers with any non-200 status.
    A browser-like User-Agent is sent because the site rejects the
    default requests UA.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    # timeout prevents the scraper from hanging forever on a stalled server
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
        # response.content is raw bytes; decode explicitly instead of relying
        # on requests' charset guessing via response.text
        return response.content.decode('utf-8')
    return None


# Parse one daily-bulletin detail page
def parse_detail_page(html):
    """Extract one day's epidemic figures from a bulletin detail page.

    Parameters:
        html: the detail page's HTML source as a string.

    Returns a dict with keys:
        'date'      -- reporting date text taken from the <h1> title
                       (the span between "截至" and "我"), '' if absent
        'increment' -- new confirmed cases that day, '' if not found
        'total'     -- cumulative confirmed cases, '' if not found
    """
    etree_html = etree.HTML(html)
    spans = etree_html.xpath('//div[@class="wy_contMain fontSt"]//span/text()')
    # Flatten the list of text fragments into one searchable string
    result_str = ''.join(spans)

    # Reporting date is embedded in the page title, e.g. "截至2月3日24时我省..."
    titles = etree_html.xpath('//h1[@class="blue fontNb"]/text()')
    # Search the actual title text, not the repr of the list (old str(titles) bug)
    date_matches = re.findall(r'截至(.*?)我', ''.join(titles))
    # Bug fix: previously only the FIRST CHARACTER of the date was kept
    # (date1[0]); keep the whole matched date string.
    date1 = ''.join(date_matches)

    # Daily new-case count, e.g. "我省...新增...病例12例"
    xzs = re.findall(r'我省.*?新增.*?病例(\d+)例', result_str, re.S)

    # Cumulative confirmed total, e.g. "累计...确诊病例456例"
    total = re.findall(r'累计[\u4e00-\u9fa5]*?确诊病例(\d+)例', result_str, re.S)

    # Guard every lookup: pages occasionally change wording, and an
    # unguarded total[0] used to raise IndexError on such pages.
    return {
        'date': date1,
        'increment': xzs[0] if xzs else '',
        'total': total[0] if total else '',
    }
# Parse the bulletin index page with XPath
def parse_page(html):
    """Walk the bulletin index page and scrape every daily detail page.

    Parameters:
        html: HTML source of the index (list) page.

    Returns a list of per-day dicts as produced by parse_detail_page,
    one entry per bulletin link whose anchor text starts with "截至".
    """
    data_total = []
    etree_html = etree.HTML(html)
    # Only anchors whose text begins with "截至" are daily bulletins;
    # other links in the same list are unrelated announcements.
    items = etree_html.xpath('//div[@class="contMain fontSt"]/ul/li/a[starts-with(text(), "截至")]')
    for item in items:
        # hrefs on this site are relative paths
        link = item.xpath('./@href')[0]
        full_link = 'http://wsjkw.sc.gov.cn' + link
        # Fetch the detail page for this day
        detail_html = get_page(full_link)
        if detail_html is None:
            # Skip pages that failed to download instead of crashing
            # etree.HTML(None) inside parse_detail_page.
            continue
        data_total.append(parse_detail_page(detail_html))

    return data_total


def main():
    """Entry point: fetch the bulletin index and scrape all daily figures."""
    url = 'http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl.shtml'
    html = get_page(url)
    if html is None:
        # get_page returns None on any non-200 response; nothing to parse.
        print('Failed to fetch index page:', url)
        return
    # Show the scraped data instead of silently discarding it
    pprint(parse_page(html))


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()