import json

import requests
import re
from lxml import etree

# 获取网页
# Fetch a page
def get_page(url, timeout=10):
    """Fetch *url* and return its body decoded as UTF-8, or None on non-200.

    Args:
        url: Absolute URL to fetch.
        timeout: Seconds before giving up on connect/read. requests has no
            default timeout, so without this a dead server hangs forever.

    Returns:
        The response body as a str on HTTP 200, otherwise None.

    Raises:
        requests.RequestException: on network errors or timeout (unchanged
        from the original behavior — callers see the exception propagate).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:
        # response.content is raw bytes; decode to get a str.
        return response.content.decode('utf-8')
    return None

# 解析详情页
# Parse a detail (daily bulletin) page
def parse_detail_page(html):
    """Extract date, new confirmed cases, and discharged cases from a bulletin.

    Args:
        html: The detail page's HTML as a str.

    Returns:
        A single-element list ``[{'time': ..., 'sicken': ..., 'cure': ...}]``,
        or ``[]`` when the page does not match the expected patterns
        (the caller skips empty results).  'cure' defaults to 0 when the
        bulletin reports no discharges.
    """
    etree_html = etree.HTML(html)
    spans = etree_html.xpath('//div[@class="wy_contMain fontSt"]//span/text()')
    # Join the text fragments into one big string so the regexes can span them.
    text = ''.join(spans)

    # Raw strings for the regexes; re.S lets '.' cross newlines.
    dates = re.findall(r'.*?(\d+月\d+日).*?新增.*?确诊病例\d+例', text, re.S)
    sickened = re.findall(r'\d+月\d+日.*?新增.*?确诊病例(\d+)例', text, re.S)
    cured = re.findall(r'\d+月\d+日.*?新增.*?出院病例(\d+)例', text, re.S)

    # Pages that don't follow the bulletin format yield no matches; the
    # original code would raise IndexError here — return [] instead so the
    # caller's `if result:` check skips them.
    if not dates or not sickened:
        return []

    return [{
        'time': dates[0],
        'sicken': sickened[0],
        # Some bulletins report no discharged cases; original code used 0.
        'cure': cured[0] if cured else 0,
    }]


# 使用xpath解析网页
# Parse the index page with xpath and scrape each linked bulletin
def parse_page(html):
    """Collect parsed bulletin data for every '截至…' link on the index page.

    Args:
        html: The index page's HTML as a str.

    Returns:
        A list of non-empty parse_detail_page() results, one per bulletin
        that fetched and parsed successfully.
    """
    # Turn the HTML text into an etree node so we can run xpath on it.
    etree_html = etree.HTML(html)
    items = etree_html.xpath('//div[@class="wy_contMain fontSt"]/ul/li/a[starts-with(text(), "截至")]')
    result_list = []
    for item in items:
        link = item.xpath('./@href')[0]
        # Links on the index page are site-relative; prepend the host.
        full_link = 'http://wsjkw.sc.gov.cn' + link
        # Fetch the detail page.
        detail_html = get_page(full_link)
        if detail_html is None:
            # get_page returns None on non-200; etree.HTML(None) would raise.
            continue
        # Parse the detail page.
        result = parse_detail_page(detail_html)
        if result:
            result_list.append(result)
    return result_list

# 保存json文件
# Persist the scraped results as a JSON file
def save_josn(result_list):
    """Write *result_list* to 'yiqing.json' as UTF-8 JSON, keeping non-ASCII text readable."""
    with open('yiqing.json', 'w', encoding='utf-8') as f:
        json.dump(result_list, f, ensure_ascii=False)


def main():
    """Entry point: fetch the bulletin index, scrape every detail page, then save and print the results."""
    index_url = 'http://wsjkw.sc.gov.cn/scwsjkw/gggs/tygl.shtml'
    index_html = get_page(index_url)
    scraped = parse_page(index_html)
    print(scraped)
    save_josn(scraped)

if __name__ == '__main__':
    main()