"""__author__=Hanlin"""
# Scrape epidemic bulletin pages from the Sichuan Health Commission site
import json

import mysql_helper
from lxml import etree
import re
import requests


# Shared DB handles, opened once at import time and reused by parse_page.
con = mysql_helper.get_connection()
cursor = mysql_helper.get_cursor(con)
# Module-level accumulators; parse_detail_page appends one entry per
# bulletin page, and parse_page reads them back out by index.
dates = []  # report dates ("statistics time" from each bulletin title)
n_xzs = []  # daily new confirmed cases
n_xzcys = []  # daily newly cured/discharged cases
n_sws = []  # cumulative deaths



# Fetch a single page

def get_page(url):
    """Fetch *url* and return its body as text.

    Args:
        url: absolute URL of the page to download.

    Returns:
        str | None: UTF-8 decoded response body when the server answers
        with HTTP 200, otherwise None.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }

    # Without an explicit timeout, requests can block indefinitely on an
    # unresponsive server; 10s is ample for these small bulletin pages.
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
        # response.content is a byte string; decode it into str.
        return response.content.decode('utf-8')
    return None


def parse_detail_page(html):
    """Parse one daily-bulletin detail page and append the extracted figures
    to the module-level accumulator lists (dates, n_xzs, n_xzcys, n_sws).

    Args:
        html: detail-page HTML text, or None/empty when the fetch failed
            (such pages are skipped so etree.HTML does not raise).
    """
    if not html:
        # get_page returns None on a non-200 response; skip silently.
        return

    etree_html = etree.HTML(html)
    # The bulletin body text lives in <span> tags of the main content div.
    result = etree_html.xpath('//div[@class="wy_contMain fontSt"]//span/text()')
    # Collapse the list of text fragments into one searchable string.
    result_str = ''.join(result)

    # The report date is embedded in the <h1> title ("截至...我省...").
    titles = etree_html.xpath('//h1[@class="blue fontNb"]/text()')
    n_date = ''.join(re.findall(r'截至(.*?)我', ''.join(titles)))

    # Daily new confirmed cases.
    # NOTE: all patterns are raw strings — '\d' in a plain string is an
    # invalid escape sequence in newer Python versions.
    xzs = ''.join(re.findall(r'我省.*?新增.*?病例(\d+)例', result_str))

    # Daily newly cured/discharged cases; fall back to a placeholder when
    # the bulletin does not mention any.
    xzcy = re.findall(r'新增.*?治愈出院病例(\d+)例', result_str)
    n_xzcy = ''.join(xzcy) if xzcy else '暂无数据'

    # Cumulative deaths, with a placeholder when none are reported.
    sw = re.findall(r'死亡(\d+)人', result_str)
    n_sw = ''.join(sw) if sw else '暂无死者出现'

    dates.append(n_date)
    n_xzs.append(xzs)
    n_xzcys.append(n_xzcy)
    n_sws.append(n_sw)







def parse_page(html):
    """Parse a bulletin-index page: follow every daily-report link, collect
    the parsed figures, persist each row to MySQL and return the rows.

    Args:
        html: index-page HTML text.

    Returns:
        list[dict]: one dict per bulletin with keys 统计时间 / 新增病例 /
        新增治愈 / 累计死亡.
    """
    # Convert the HTML text into an etree node object.
    etree_html = etree.HTML(html)
    # Only anchors whose text starts with "截至" are daily reports.
    items = etree_html.xpath('//div[@class="wy_contMain fontSt"]/ul/li/a[starts-with(text(), "截至")]')

    # parse_detail_page appends to module-level lists. Remember where this
    # page's entries begin: the original indexed from 0, which re-read the
    # first page's stale entries when parse_page was called a second time.
    start = len(dates)

    for item in items:
        link = item.xpath("./@href")[0]
        full_link = 'http://wsjkw.sc.gov.cn' + link
        # Fetch the detail page (None on failure) and parse it; parsing
        # appends one entry per successfully fetched page.
        detail_html = get_page(full_link)
        if detail_html is not None:
            parse_detail_page(detail_html)

    result = []
    # Iterate only over the entries appended during this call.
    for i in range(start, len(dates)):
        data_dict = {
            '统计时间' : dates[i],
            '新增病例' : n_xzs[i],
            '新增治愈' : n_xzcys[i],
            '累计死亡' : n_sws[i]
        }
        mysql_helper.save_db(con, cursor, data_dict)
        result.append(data_dict)

    return result



def save_json(result):
    """Write *result* to 'yiqing.json' as UTF-8 JSON, keeping Chinese
    characters unescaped."""
    with open('yiqing.json', 'w', encoding='utf-8') as out_file:
        json.dump(result, out_file, ensure_ascii=False)

def main():
    """Entry point: scrape the first bulletin-index page, persist the rows
    to MySQL (via parse_page) and export them to 'yiqing.json'."""
    index_url = 'http://wsjkw.sc.gov.cn/scwsjkw/gggs/tygl.shtml'
    index_html = get_page(index_url)
    save_json(parse_page(index_html))



# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()