from urllib import request
from re import findall
from bs4 import BeautifulSoup
from json import dump, loads, load
from datetime import date


def _fetch_soup(url):
    """Download *url* with a browser User-Agent and return a BeautifulSoup tree."""
    req = request.Request(url)
    req.add_header('user-agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36')
    # Close the HTTP response deterministically instead of leaking it.
    with request.urlopen(req) as res:
        html = res.read()
    return BeautifulSoup(html, features="lxml")


def _extract_json_list(bs, script_id):
    """Return the JSON array embedded in the <script> tag with id *script_id*.

    The DXY page inlines its data as ``window.xxx = [...]`` inside script
    tags; the regex grabs everything between the outermost brackets.
    Raises IndexError if the tag or the bracketed payload is missing.
    """
    data = bs.find('script', id=script_id)
    json_str = findall(r'\[.+\]', str(data))  # raw string: '\[' is a regex escape
    return loads(json_str[0])


def _update_history(path, today_record):
    """Append *today_record* to the JSON history at *path*, or replace
    today's existing entry in place if the figures changed."""
    with open(path, 'r', encoding='utf8') as f:
        data_list = load(f)
    # Guard against an empty history file (original code raised IndexError).
    if data_list and data_list[-1]['date'] == today_record['date']:
        if data_list[-1] == today_record:
            return  # nothing changed today; skip the rewrite
        data_list[-1] = today_record
    else:
        data_list.append(today_record)
    with open(path, 'w', encoding='utf8') as d:
        dump(data_list, d)


def main():
    """Scrape COVID-19 statistics from the DXY pneumonia page and persist
    world, per-province (China) and nation-wide (China) figures as JSON
    files under ./data/."""
    url = "https://ncov.dxy.cn/ncovh5/view/pneumonia"
    bs = _fetch_soup(url)

    # --- world-wide per-country data ---
    world_data = _extract_json_list(bs, 'getListByCountryTypeService2true')
    confirm_count = sum(item['confirmedCount'] for item in world_data)
    current_confirm_count = sum(item['currentConfirmedCount'] for item in world_data)
    dead_count = sum(item['deadCount'] for item in world_data)
    cured_count = sum(item['curedCount'] for item in world_data)
    closed = dead_count + cured_count  # closed cases; avoid ZeroDivisionError on empty data
    global_digits = {
        'confirmedCount': confirm_count,
        'currentConfirmedCount': current_confirm_count,
        'deadCount': dead_count,
        'curedCount': cured_count,
        'deathRate': round((dead_count / closed) * 100, 2) if closed else 0.0,
    }
    with open('./data/world_num.json', 'w', encoding='utf8') as fp:
        dump(global_digits, fp, ensure_ascii=False)
    with open('./data/cov_info_of_world.json', 'w', encoding='utf8') as fp:
        dump(world_data, fp, ensure_ascii=False)

    # --- per-province data for China ---
    china_data = _extract_json_list(bs, 'getAreaStat')
    with open('./data/cov_info_of_china.json', 'w', encoding='utf8') as fp:
        dump(china_data, fp, ensure_ascii=False)

    # --- nation-wide daily figures for China ---
    stats_html = str(bs.find('script', id='getStatisticsService'))
    fields = ['currentConfirmedCount', 'suspectedCount', 'seriousCount',
              'confirmedCount', 'curedCount', 'deadCount']
    counts = {}
    for field in fields:
        # e.g. '"deadCount":4636' -> 4636; raw string so '\d' is a real
        # regex escape, and a capture group replaces the old 3-step slicing.
        matches = findall(r'"%s":(\d+)' % field, stats_html)
        counts[field] = int(matches[0])
    final_data = {
        'currentConfirmedCount': counts['currentConfirmedCount'],
        'suspectedCount': counts['suspectedCount'],
        'seriousCount': counts['seriousCount'],
        'confirmedCount': counts['confirmedCount'],
        # NOTE(review): key deliberately kept as 'cureCount' (not
        # 'curedCount') for backward compatibility with existing
        # consumers of cov_num.json, even though every other output
        # file spells it 'curedCount'.
        'cureCount': counts['curedCount'],
        'deadCount': counts['deadCount'],
        'date': str(date.today()),
    }
    _update_history('./data/cov_num.json', final_data)


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
