#导库
import requests  #向服务器请求资源的库
import re        #用于分析网页源码的正则表达式的库
import json      #用于解读json的库
import csv       #用于数据采集后保存格式的库
# Browser disguise: send a realistic User-Agent so the server responds as it
# would to a normal mobile Chrome browser rather than a script.
headers = {
    # Basic browser identification string (Chrome on Android).
    "user-agent": (
        "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/103.0.0.0 Mobile Safari/537.36"
    ),
}

# Target page: Baidu's real-time epidemic report. The case data is embedded
# in the page HTML as a JSON fragment, not served from a separate API.
url = "https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_aladin_banner"
# 1. Request the page.
response = requests.get(url=url, headers=headers)
# 2. Get the raw HTML source.
html_data = response.text
# 3. Extract the embedded JSON with a regex. Use a raw string so the escaped
#    bracket `\[` is a regex escape, not an invalid string escape (the
#    original non-raw pattern triggers a SyntaxWarning on modern Python).
#    `(.*)` greedily captures everything between `"component":[` and the
#    final `],` that follows it in the page source.
json_str = re.findall(r'"component":\[(.*)\],', html_data)[0]
# Convert the JSON text into Python objects so fields can be accessed by key.
json_dict = json.loads(json_str)
caseList = json_dict["caseList"]
# 4. Save the data. Open data.csv once and write the header row followed by
#    one row per region — the original reopened the file on every loop
#    iteration, which is wasteful. NOTE: mode 'a' appends, so re-running the
#    script adds another header + data section to an existing data.csv.
with open('data.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_writer = csv.writer(f)
    header_list = ['area', 'curConfirm', 'confirmedRelative', 'confirmed', 'crued', 'died']  # column titles
    csv_writer.writerow(header_list)
    for case in caseList:
        area = case['area']                            # province / region name
        curConfirm = case['curConfirm']                # currently confirmed
        confirmedRelative = case['confirmedRelative']  # newly confirmed
        confirmed = case['confirmed']                  # cumulative confirmed (bug fix: original read case['curConfirm'] here)
        crued = case['crued']                          # recovered
        died = case['died']                            # deaths
        print(area, curConfirm, confirmedRelative, confirmed, crued, died)
        csv_writer.writerow([area, curConfirm, confirmedRelative, confirmed, crued, died])
