# -*- coding:UTF-8 -*-

'''
@ Project: funny
@ File: shuju.py
@ Author: 韩成瑾
@ Date：2022/10/16 21:16
url = 'https://news.qq.com/zt2020/page/feiyan.htm#/area?adcode=440000'

'''
# TODO 爬虫代码逻辑
#  1.发送请求
#  2.获取数据
#  3.解析数据
#  4.保存数据



import requests
import csv

# COVID-19 scraper: fetch per-region epidemic stats from the Tencent news
# API, print a one-line summary per region, and append the rows to a CSV.

url = ('https://api.inews.qq.com/newsqa/v1/query/inner/publish/modules/'
       'list?modules=localCityNCOVDataList,diseaseh5Shelf')

# 1. Send the request.
#    NOTE: bind the result to `response` — the original rebound the name
#    `requests`, shadowing the imported module.
response = requests.post(url=url)

# 2. Get the data.
#    .text   -> str (html / raw json)
#    .content-> bytes (images, video, audio)
#    .json() -> parsed dict/list
json_data = response.json()

# 3. Parse: children of the first node in areaTree are the regions
#    (schema assumed from the API response: data -> diseaseh5Shelf ->
#    areaTree[0] -> children — TODO confirm if the API changes).
regions = json_data['data']['diseaseh5Shelf']['areaTree'][0]['children']

# 4. Save data.  `with` guarantees the handle is closed even on error
#    (the original never closed it).  Write real CSV columns instead of
#    one tab-joined string so the file can actually be parsed as CSV.
with open('疫情数据.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_writer = csv.writer(f)
    csv_writer.writerow(['name', 'confirm_new', 'confirm_all',
                         'nowConfirm', 'dead', 'heal'])

    for region in regions:
        name = region['name']                        # region name
        confirm_new = region['today']['confirm']     # newly confirmed today
        confirm_all = region['total']['confirm']     # cumulative confirmed
        now_confirm = region['total']['nowConfirm']  # currently confirmed
        dead = region['total']['dead']               # cumulative deaths
        heal = region['total']['heal']               # recovered

        # Console summary, preserved byte-for-byte from the original.
        print(f"{name}新增确诊{confirm_new}例,累计确诊{confirm_all}例,现有确诊{now_confirm}例,累计死亡{dead}人,治愈{heal}人")

        csv_writer.writerow([name, confirm_new, confirm_all,
                             now_confirm, dead, heal])




