import json
import os
import time
import requests
import pandas as pd
from fake_useragent import UserAgent


class NetEaseNews(object):
    """Scraper for NetEase's realtime COVID-19 epidemic data platform.

    Fetches the aggregate JSON feed once at construction time, then flattens
    the city / province / world / daily-history sections into DataFrames and
    saves each of them to a date-stamped CSV file.
    """

    def __init__(self):
        # Aggregate endpoint: today's and cumulative figures for every region.
        self.url = "https://c.m.163.com/ug/api/wuhan/app/data/list-total"
        # Per-area history endpoint (areaCode=520000); currently unused by the parsers.
        self.historyUrl = "https://c.m.163.com/ug/api/wuhan/app/data/list-by-area-code?areaCode=520000"
        # Random User-Agent lowers the chance of the request being blocked.
        self.headers = {'User-Agent': UserAgent().random}
        self.data = self.get_html()

    def get_html(self):
        """Download the aggregate feed and return its "data" section.

        :return: dict with the feed payload, or None on any network/parse error.
        """
        try:
            r = requests.get(url=self.url, headers=self.headers, timeout=3)
            r.encoding = r.apparent_encoding
            status = r.status_code
            json_data = r.json()  # decode JSON straight from the response
            if json_data is not None:
                print("爬虫状态码: " + str(status))
            return json_data["data"]
        except Exception as e:
            print(e)
            return None  # explicit: callers get None when the fetch failed

    def save_to_csv(self, data, name):
        """Write *data* to "<name>_<YYYY_MM_DD>.csv" and print the outcome.

        :param data: DataFrame (or Series) to persist.
        :param name: base name for the output file.
        """
        file_name = name + "_" + time.strftime("%Y_%m_%d", time.localtime(time.time())) + ".csv"
        # utf_8_sig writes a BOM so spreadsheet apps decode the headers correctly.
        data.to_csv(file_name, index=False, encoding="utf_8_sig")
        # Confirm the file actually landed on disk before reporting success.
        if os.path.exists(file_name):
            print(file_name + " 保存成功")
        else:
            print('保存失败')

    def parser_common(self, values):
        """Flatten each record's "today" and "total" dicts into prefixed columns.

        :param values: list of dicts, each carrying "today" and "total" sub-dicts.
        :return: DataFrame with today_* columns followed by total_* columns.
        """
        today_data = pd.DataFrame([item["today"] for item in values])
        today_data.columns = ["today_" + col for col in today_data.columns]
        total_data = pd.DataFrame([item["total"] for item in values])
        total_data.columns = ["total_" + col for col in total_data.columns]
        return pd.concat([today_data, total_data], axis=1)

    def parser_today_city(self):
        """Latest data for every Chinese city, tagged with its province.

        :return: DataFrame with one row per city.
        """
        # areaTree[2] is assumed to be China; its children are the provinces
        # (positional in the feed, not guaranteed by the API -- verify on breakage).
        province_data = self.data["areaTree"][2]["children"]
        frames = []
        for province in province_data:
            name = province["name"]  # province name
            df = pd.DataFrame(province["children"])[["name", "lastUpdateTime"]]  # city, update time
            df["province"] = name
            # Today's and cumulative figures for each city of this province.
            common_data = self.parser_common(province["children"])
            frames.append(pd.concat([df, common_data], axis=1))
        # DataFrame.append() was removed in pandas 2.0 -- collect and concat once.
        detail_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        self.save_to_csv(detail_df, "china_today_city")
        return detail_df

    def parser_today_province(self):
        """Latest data for every Chinese province.

        :return: DataFrame with one row per province.
        """
        province_data = self.data["areaTree"][2]["children"]

        detail_df = pd.DataFrame(province_data)[["id", 'name', "lastUpdateTime"]]

        common_data = self.parser_common(province_data)
        detail_df = pd.concat([detail_df, common_data], axis=1)

        self.save_to_csv(detail_df, "today_province")
        return detail_df

    def parser_today_world(self):
        """Latest data for every country in the feed.

        :return: DataFrame with one row per country.
        """
        world_data = self.data["areaTree"]

        detail_df = pd.DataFrame(world_data)[['name', "lastUpdateTime"]]

        common_data = self.parser_common(world_data)
        detail_df = pd.concat([detail_df, common_data], axis=1)
        self.save_to_csv(detail_df, "today_world")
        return detail_df

    def parser_chinaDayList(self):
        """National history series (roughly the last 60 days).

        :return: DataFrame with one row per day.
        """
        chinaDayList = self.data["chinaDayList"]

        detail_df = pd.DataFrame(chinaDayList)['date']
        common_data = self.parser_common(chinaDayList)

        detail_df = pd.concat([detail_df, common_data], axis=1)
        self.save_to_csv(detail_df, "chinaDayList")

        return detail_df

    def run(self):
        """Entry point: run every parser, each of which saves its own CSV."""
        self.parser_today_city()
        self.parser_today_province()
        self.parser_today_world()
        self.parser_chinaDayList()


if __name__ == '__main__':
    # Bind the instance to its own name instead of shadowing the class
    # (the original rebound `NetEaseNews` to an instance at module level).
    spider = NetEaseNews()
    spider.run()