import json
import os
import re

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

class CoronaVirusSpider(object):
    """Crawler for COVID-19 statistics published on the DXY (Ding Xiang Yuan) site.

    The landing page embeds JSON payloads inside ``<script>`` tags; this
    spider extracts them, follows the per-country/per-province
    ``statisticsData`` URLs, and saves the results as JSON files under
    ``data/``.
    """

    def __init__(self):
        """Store the target URL of the DXY epidemic landing page."""
        self.home_url = 'https://ncov.dxy.cn/ncovh5/view/pneumonia'

    def get_content(self, url, timeout=30):
        """
        Fetch a URL and return the decoded response body.

        :param url: URL to request
        :param timeout: seconds to wait for the server; added so a stalled
            connection cannot hang the whole crawl (default keeps callers
            backward-compatible)
        :return: response body as a ``str``
        :raises requests.HTTPError: if the server answers with a 4xx/5xx status
        """
        response = requests.get(url, timeout=timeout)
        # Fail fast on HTTP errors instead of handing an error page to the
        # HTML/JSON parsers downstream, where it would fail obscurely.
        response.raise_for_status()
        return response.content.decode()

    def fetch_data(self, page_url, tag):
        """
        Extract the embedded JSON payload from the home page HTML.

        :param page_url: HTML text of the page (despite the name this is the
            page *content*, not a URL — name kept for backward compatibility)
        :param tag: id of the ``<script>`` element that carries the data
        :return: the parsed JSON object (a list of country/province dicts)
        :raises ValueError: if no ``<script>`` element with that id exists
        """
        soup = BeautifulSoup(page_url, 'lxml')
        script = soup.find(id=tag)
        if script is None:
            # Previously this crashed with an opaque AttributeError on
            # ``script.text``; report the real cause instead.
            raise ValueError(f'script tag with id={tag!r} not found in page')
        # The script body looks like "try{window.xxx = [...]}catch(e){}";
        # grab the bracketed JSON array embedded in it.
        json_str = re.findall(r'\[.+\]', script.text)[0]
        return json.loads(json_str)

    def load_data(self, path):
        """
        Load previously saved JSON data from a file.

        :param path: path of the JSON file
        :return: the deserialized data
        """
        with open(path, encoding='utf-8') as fp:
            data = json.load(fp)
        return data

    def save(self, data, path):
        """
        Serialize ``data`` to ``path`` as UTF-8 JSON (non-ASCII kept readable).

        Creates the parent directory if it does not exist yet, so a fresh
        checkout without a ``data/`` folder no longer raises FileNotFoundError.

        :param data: JSON-serializable object to write
        :param path: destination file path
        """
        parent = os.path.dirname(path)
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(path, 'w', encoding='utf-8') as fp:
            json.dump(data, fp, ensure_ascii=False)

    def get_last_day_corona_virus(self):
        """Crawl the latest one-day worldwide epidemic data and save it."""
        # Fetch the landing page HTML.
        home_page = self.get_content(self.home_url)
        # Parse the embedded per-country JSON payload.
        last_day_corona_virus = self.fetch_data(home_page, tag='getListByCountryTypeService2true')
        # Persist to disk.
        self.save(last_day_corona_virus, 'data/last_day_corona_virus.json')

    def get_corona_virus(self):
        """Collect the full historical epidemic data for every country."""
        # Load the per-country records saved by get_last_day_corona_virus().
        country = self.load_data('data/last_day_corona_virus.json')

        # Follow each country's statisticsData URL and gather the history.
        corona_virus = self.parse_corona_virus(country, '采集1月23日以来各国疫情信息')

        # Persist to disk.
        self.save(corona_virus, 'data/corona_virus.json')

    def get_last_day_corona_virus_of_china(self):
        """Crawl the latest one-day data for each Chinese province and save it."""
        # Fetch the landing page HTML.
        home_page = self.get_content(self.home_url)
        # Parse the embedded per-province JSON payload.
        last_day_corona_virus_of_china = self.fetch_data(home_page, tag='getAreaStat')
        # Persist to disk.
        self.save(last_day_corona_virus_of_china, 'data/last_day_corona_virus_of_china.json')

    def get_corona_virus_of_china(self):
        """Collect the full historical epidemic data for every Chinese province."""
        # Load the per-province records saved by
        # get_last_day_corona_virus_of_china().
        province = self.load_data('data/last_day_corona_virus_of_china.json')

        # Follow each province's statisticsData URL and gather the history.
        corona_virus = self.parse_corona_virus(province, '采集1月22日以来各省疫情信息')
        # Persist to disk.
        self.save(corona_virus, 'data/corona_virus_of_china.json')

    def parse_corona_virus(self, urls, desc):
        """
        Download and merge the historical records behind each region entry.

        :param urls: list of region dicts, each carrying a ``statisticsData``
            URL plus ``provinceName`` (and optionally ``countryShortCode``)
        :param desc: progress-bar description shown by tqdm
        :return: flat list of daily records, each tagged with its region name
        """
        # Accumulator for all regions' daily records.
        corona_virus_data = []
        # Walk every region entry and fetch its statistics URL.
        for obj_data in tqdm(urls, desc):
            url = obj_data['statisticsData']
            data_str = self.get_content(url)
            # The payload wraps the daily records in a "data" key.
            statistics_data = json.loads(data_str)['data']
            # Tag each daily record with its country/province so the merged
            # list stays attributable.
            for one_day in statistics_data:
                one_day['provinceName'] = obj_data['provinceName']
                if obj_data.get('countryShortCode'):
                    one_day['countryShortCode'] = obj_data['countryShortCode']

            corona_virus_data.extend(statistics_data)
        return corona_virus_data

    def crawler_last_day(self):
        """Crawl the latest one-day data, worldwide and per Chinese province."""
        self.get_last_day_corona_virus()
        self.get_last_day_corona_virus_of_china()


    def get_corona_virus_of_anhui(self):
        """Collect the full historical epidemic data for Anhui province only."""
        # Load the per-province records saved by
        # get_last_day_corona_virus_of_china().
        provinces = self.load_data('data/last_day_corona_virus_of_china.json')

        # Pick out Anhui and fetch its history.
        corona_virus_data = []
        for obj_data in tqdm(provinces, '采集1月22日以来安徽省疫情信息'):
            if obj_data['provinceName'] == '安徽省':
                url = obj_data['statisticsData']
                data_str = self.get_content(url)
                # The payload wraps the daily records in a "data" key.
                statistics_data = json.loads(data_str)['data']
                # Reformat the YYYYMMDD dateId into a human-readable
                # Chinese date string (e.g. 20200122 -> 2020年01月22日).
                for one_day in statistics_data:
                    time = str(one_day['dateId'])
                    one_day['dateId'] = time[:4]+'年'+time[4:6]+'月'+time[6:]+'日'

                corona_virus_data.extend(statistics_data)
        # Persist to disk.
        self.save(corona_virus_data, 'data/corona_virus_of_anhui.json')

    def crawler_all(self):
        """Crawl every historical dataset (world, all provinces, Anhui)."""
        print("E02014049邓宇")
        self.get_corona_virus()
        self.get_corona_virus_of_china()
        self.get_corona_virus_of_anhui()


if __name__ == '__main__':
    # Run the full crawl: latest-day snapshots first, then all histories.
    spider = CoronaVirusSpider()
    spider.crawler_last_day()
    spider.crawler_all()
