import json
import os
import re

import requests
from bs4 import BeautifulSoup


class CoronaViruSpider(object):
    """Spider that collects COVID-19 statistics from the DXY epidemic page."""

    def __init__(self):
        # Landing page that embeds the per-country statistics as inline JSON.
        self.home_url = 'https://ncov.dxy.cn/ncovh5/view/pneumonia'

    def get_content_from_url(self, url):
        """
        Fetch *url* and return the response body decoded as text.
        :param url: URL to request
        :return: response body as a str (default UTF-8 decode)
        """
        # Timeout prevents the spider from hanging forever on a stalled server.
        response = requests.get(url, timeout=10)
        return response.content.decode()

    def parse_home_page(self, home_page):
        """
        Parse the home page HTML and return the embedded statistics data.
        :param home_page: HTML of the home page
        :return: parsed Python data (latest per-country statistics)
        :raises ValueError: if the expected script tag or JSON payload is missing
        """
        # 2. Locate the <script> tag carrying the latest per-country data.
        soup = BeautifulSoup(home_page, 'lxml')
        script = soup.find(id='getListByCountryTypeService2true')
        if script is None or script.string is None:
            # Fail loudly instead of an opaque AttributeError when the page layout changes.
            raise ValueError('statistics script tag not found in home page')
        text = script.string
        # 3. Extract the JSON-formatted array embedded in the script text.
        matches = re.findall(r'\[.+\]', text)
        if not matches:
            raise ValueError('no JSON payload found in statistics script')
        json_str = matches[0]
        # 4. Convert the JSON string into Python objects.
        data = json.loads(json_str)
        return data

    def save(self, data, path):
        """
        Save *data* as JSON to *path*, creating parent directories as needed.
        :param data: JSON-serializable Python object
        :param path: destination file path
        """
        # Create the target directory first so a missing 'data/' dir does not crash.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)
        # 5. Save the latest day's per-country data in JSON format.
        with open(path, 'w', encoding='utf8') as fp:
            json.dump(data, fp, ensure_ascii=False)

    def crawl_last_day_corona_virus(self):
        """
        Collect the most recent day's per-country statistics and save them.
        :return: None
        """
        home_page = self.get_content_from_url(self.home_url)

        last_day_corona_virus = self.parse_home_page(home_page)

        self.save(last_day_corona_virus, 'data/last_day_corona_virus.json')

    def crawl_corona_virus_of_china(self):
        """
        Collect per-province statistics for China since January 22.

        Not implemented yet; intended steps:
        - load the latest nationwide data
        - iterate it to obtain each province's URL
        - parse each province's JSON payload and append it to a list
        - save the list in JSON format
        """

    def run(self):
        """Entry point: run the daily per-country crawl."""
        self.crawl_last_day_corona_virus()


if __name__ == '__main__':
    # Script entry point: run the daily per-country crawl.
    CoronaViruSpider().crawl_last_day_corona_virus()

