import re
import requests
from bs4 import BeautifulSoup
import json
from tqdm import tqdm


class Spider:
    """Crawler for COVID-19 statistics published on the DXY (丁香园) portal.

    Downloads the pneumonia dashboard page, extracts the JSON payloads
    embedded in named ``<script>`` tags, follows each region's
    ``statisticsData`` time-series URL, and persists everything as JSON
    files under the ``疫情数据`` directory (which must already exist).
    """

    def __init__(self):
        # Dashboard page that embeds the latest statistics as inline JSON.
        self.home_url = 'https://ncov.dxy.cn/ncovh5/view/pneumonia'

    def get_content_from_url(self, url, timeout=30):
        """Fetch *url* and return the response body decoded as text.

        :param url: absolute URL to download
        :param timeout: seconds before the request is aborted; the original
            code passed no timeout, so a stalled server could hang forever
        :return: response body decoded from raw bytes (pages are UTF-8)
        :raises requests.RequestException: on network failure or timeout
        """
        response = requests.get(url, timeout=timeout)
        home_page = response.content.decode()
        return home_page

    def parse_home_page(self, home_page, tag_id):
        """Extract the JSON array embedded in the ``<script>`` tag *tag_id*.

        :param home_page: HTML text of the dashboard page
        :param tag_id: ``id`` attribute of the script tag holding the data
        :return: the embedded JSON array parsed into Python objects
        :raises IndexError: if no bracketed JSON array is found in the tag
        :raises AttributeError: if no tag with *tag_id* exists
        """
        soup = BeautifulSoup(home_page, 'lxml')
        script = soup.find(id=tag_id)
        text = script.text
        # The script body wraps the data in JS boilerplate; grab the
        # bracketed JSON array it contains.
        json_str = re.findall(r'\[.+\]', text)[0]
        # Convert the JSON text to Python objects.
        python_data = json.loads(json_str)
        return python_data

    def save(self, python_data, path):
        """Serialize *python_data* to *path* as UTF-8 JSON.

        ``ensure_ascii=False`` keeps Chinese text human-readable on disk.
        """
        with open(path, 'w', encoding='utf8') as fp:
            json.dump(python_data, fp, ensure_ascii=False)

    def crawl_last_day(self):
        """Crawl the latest per-country statistics and save them to disk."""
        home_page = self.get_content_from_url(self.home_url)
        last_day_data = self.parse_home_page(
            home_page, tag_id='getListByCountryTypeService2true')
        self.save(last_day_data, '疫情数据/最新各国疫情数据.json')

    def crawl_1月23_day(self):
        """Crawl each country's daily data since Jan 23 and save it.

        Reads the file written by :meth:`crawl_last_day`, follows every
        country's ``statisticsData`` URL, and tags each daily record with
        the country's name and short code so records are self-describing.
        """
        with open('疫情数据/最新各国疫情数据.json', encoding='utf8') as fp:
            last_day_data = json.load(fp)
        # Accumulates the daily records of every country.
        # (Renamed from ``list``, which shadowed the builtin.)
        all_days = []
        for country in tqdm(last_day_data, '采集1月23日以来各国疫情数据'):
            url = country['statisticsData']
            country_json = self.get_content_from_url(url)
            country_days = json.loads(country_json)['data']
            for oneday in country_days:
                # Time-series records carry no country info of their own.
                oneday['provinceName'] = country['provinceName']
                oneday['countryShortCode'] = country['countryShortCode']

            all_days.extend(country_days)

        self.save(all_days, '疫情数据/1月23各国疫情数据.json')

    def crawl_china_province_data(self):
        """Crawl the latest per-province statistics for China and save them."""
        home_page = self.get_content_from_url(self.home_url)
        python_data = self.parse_home_page(home_page, tag_id='getAreaStat')
        self.save(python_data, '疫情数据/中国各省数据.json')

    def crawl_china_province_1月23_data(self):
        """Crawl each Chinese province's daily data since Jan 23 and save it.

        Reads the file written by :meth:`crawl_china_province_data` and
        tags each daily record with its province name.
        """
        with open('疫情数据/中国各省数据.json', encoding='utf8') as fp:
            province_data = json.load(fp)
        # Accumulates daily records (renamed from builtin-shadowing ``list``).
        all_days = []
        for province in tqdm(province_data, '采集1月23日以来中国各省疫情数据'):
            url = province['statisticsData']
            # BUGFIX: the original rebound ``province_data`` here, shadowing
            # the list being iterated; use a distinct name instead.
            province_json = self.get_content_from_url(url)
            province_days = json.loads(province_json)['data']
            for oneday in province_days:
                oneday['provinceName'] = province['provinceName']

            all_days.extend(province_days)

        self.save(all_days, '疫情数据/1月23中国各省疫情数据.json')

    def run(self):
        """Entry point: run the currently-selected crawl step(s).

        The commented calls are the other available steps; uncomment to
        re-run the full pipeline in order.
        """
        # self.crawl_last_day()
        # self.crawl_1月23_day()
        # self.crawl_china_province_data()
        self.crawl_china_province_1月23_data()

if __name__ == '__main__':
    # Instantiate the crawler and run the selected steps only when this
    # file is executed as a script (not on import).
    Spider().run()
