import requests
from bs4 import BeautifulSoup
import re
import json
from tqdm import tqdm
import logging
import random
import pandas as pd

# Configure a module-level logger: INFO and above, timestamped messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger(__name__)

# Pool of browser User-Agent strings; one is picked at random for each
# request (see CoronaVirusSpider) to make the scraper look less like a bot.
user_agent_list = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:73.0) Gecko/20100101 Firefox/73.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0',
    'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36',
    'Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)'
]


class CoronaVirusSpider(object):
    """Scraper for COVID-19 statistics published on the Dingxiangyuan site.

    Crawls the latest per-country and per-province (China) snapshots,
    expands each region into its full daily time series, and pivots the
    results into date-by-region CSV tables.
    """

    def __init__(self):
        # Home page of Dingxiangyuan; its inline <script> tags embed the data.
        self.home_url = 'https://ncov.dxy.cn/ncovh5/view/pneumonia'
        # One shared session so connection pooling and headers persist.
        self.session = requests.session()

    def _fetch(self, url):
        '''
        GET *url* with a randomly rotated User-Agent, retrying on
        chunked-encoding failures until a response is obtained.

        :param url: URL to request
        :return: the requests Response object
        '''
        while True:
            # Rotate the User-Agent on every attempt to reduce bot detection.
            self.session.headers.update({'User-Agent': random.choice(user_agent_list)})
            try:
                return self.session.get(url=url)
            except requests.exceptions.ChunkedEncodingError:
                # BUG FIX: the original call passed ``url`` without a %s
                # placeholder, which made the logging module raise an internal
                # formatting error instead of recording the failing URL.
                logger.error('获取连接失败 %s', url)

    def get_content_from_url(self, url):
        '''
        Fetch *url* and return the response body as a string.

        :param url: URL to request
        :return: decoded response body (UTF-8 by default)
        '''
        return self._fetch(url).content.decode()

    def parse_home_page(self, home_page, tag_id):
        '''
        Extract the JSON array embedded in a <script> tag of the home page.

        :param home_page: HTML of the home page
        :param tag_id: id attribute of the <script> tag holding the data
        :return: parsed Python data (a list)
        '''
        soup = BeautifulSoup(home_page, 'lxml')
        script = soup.find(id=tag_id)
        text = script.string
        # The payload is a JSON array assigned inside the script's text.
        json_str = re.findall(r'\[.+\]', text)[0]
        return json.loads(json_str)

    def parse_corona_virus(self, last_day_corona_virus_of_china, desc):
        '''
        Expand a last-day snapshot into full daily time series per region.

        :param last_day_corona_virus_of_china: list of region dicts, each
            carrying a ``statisticsData`` URL for its daily statistics
        :param desc: progress-bar description shown while crawling
        :return: flat list of daily records tagged with their region name
        '''
        corona_virus = []
        # Each region entry carries the URL of its full statistics history.
        for province in tqdm(last_day_corona_virus_of_china, desc):
            statistics_data_url = province['statisticsData']
            statistics_data_json_str = self.get_content_from_url(statistics_data_url)
            statistics_data = json.loads(statistics_data_json_str)['data']
            # Tag every daily record with its region so the rows remain
            # identifiable after all lists are flattened together.
            for one_day in statistics_data:
                one_day['provinceName'] = province['provinceName']
                if province.get('countryShortCode'):
                    one_day['countryShortCode'] = province['countryShortCode']
            corona_virus.extend(statistics_data)
        return corona_virus

    def load(self, path):
        '''
        Load JSON data from a file.

        :param path: path of the JSON file
        :return: deserialized Python data
        '''
        with open(path, encoding="utf-8") as fp:
            return json.load(fp)

    def save(self, data, path):
        """
        Save Python data to a JSON file, keeping non-ASCII text readable.

        :param data: data to serialize
        :param path: destination file path
        :return: None
        """
        with open(path, 'w', encoding="utf-8") as fp:
            json.dump(data, fp, ensure_ascii=False)

    def crawl_last_day_corona_virus(self):
        '''
        Crawl the most recent per-country snapshot and persist it as JSON.
        '''
        home_page = self.get_content_from_url(self.home_url)
        last_day_corona_virus = self.parse_home_page(home_page, 'getListByCountryTypeService2true')
        self.save(last_day_corona_virus, 'data/metadata/last_day_corona_virus.json')

    def crawl_corona_virus(self):
        '''
        Expand the per-country snapshot into full per-country time series.
        '''
        last_day_corona_virus = self.load('data/metadata/last_day_corona_virus.json')
        corona_virus = self.parse_corona_virus(last_day_corona_virus, '采集各国疫情信息')
        self.save(corona_virus, 'data/metadata/corona_virus.json')

    def crawl_last_day_corona_virus_of_china(self):
        '''
        Crawl the most recent per-province (China) snapshot and persist it.
        '''
        home_page = self.get_content_from_url(self.home_url)
        data = self.parse_home_page(home_page, tag_id='getAreaStat')
        self.save(data, 'data/metadata/last_day_corona_virus_of_china.json')

    def crawl_corona_virus_of_china(self):
        '''
        Expand the per-province snapshot into full per-province time series.
        '''
        last_day_of_china = self.load('data/metadata/last_day_corona_virus_of_china.json')
        corona_virus = self.parse_corona_virus(last_day_of_china, '采集疫情开始以来各省疫情信息')
        self.save(corona_virus, 'data/metadata/corona_virus_of_china.json')

    def process_data(self, input_path, out_pah):
        """
        Pivot raw daily records into a dateId-by-region table of current
        confirmed counts and write it to CSV.

        :param input_path: JSON file of daily records
        :param out_pah: destination CSV path (misspelled name kept for
            backward compatibility with existing keyword callers)
        :return: None
        """
        datas = pd.read_json(input_path)
        # Sort by cumulative confirmed count so the columns come out ordered
        # from the most to the least affected region.
        datas.sort_values(by='confirmedCount', ascending=False, inplace=True)
        country_names = datas['provinceName'].unique()

        # One row per day, one column per region, cell = current confirmed count.
        final_data = datas.pivot_table('currentConfirmedCount', index='dateId', columns='provinceName')
        # Reorder the columns to match the sorted region order.
        final_data = final_data[country_names]

        # Days on which a region reported nothing count as zero.
        final_data.fillna(0, inplace=True)

        final_data.to_csv(out_pah)

    def process_data_run(self):
        """Pivot both the China and the global time-series files into CSVs."""
        self.process_data('data/metadata/corona_virus_of_china.json', 'data/metadata/currentConfirmedCount_china.csv')
        self.process_data('data/metadata/corona_virus.json', 'data/metadata/currentConfirmedCount.csv')

    def get_json_from_url(self, url, path):
        '''
        Fetch a JSON endpoint and persist the parsed payload.

        :param url: endpoint returning a JSON body
        :param path: file path the parsed data is written to
        '''
        # Delegates to _fetch, which carries the retry loop (and the fixed
        # logging call) previously duplicated here.
        self.save(self._fetch(url).json(), path)

    def area_data_day(self):
        '''
        Placeholder for pulling time-series data from the isaaclin API;
        the actual downloads are disabled, only the progress bar runs.
        '''
        with tqdm(total=100) as pbar:
            pbar.set_description('获取时间序列数据:')
            # Disabled: self.get_json_from_url('https://lab.isaaclin.cn/nCoV/api/area', 'data/metadata/area_data_day.json')
            pbar.update(50)
            # Disabled: self.get_json_from_url('https://lab.isaaclin.cn/nCoV/api/area?latest=0','data/metadata/area_data_timeline.json')
            pbar.update(50)

    def run(self):
        """
        Run the full pipeline: crawl snapshots, expand histories, pivot to CSV.
        :return: None
        """
        self.crawl_last_day_corona_virus()
        self.crawl_corona_virus()
        # self.area_data_day()
        self.crawl_last_day_corona_virus_of_china()
        self.crawl_corona_virus_of_china()

        self.process_data_run()


if __name__ == '__main__':
    # Script entry point: build the spider and run the full pipeline.
    CoronaVirusSpider().run()
