import requests         #HTTP请求库，可用于网络请求和网络爬虫等
import json             #json库 ； json --> .dump()、.loads()
import re               #re模块 --> 实现正则匹配 ; 获取字符串中所有匹配的字符串，返回一个列表
from lxml import etree      #lxml库 --> XML和HTML的解析器，解析和提取XML和HTML中的数据 ； 引入etree模块（解析 HTML/XML 文档）
import time

# JSONP endpoint for the "overseas COVID-19 latest developments" hot-search feed.
# The query parameter is URL-encoded Chinese; cb= names the JSONP callback wrapper.
# NOTE(review): defined but never referenced below — only url_china is used.
url_guowai="https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E5%9B%BD%E5%A4%96%E6%96%B0%E5%9E%8B%E8%82%BA%E7%82%8E%E6%9C%80%E6%96%B0%E5%8A%A8%E6%80%81&cb=jsonp_1645606059431_23122"
# JSONP endpoint for the "domestic COVID-19 latest developments" hot-search feed.
url_china='https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E5%9B%BD%E5%86%85%E6%96%B0%E5%9E%8B%E8%82%BA%E7%82%8E%E6%9C%80%E6%96%B0%E5%8A%A8%E6%80%81&cb=jsonp_1645608463010_72545'

# Browser-mimicking headers for the opendata.baidu.com JSONP endpoint.
# NOTE(review): the Cookie is a captured session value and may expire; confirm
# whether the endpoint still responds without it.
header={
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Cookie': '__yjs_duid=1_a7342b52dcd01361449b602d2ab03b261632922400111; BAIDUID=F00A1AB913FF542E94279DED7C6F1EA8:FG=1; BIDUPSID=F00A1AB913FF542E94279DED7C6F1EA8; PSTM=1641797954; H_PS_PSSID=35835_35105_31254_34584_35490_35841_35246_35948_35802_35323_26350_35941; delPer=0; PSINO=1; BA_HECTOR=21802k8g0k858181641h1bt1t0r; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598',
    'Host': 'opendata.baidu.com',
    'Referer': 'https://voice.baidu.com/',
    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="98", "Google Chrome";v="98"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': "Windows",
    'Sec-Fetch-Dest': 'script',
    'Sec-Fetch-Mode': 'no-cors',
    'Sec-Fetch-Site': 'same-site',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'
}

# Minimal headers for fetching the voice.baidu.com HTML page.
header2={
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'
}

class Spider_yiqi:
    """Scraper for Baidu's public COVID-19 ("yiqing") data endpoints.

    Fetches the domestic hot-search feed (a JSONP response) and the epidemic
    data embedded in the voice.baidu.com page, reshapes the records, and
    writes a set of JSON files into the current working directory.
    """

    def __init__(self):
        # Accumulated domestic hot-search items; rebuilt by get_hot_search().
        self.home_hot_search = []
        # Intended output directory.
        # NOTE(review): currently unused — every save path below is relative
        # to the CWD; confirm intent before prepending it.
        self.basepath = '/home/yiqing/houduan/'
        self.mapLastUpdatedTime = ''

    def time_transfer(self, timeStamp):
        """Convert a Unix timestamp (seconds) to a local 'YYYY-mm-dd HH:MM:SS' string."""
        timeArray = time.localtime(timeStamp)
        return time.strftime("%Y-%m-%d %H:%M:%S", timeArray)

    def save_json(self, data, save_path, keys):
        """Project each region record onto *keys* and dump the result to *save_path*.

        Expected *keys* layout:
          keys[0]     -- region-name field (kept only on the outer record)
          keys[1:-2]  -- value fields copied for both the region and its cities
          keys[-2]    -- name of the nested list key ('subList')
          keys[-1]    -- per-city name field ('city')
        """
        yiqing_data = []
        for d in data:
            # Outer (province/country) record: name plus value fields.
            ldata = {k: d[k] for k in keys[:-2]}
            ldata[keys[-2]] = []
            for dd in d['subList']:
                # Inner (city) record: city name plus the same value fields.
                city = {keys[-1]: dd[keys[-1]]}
                for k in keys[1:-2]:
                    city[k] = dd[k]
                ldata[keys[-2]].append(city)
            yiqing_data.append(ldata)
        with open(save_path, "w", encoding='utf8') as f:
            # ensure_ascii=False keeps Chinese place names readable in the file.
            json.dump(yiqing_data, f, indent=4, ensure_ascii=False)

    def save_json2(self, data, save_path):
        """Dump *data* to *save_path* as pretty-printed, non-ASCII-escaped JSON."""
        with open(save_path, "w", encoding='utf8') as f:
            json.dump(data, f, indent=4, ensure_ascii=False)

    def get_hot_search(self):
        """Fetch the domestic hot-search feed and save it to home_hot_search.json."""
        # Reset so repeated update cycles do not accumulate duplicate entries.
        self.home_hot_search = []
        response = requests.get(url_china, headers=header).text
        # Strip the JSONP wrapper "callback(...)" to get the bare JSON payload.
        # Raw string avoids the invalid-escape DeprecationWarning.
        res = re.findall(r'\((.+)\)', response)[0]
        json_data = json.loads(res)
        for d in json_data['Result'][0]['DisplayData']['result']['items']:
            self.home_hot_search.append({
                'eventTime': self.time_transfer(int(d['eventTime'])),
                'eventDescription': d['eventDescription']
            })
        self.save_json2(self.home_hot_search, 'home_hot_search.json')

    def get_data(self):
        """Scrape the epidemic-data page and write each dataset to its own JSON file."""
        html = requests.get("https://voice.baidu.com/act/newpneumonia/newpneumonia",
                            headers=header2).text
        html = etree.HTML(html)
        # The payload is embedded as JSON text inside the 12th <script> tag.
        # NOTE(review): this positional index is fragile — re-verify whenever
        # the page layout changes.
        yiqing_data = html.xpath('//script')[11].xpath('./text()')[0]
        data = json.loads(yiqing_data)['component'][0]
        # Observed keys: mapLastUpdatedTime, caseList, caseOutsideList,
        # summaryDataIn, summaryDataOut, trend, foreignLastUpdatedTime,
        # globalList, allForeignTrend, topAddCountry, topOverseasInput,
        # asymptomaticTopProvince, newAddTopProvince, topCountryAddTrend.
        # Per-province / per-city domestic figures (confirmed/died/cured).
        self.save_json(data['caseList'], "yiqing_china.json",
                       ['area', 'confirmed', 'died', 'crued', 'subList', 'city'])
        # Per-country foreign figures.
        self.save_json(data['caseOutsideList'], "yiqing_guowai.json",
                       ['area', 'confirmed', 'died', 'crued', 'curedPercent',
                        'diedPercent', 'subList', 'city'])
        # Domestic summary, stamped with the map's last-updated time.
        data['summaryDataIn']['mapLastUpdatedTime'] = data['mapLastUpdatedTime']
        self.save_json2(data['summaryDataIn'], 'summaryDataIn.json')
        # Foreign summary.
        self.save_json2(data['summaryDataOut'], 'summaryDataOut.json')
        # Domestic growth trend.
        self.save_json2(data['trend'], 'trend.json')
        # globalList was previously dumped twice; once is enough.
        self.save_json2(data['globalList'], 'globalList.json')
        self.save_json2(data['allForeignTrend'], 'allForeignTrend.json')
        self.save_json2(data['topAddCountry'], 'topAddCountry.json')
        self.save_json2(data['topOverseasInput'], 'topOverseasInput.json')
        self.save_json2(data['asymptomaticTopProvince'], 'asymptomaticTopProvince.json')
        self.save_json2(data['newAddTopProvince'], 'newAddTopProvince.json')
        self.save_json2(data['topCountryAddTrend'], 'topCountryAddTrend.json')

    def update_data(self):
        """Run one full scrape cycle: hot search first, then all datasets."""
        self.get_hot_search()
        self.get_data()

if __name__ == "__main__":
    # Script entry point: run one full scrape-and-save cycle.
    Spider_yiqi().update_data()
