# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import csv
import re
# Listing page of the Wuhan Municipal Health Commission that holds the
# daily "武汉市新冠肺炎疫情动态" bulletins.
xgzyqurl = 'http://wjw.wuhan.gov.cn/front/web/list3rd/yes/803'
# Desktop Chrome User-Agent so the site serves its normal HTML pages.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'}

# Default POST form fields for the paging endpoint; the main script reads
# 'numPerPage' from this dict when iterating listing pages.
data = {
    'pageNum':'2',
    'numPerPage':'15'
    }


#Fetch the paging metadata: total page count and the per-page item count
def getPostData(url):
    """Scrape the paging form fields from the listing page at *url*.

    Returns a dict of strings: 'pageNum' (current page), 'numPerPage'
    (items per page) and 'pageCount' (total number of pages).
    """
    resp = requests.get(url, headers=headers)
    resp.encoding = resp.apparent_encoding
    page_soup = BeautifulSoup(resp.text, 'html.parser')
    result = {
        'pageNum': page_soup.select('#pageNum')[0]['value'],
        'numPerPage': page_soup.select('#numPerPage')[0]['value'],
    }
    # The ".control span" text contains two numbers; the second one is
    # the total page count.
    summary_text = page_soup.select('.control span')[0].text
    result['pageCount'] = re.findall(r'\d+', summary_text)[1]

    print(result)
    print('\n')
    return result
#Collect the URLs of all "武汉市新冠肺炎疫情动态" bulletins found on one listing page
def getOnePageurl(url, pageNum, numPerPage, xgAllUrl):
    """Fetch one listing page via POST and record the daily-bulletin links.

    Anchors whose title contains "武汉市新冠肺炎疫情动态" are collected into
    the *xgAllUrl* dict (mutated in place), keyed by the date text found in
    parentheses inside the title, with the anchor's href as the value.
    """
    payload = {
        'pageNum': pageNum,
        'numPerPage': numPerPage
    }
    resp = requests.post(url, headers=headers, data=payload)
    resp.encoding = resp.apparent_encoding
    listing = BeautifulSoup(resp.text, 'html.parser')
    wanted = '武汉市新冠肺炎疫情动态'
    for anchor in listing.select('.xxgksublist a'):
        title = anchor['title']
        if wanted not in title:
            continue
        # The date sits inside full-width （） or half-width () parentheses.
        dates = re.findall(r'[（(](.*?)[)）]', title)
        if dates:
            print(anchor['href'])
            print(dates)
            xgAllUrl[dates[0]] = anchor['href']

#Extract the nucleic-acid test count and new asymptomatic-case count from one daily bulletin
def getOneDayData(url):
    """Fetch one daily bulletin page and pull out its two headline numbers.

    Returns a list of number strings — ideally [asymptomatic-case count,
    nucleic-acid test count]; fewer items when the page layout lacks the
    expected phrases.
    """
    resp = requests.get(url, headers=headers)
    resp.encoding = resp.apparent_encoding
    article = BeautifulSoup(resp.text, 'html.parser')
    asym_marker = '全市新增无症状感染者'
    test_marker = '全市核酸检测'
    extracted = []
    for para in article.select('.TRS_Editor p'):
        text = ''.join(sp.text.strip() for sp in para.select("span"))
        if asym_marker in text:
            # Keep only the clause (split on Chinese comma/period) that
            # actually mentions the asymptomatic count.
            clauses = [c for c in re.split('，|。', text) if asym_marker in c]
            if clauses:
                extracted.append(re.findall(r'\d+', clauses[0])[0])
        if test_marker in text:
            # The last number in the paragraph is the total test count.
            numbers = re.findall(r'\d+', text)
            extracted.append(numbers[len(numbers) - 1])

    return extracted


#Save the scraped data to a CSV file
def savedata2file(yqdata):
    """Write the scraped rows to yiqishuju.csv in oldest-first order.

    Each element of *yqdata* is a sequence of strings:
    [date, asymptomatic count, test count].

    NOTE: reverses *yqdata* in place — the caller sees the list
    oldest-first after this returns (showplot relies on that ordering),
    so the mutation is kept deliberately.
    """
    yqdata.reverse()
    # Explicit utf-8 so the Chinese date strings round-trip regardless of
    # the platform's default codec; newline='' lets csv control line ends,
    # and lineterminator='\n' keeps the original file format.
    with open('yiqishuju.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerows(yqdata)

#Plot the data as two charts with matplotlib.pyplot
import matplotlib.pyplot as plt 
def showplot(yqdata):
    """Draw two stacked line charts — asymptomatic cases and test counts
    over time — and display the figure.

    Each row of *yqdata* is [date, asymptomatic count, test count].
    """
    dates = [row[0] for row in yqdata]
    asym_counts = [row[1] for row in yqdata]
    test_counts = [row[2] for row in yqdata]

    # Chinese-capable font so the subplot titles render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    # Show every third date on the x axis to avoid label crowding.
    sparse_ticks = [dates[i] for i in range(len(dates)) if i % 3 == 0]

    # First subplot: asymptomatic cases.
    plt.subplot(2, 1, 1)
    plt.plot(dates, asym_counts)
    plt.xticks(sparse_ticks)
    plt.title('确诊无症状患者')

    # Second subplot: nucleic-acid tests.
    plt.subplot(2, 1, 2)
    plt.plot(dates, test_counts)
    plt.xticks(sparse_ticks)
    plt.title('核酸检查人数')

    plt.show()


if __name__ == '__main__':
    # Discover how many listing pages exist (and the page size) first.
    postData = getPostData(xgzyqurl)
    pageCount = int(postData['pageCount'])  # fixed typo: was "pageConut"
    # Use the per-page size the site actually reports instead of the
    # hard-coded module-level `data` dict.
    numPerPage = postData['numPerPage']

    # date text -> bulletin URL, accumulated across all listing pages.
    xgAllUrl = {}
    for i in range(pageCount):
        getOnePageurl(xgzyqurl, str(i + 1), numPerPage, xgAllUrl)

    yqdata = []
    print(xgAllUrl)

    for date, url in xgAllUrl.items():
        oneDayData = getOneDayData(url)  # renamed: was "oneDayDate"
        # Keep only days where both numbers were successfully extracted.
        if len(oneDayData) == 2:
            oneDayData.insert(0, date)
            yqdata.append(oneDayData)

    print(yqdata)
    savedata2file(yqdata)
    showplot(yqdata)

