#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import re
import requests
from bs4 import BeautifulSoup
from lxml import etree

# Base URL of the Sichuan Provincial Health Commission site; article links
# on the list pages are site-relative and get prefixed with this.
gzbd_base_url = "http://wsjkw.sc.gov.cn"
# Region label attached to every scraped record.
# NOTE(review): the name looks like a typo of "gzbd_region", but it is
# referenced as-is by functions below, so it is left unchanged.
gabd_region = "四川"
# Number of news-list pages to crawl (page 1 plus pages 2..gzbd_page_count).
gzbd_page_count = 3

# Crawl every news-list page and aggregate the per-article case records.
def gzbd_all_page_data():
    """Collect case data from all list pages and return the combined records.

    The first list page has no numeric suffix; pages 2..gzbd_page_count follow
    the "ztwzlmgl_{n}.shtml" naming scheme.

    Returns:
        list[dict]: one dict of parsed statistics per article.
    """
    gzbd_all_data = []

    # List comprehension instead of list(generator); same URLs, same order.
    url_list = ["http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl_{0}.shtml".format(i)
                for i in range(2, gzbd_page_count + 1)]
    url_list.insert(0, "http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl.shtml")
    for url in url_list:
        # The xpath variant is used; gzbd_news_page_data (bs4) is equivalent.
        news_data = gzbd_news_page_data_xpath(url)
        gzbd_all_data += news_data

    print(gzbd_all_data)
    # Fix: previously the aggregate was only printed and then discarded;
    # return it so callers can actually use the data (print kept for parity).
    return gzbd_all_data

# Scrape one news-list page (BeautifulSoup variant).
def gzbd_news_page_data(url):
    """Fetch a list page and return one record dict per linked article.

    Each <li> in the "contMain fontSt" div holds a direct <span> (publish
    date) and a direct <a> (relative link to the article page).

    Returns:
        list[dict]: parsed statistics per article, with "时间" and "地区" added.
    """
    news_data = []

    r = requests.get(url)
    # The server's declared encoding is unreliable; use the detected one.
    r.encoding = r.apparent_encoding

    bs = BeautifulSoup(r.text, "html.parser")
    li_list = bs.find("div", attrs={"class": "contMain fontSt"}).find_all("li")
    for li in li_list:
        # find(..., recursive=False) replaces findChildren(...)[0]; no stray
        # semicolons.  Only direct children are wanted here.
        span = li.find("span", recursive=False)
        a = li.find("a", recursive=False)
        new_page_dict = gzbd_new_page_data(gzbd_base_url + a.get("href"))
        new_page_dict["时间"] = span.get_text()
        new_page_dict["地区"] = gabd_region
        news_data.append(new_page_dict)

    return news_data

# Scrape one article page (BeautifulSoup variant) and parse its statistics.
def gzbd_new_page_data(url):
    """Fetch an article page and extract the cumulative case statistics.

    Scans all 12pt spans for the sentence containing "全省累计" and parses it
    with new_line_parse.  Returns an empty dict when nothing matches.
    """
    print("visit:", url)
    new_page_dict = {}

    r = requests.get(url)
    r.encoding = r.apparent_encoding

    bs = BeautifulSoup(r.text, "html.parser")
    span_list = bs.find_all("span", attrs={"style": "font-size: 12pt;"})
    for span in span_list:
        span_text = span.get_text()
        # Idiomatic membership test instead of span_text.__contains__(...).
        if "全省累计" not in span_text:
            continue

        # Fix: only adopt a non-empty parse result, so a later span that
        # mentions "全省累计" but fails the regex cannot reset a good result.
        parsed = new_line_parse(span_text)
        if parsed:
            new_page_dict = parsed
    return new_page_dict

# Match the statistics sentence with regexes and pack the figures into a dict.
def new_line_parse(new_line):
    """Extract cumulative case statistics from one news sentence.

    Two near-identical patterns are tried because some articles omit the
    "例" unit after the imported-case count.  As in the original, the later
    pattern wins if both happen to match.

    Args:
        new_line: the sentence text scraped from an article page.

    Returns:
        dict: keys 确诊数/输入数/治愈数/死亡数/隔离数/观察数 mapped to the
        captured digit strings; empty dict when neither pattern matches.
    """
    new_page_dict = {}

    # Raw strings fix the invalid-escape DeprecationWarnings for \d and \(.
    # NOTE: the source text mixes an ASCII "(" with a full-width "）" — kept
    # byte-for-byte, as that is what the pages actually contain.
    patterns = (
        r"全省累计报告新型冠状病毒肺炎确诊病例(\d+)例\(其中境外输入(\d+)例\），"
        r"累计治愈出院(\d+)例，死亡(\d+)例，目前在院隔离治疗(\d+)例，(\d+)人尚在接受医学观察。",
        r"全省累计报告新型冠状病毒肺炎确诊病例(\d+)例\(其中境外输入(\d+)\），"
        r"累计治愈出院(\d+)例，死亡(\d+)例，目前在院隔离治疗(\d+)例，(\d+)人尚在接受医学观察。",
    )
    keys = ("确诊数", "输入数", "治愈数", "死亡数", "隔离数", "观察数")
    for pattern in patterns:
        re_result = re.search(pattern, new_line)
        if re_result:
            # Deduplicates the twelve hand-written group assignments.
            new_page_dict = dict(zip(keys, re_result.groups()))

    print(new_page_dict)
    return new_page_dict

# Scrape one news-list page (lxml/xpath variant).
def gzbd_news_page_data_xpath(url):
    """Fetch a list page and return one record dict per linked article.

    xpath counterpart of gzbd_news_page_data: each <li> under the
    "contMain fontSt" div carries a publish-date <span> and an <a> with a
    site-relative href.

    Returns:
        list[dict]: parsed statistics per article, with "时间" and "地区" added.
    """
    news_data = []

    r = requests.get(url)
    # The server's declared encoding is unreliable; use the detected one.
    r.encoding = r.apparent_encoding

    xpath = etree.HTML(r.text)
    # Stray semicolon removed.
    li_list = xpath.xpath("//div[@class='contMain fontSt']//li")
    for li in li_list:
        span = li.xpath("./span")[0]
        a = li.xpath("./a")[0]
        # Element.get("href") replaces the verbose attribute::href xpath.
        new_page_dict = gzbd_new_page_data_xpath(gzbd_base_url + a.get("href"))
        new_page_dict["时间"] = span.text
        new_page_dict["地区"] = gabd_region
        news_data.append(new_page_dict)

    print(news_data)
    return news_data

# Scrape one article page (lxml/xpath variant) and parse its statistics.
def gzbd_new_page_data_xpath(url):
    """Fetch an article page and extract the cumulative case statistics.

    Scans the text of all 12pt spans for the sentence containing "全省累计".
    Returns an empty dict when nothing matches.
    """
    print("visit:", url)
    new_page_dict = {}
    r = requests.get(url)
    r.encoding = r.apparent_encoding

    xpath = etree.HTML(r.text)
    span_list = xpath.xpath("//span[@style='font-size: 12pt;']/text()")
    for span_text in span_list:
        # Bug fix: the old guard (`if span_text and not span_text.__contains__(...)`)
        # let empty/falsy text fall through to the parser, and any later
        # non-matching span then reset the result to {}.
        if "全省累计" not in span_text:
            continue
        # Only adopt a non-empty parse result so a regex miss on a later
        # matching span cannot clobber an earlier good result.
        parsed = new_line_parse(span_text)
        if parsed:
            new_page_dict = parsed

    return new_page_dict

if __name__ == '__main__':
    # Sample URLs kept around for ad-hoc testing of the single-page helpers.
    new_page_url = "http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/2020/10/10/0b8c99f4a6584790a5502888f5b12beb.shtml"
    news_page_url = "http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl.shtml"
    # Crawl every list page and print the aggregated records.
    gzbd_all_page_data()