import re;
import requests, bs4;
from lxml import etree;
from _GZBD import gzbd_storage;

'''
GZBD spider
'''
__author__ = 'HymanHu';


# Base URL of the Sichuan Provincial Health Commission site; article links on
# the list pages are relative paths and are prefixed with this before fetching.
domain = "http://wsjkw.sc.gov.cn";
# Region label ("Sichuan") attached to every scraped record.
region = "四川";
# Desktop-browser User-Agent so the site serves its normal HTML pages.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0",
};

# Build the paginated news-list URLs and crawl every page.
def get_all_news_data(page_count):
    """Crawl all paginated news-list pages and merge the per-article data.

    The first list page has no numeric suffix ("ztwzlmgl.shtml"); pages
    2..page_count follow the "ztwzlmgl_<n>.shtml" naming scheme.

    :param page_count: total number of list pages to crawl (>= 1)
    :return: list of per-article data dicts collected from all pages
    """
    # Page 1 first, then the numbered pages 2..page_count.
    url_list = ["http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl.shtml"]
    url_list += [
        "http://wsjkw.sc.gov.cn/scwsjkw/gzbd01/ztwzlmgl_%d.shtml" % page
        for page in range(2, page_count + 1)
    ]

    gzbd_all = []
    for url in url_list:
        gzbd_all += get_news_page_data(url)

    print(gzbd_all)
    return gzbd_all


# Crawl one news-list page and follow each article link.
def get_news_page_data(url):
    """Crawl a single news-list page and return the parsed article data.

    Each <li> in the "contMain fontSt" container holds a <span> (publish
    date) and an <a> (article link); the linked article page is fetched
    and parsed via get_new_page_data().

    :param url: absolute URL of the news-list page
    :return: list of data dicts, one per article found on the page
             (empty on HTTP error or unexpected page layout)
    """
    print("Visit site：%s" % (url,))
    gzbd_list = []

    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        # The pages' declared encoding may be wrong; trust the sniffed one.
        r.encoding = r.apparent_encoding

        soup = bs4.BeautifulSoup(r.text, 'html.parser')
        container = soup.find(name="div", attrs={"class": "contMain fontSt"})
        # Guard against layout changes: find() returns None when the
        # container is absent, which would otherwise raise AttributeError.
        li_list = container.find_all(name="li") if container else []
        for li in li_list:
            spans = li.findChildren("span", recursive=False)
            anchors = li.findChildren("a", recursive=False)
            if not spans or not anchors:
                continue  # malformed entry: skip rather than crash on [0]
            gzbd_data = get_new_page_data(domain + anchors[0].get("href"))
            gzbd_data["日期"] = spans[0].get_text()
            gzbd_data["地域"] = region
            gzbd_list.append(gzbd_data)
    else:
        print("Request error, return status code is %d" % (r.status_code,))

    print(gzbd_list)
    return gzbd_list


# Crawl a single article page and extract the statistics paragraph.
def get_new_page_data(url):
    """Crawl one article page and parse its cumulative-statistics sentence.

    Scans every 12pt <span>; the one containing "全省累计" (province-wide
    cumulative totals) is handed to gzbd_data_parse(). If several spans
    match, the last one wins — same as the original loop behavior.

    :param url: absolute URL of the article page
    :return: dict of extracted counts, or an empty dict when the request
             fails or no statistics paragraph is found
    """
    print("Visit site：%s" % (url,))
    gzbd_data = {}

    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        # The pages' declared encoding may be wrong; trust the sniffed one.
        r.encoding = r.apparent_encoding

        soup = bs4.BeautifulSoup(r.text, 'html.parser')
        for span in soup.find_all(name="span", attrs={"style": "font-size: 12pt;"}):
            span_text = span.get_text()
            # Only the paragraph with the cumulative totals is of interest.
            if "全省累计" in span_text:
                gzbd_data = gzbd_data_parse(span_text)
        # NOTE(review): an earlier lxml/xpath variant parsed the same span
        # here; the etree.HTML() call was dead work and has been removed.
    else:
        print("Request error, return status code is %d" % (r.status_code,))

    print(gzbd_data)
    return gzbd_data

# Parse the statistics sentence and pull the six counts into a dict.
def gzbd_data_parse(span_text):
    """Extract the six cumulative COVID counts from the news sentence.

    Expected sentence shape (N = digit runs)::

        全省累计报告新型冠状病毒肺炎确诊病例N例(其中境外输入N例），
        累计治愈出院N例，死亡N例，目前在院隔离治疗N例，N人尚在接受医学观察。

    :param span_text: full text of the news paragraph
    :return: dict mapping the Chinese field names to the matched digit
             strings, or an empty dict when the sentence is absent/reworded
    """
    # Raw string so "\d" / "\(" are regex escapes rather than (deprecated)
    # string escapes.  NOTE: the source text mixes an ASCII "(" with a
    # full-width "）" — the pattern mirrors that on purpose.
    pattern = (r"全省累计报告新型冠状病毒肺炎确诊病例(\d+)例\(其中境外输入(\d+)例\），"
               r"累计治愈出院(\d+)例，死亡(\d+)例，目前在院隔离治疗(\d+)例，(\d+)人尚在接受医学观察。")
    ma = re.search(pattern, span_text)
    if not ma:
        return {}
    keys = ("确诊数", "输入数", "治愈数", "死亡数", "隔离数", "观察数")
    return dict(zip(keys, ma.groups()))

if __name__ == "__main__":
    # Crawl the first three list pages, then persist everything to MySQL.
    records = get_all_news_data(3)
    gzbd_storage.gzbd_mysql(records)