#!/usr/bin/python3
# coding=utf-8

import requests
import random
from bs4 import BeautifulSoup

# Pool of browser User-Agent strings; one is picked at random at import time so
# requests look like they come from a normal browser.  Currently holds a single
# entry, so random.choice always yields the same string.
useragent_list = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36"]
# Request headers shared by every HTTP GET in this script.
# NOTE(review): Content-Type has no effect on GET requests — presumably copied
# from a POST example; confirm before removing.
headers = {"User-Agent": random.choice(useragent_list), "Content-Type": "application/json; charset=UTF-8"}
# Index page of the Shaanxi Health Commission's daily epidemic bulletins.
url = "http://sxwjw.shaanxi.gov.cn/sy/ztzl/fyfkzt/yqtb/index.html"

def quezhenAndWuzhenzhuangInfo(url: str):
    """Fetch one day's epidemic bulletin page and print the confirmed-case,
    asymptomatic-case and discharge figures parsed from its first paragraph.

    :param url: absolute URL of a single day's bulletin page.
    :returns: None — output goes to stdout.
    """
    # timeout keeps one slow/dead page from hanging the whole crawl
    res = requests.get(url, headers=headers, timeout=10)
    res.encoding = "utf-8"  # force UTF-8 so res.text decodes the page correctly
    article = BeautifulSoup(res.text, "html.parser")
    paragraphs = article.select(".view.TRS_UEDITOR.trs_paper_default.trs_word p")
    if not paragraphs:
        # Layout changed or the page did not load as expected — report the URL
        # instead of dying with an opaque IndexError.
        print("未找到疫情信息段落:", url)
        return
    info = paragraphs[0].text
    # The bulletin paragraph separates the confirmed and asymptomatic sections
    # with a full-width semicolon; details follow the word "其中" in each.
    sections = info.split("；")
    print("确诊信息：  ", sections[0].split("其中")[1])
    # Hoist the repeated split: the asymptomatic section also carries the
    # discharge figures after a full-width period.
    wuzhengzhuang = sections[1].split("其中")[1]
    print("无症状信息：", wuzhengzhuang.split("。")[0])
    print("出院信息：  ", wuzhengzhuang.split("。")[1])

if __name__ == "__main__":
    # Fetch the index page that lists the daily epidemic bulletins.
    # timeout keeps the script from hanging forever on a dead connection.
    res = requests.get(url, headers=headers, timeout=10)
    res.encoding = "utf-8"  # force UTF-8 so res.text decodes the page correctly
    page = BeautifulSoup(res.text, "html.parser")
    yiqingdongtai_list = page.select(".rt.w-gl-w868.f-mr20 .cm-news-list.gl-news-list li")
    # Walk the day-by-day bulletin links oldest-first (the page lists newest first).
    for item in yiqingdongtai_list[::-1]:
        link = item.select("li a")[0]  # hoisted: was selected twice per item
        title = link.text
        date = item.select("li span")[0].text
        print(date, "  " + title)
        # Relative hrefs look like "../../../<path>"; rewrite into an absolute URL.
        href = link.attrs.get("href").replace("../../..", "http://sxwjw.shaanxi.gov.cn/sy")
        # Visit that day's bulletin page and print its figures.
        quezhenAndWuzhenzhuangInfo(href)
        print("-" * 80)
    print("最近 {} 天疫情信息".format(len(yiqingdongtai_list)))
