# -*- coding: UTF-8 -*-
import urllib
import os
from bs4 import BeautifulSoup
import requests
import re
import time
import pandas as pd


# 获取卫计委新闻发布页面
def getIndexList(iniUrl, indexs=10):
    UrlList = []
    for i in range(indexs):
        if i == 0:
            continue
        elif i == 1:
            newurl = iniUrl + "/index.html"
        else:
            newurl = iniUrl + "/index_" + str(i) + ".html"
        UrlList.append(newurl)
    return UrlList


# 抓取网页文本
def getHTMLText(url):
    try:
        headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36"}
        r = requests.get(url, timeout=30, headers=headers, allow_redirects=False)
        print(r.status_code)
        r.raise_for_status()  # 如果状态不是200，引发HTTPError异常
        r.encoding = r.apparent_encoding
        # print(r.text)
        return r.text
    except:
        print("网页访问异常！")


# 获取居住地网页链接
def getAddressUrl(url):
    webcontent = getHTMLText(url)
    soup = BeautifulSoup(webcontent, "html.parser")
    # info = soup.find_all(text=re.compile('.*本市各区确诊病例、无症状感染者居住地信息*.'))
    info = soup.find_all('a', attrs={'title': re.compile('.*确诊病例、无症状感染者居住地信息*.')})
    # info = soup.find_all('li')
    links = []
    for i in info:
        link = i.get('href')
        links.append(link)
    time.sleep(5)
    return links


# 获取地址文本
def getaddressText(url, filename='D:\VirusTracker\data2\shanghai'):
    text = getHTMLText(url)
    soup = BeautifulSoup(text, "html.parser")
    if 'weixin' in url:
        timetxt = soup.find('h1', 'rich_media_title')
        timetxt = timetxt.get_text()
        todate = re.findall('\n(.*?)（0-24时）本市各区确诊病例、无症状感染者居住地信息', timetxt)
        todate = todate[0]
        info = soup.find('div', 'rich_media_content')
        pattern = '</span></p><p>.*?<span style="font-size: 16px;">([^已<2022].+?)?[，。、<]+?'
    else:
        timetxt = soup.find('h2', 'Article-title')
        timetxt = timetxt.get_text()
        todate = re.findall('^(.*?)（0-24时）本市各区确诊病例、无症状感染者居住地信息', timetxt)
        todate = todate[0]
        info = soup.find('div', 'Article_content')
        pattern = '</span></p><p .*?><span style="font-size: 16px;font-family: 宋体">([^已<2022].+?)?[，。、<;]+?'
    x = str(info)
    addresses = re.findall(pattern, x)
    filename = filename + todate + 'address.txt'
    df = pd.DataFrame(addresses)
    df.to_csv(filename)
    # with open(filename, 'w') as f:
    #     for addr in addresses:
    #         f.write(addr)


if __name__ == '__main__':
    # Scrape a single known WeChat bulletin page (original 3-space indent
    # normalized to 4; dead commented-out batch loop removed).
    getaddressText('https://mp.weixin.qq.com/s/YNeLEO7BZouZRfyD2TWOlA')