'''
根据所缺日期的公告，获取对应日期的公告，避免过度爬取
'''
import os
import re
import datetime
import requests
from bs4 import BeautifulSoup
from lxml import etree
import get_cookie

# Ensure the output directory for downloaded announcement pages exists.
# makedirs(..., exist_ok=True) avoids the check-then-create race of
# os.path.exists() followed by os.mkdir().
os.makedirs('./pagetext', exist_ok=True)

# Session cookie for www.nhc.gov.cn, refreshed by the project-local helper.
cookie = get_cookie.update_cookies()


# Accumulates 'YYYY-MM-DD' strings parsed from saved .txt filenames.
lastdates = []


class GetNeedDate:
    """Work out which announcement dates are missing locally.

    Compares the newest date found in the local ./pagetext archive with the
    newest date shown on the NHC daily-briefing listing page, and produces
    the list of 'YYYY-MM-DD' strings that still need to be fetched.
    """

    def __init__(self):
        self.news_date = ''   # newest date on the website (datetime after get_news_date)
        self.local_date = ''  # newest date saved locally (datetime after get_local_date)
        self.date_list = []   # 'YYYY-MM-DD' strings still missing locally

    def get_local_date(self):
        """Scan ./pagetext for saved announcements and return the newest date.

        Saved filenames end with a 'YYYY-MM-DD.txt' suffix; the date is parsed
        out of each one and the maximum is returned as a datetime.

        Raises:
            FileNotFoundError: if no dated .txt files exist under ./pagetext.
        """
        found = []
        # Walk the same relative directory this script creates and writes to,
        # instead of a hard-coded absolute path that only works on one machine
        # (the original "E:\\code_git\\..." literal also contained invalid
        # backslash escape sequences).
        for _root, _dirs, files in os.walk('./pagetext'):
            for each in files:
                # \d = any digit, \S = any non-space separator; raw string so
                # the escapes are literal, and the dot before 'txt' is escaped.
                found.extend(re.findall(r'(\d+\S\d+\S\d+)\.txt', each))
        # Keep the module-level accumulator in sync for backward compatibility,
        # but compute from the fresh local list so repeated calls do not see
        # duplicated entries.
        lastdates.extend(found)
        if not found:
            raise FileNotFoundError('no dated .txt files found under ./pagetext')
        # max() over parsed datetimes replaces sort(reverse=True) + [0].
        self.local_date = max(datetime.datetime.strptime(s, '%Y-%m-%d') for s in found)
        print('local_date', self.local_date)
        return self.local_date

    def get_news_date(self):
        """Fetch the NHC listing page and return its newest announcement date.

        Raises:
            ValueError: if no date text is found (page layout changed).
        """
        header = {
            'User-Agent': 'Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) '
                          'Chrome / 80.0.3987.87Safari / 537.36',
            'Host': 'www.nhc.gov.cn',
            'cookie': cookie
        }
        url = 'http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml'
        # timeout so a stalled connection cannot hang the script forever
        response = requests.get(url, headers=header, timeout=30)
        print(response.status_code)
        text = etree.HTML(response.text)
        # Dates are shown in the <span> next to each list item.
        r_date = text.xpath('//li/span/text()')
        if not r_date:
            raise ValueError('no dates found on the listing page; layout may have changed')
        self.news_date = datetime.datetime.strptime(r_date[0], '%Y-%m-%d')
        return self.news_date

    def needdatas(self):
        """Return the dates after local_date up to and including news_date.

        Requires get_local_date() and get_news_date() to have run first.
        Returns a list of 'YYYY-MM-DD' strings (empty when already up to date).
        """
        if self.news_date == self.local_date:
            print('数据无需更新')
        else:
            days_behind = (self.news_date - self.local_date).days
            for i in range(1, days_behind + 1):
                # strftime converts each missing datetime back to 'YYYY-MM-DD'
                self.date_list.append(datetime.datetime.strftime(
                    self.local_date + datetime.timedelta(days=i), '%Y-%m-%d'))
            print(self.date_list)
        return self.date_list


class GetData:
    """Download the announcement page for each date in date_list into ./pagetext."""

    def __init__(self, date_list):
        # Browser-like headers matching the rest of this script; `cookie`
        # is the module-level session cookie.
        self.header = {
            'User-Agent': 'Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) '
                          'Chrome / 80.0.3987.87Safari / 537.36',
            'Host': 'www.nhc.gov.cn',
            'cookie': cookie
        }
        self.date_list = date_list  # 'YYYY-MM-DD' strings to fetch

    def get_data(self):
        """Resolve each wanted date to its announcement link, download it and
        save the page's paragraph text to ./pagetext/<title><date>.txt.
        """
        url = "http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml"
        response = requests.get(url, headers=self.header, timeout=30)
        print(response.status_code)
        soup = BeautifulSoup(response.text, 'lxml')

        # Resolve each wanted date to (href, title, date) in one pass.  The
        # <span> holding the date follows the <a> carrying the title/link, so
        # a single lookup per date replaces the original separate title pass
        # plus the nested date-matching loop that re-ran soup.find().
        targets = []
        for date in self.date_list:
            span = soup.find(name='span', text=date)
            if span is None:
                # Date not on the listing page (too old, or layout changed):
                # skip it instead of crashing on None.find_previous_sibling.
                print('date not found on listing page:', date)
                continue
            link = span.find_previous_sibling(name='a')
            targets.append(('http://www.nhc.gov.cn' + link.attrs['href'],
                            link.attrs['title'],
                            date))

        # Download each announcement and write out the text of its <p> tags.
        for href, title, date in targets:
            page = requests.get(href, headers=self.header, timeout=30)
            page_soup = BeautifulSoup(page.text, 'lxml')
            # .text (not .string) so nested tags inside a <p> still contribute
            text = ''.join(p.text for p in page_soup.find_all(name='p'))
            filename = './pagetext/{}{}.txt'.format(title, date)
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(text)


if __name__ == '__main__':
    # Figure out which announcement dates are missing locally, then fetch
    # exactly those to avoid over-crawling.
    checker = GetNeedDate()
    checker.get_local_date()
    checker.get_news_date()
    missing_dates = checker.needdatas()

    fetcher = GetData(missing_dates)
    fetcher.get_data()
