import requests
from bs4 import BeautifulSoup
from lxml import etree
import os
import datetime
import re
import openpyxl
from selenium import webdriver
import get_cookie

# Ensure the output directory for scraped article text exists.
os.makedirs('./pagetext', exist_ok=True)


# Refresh and cache the session cookie once at import time; every request
# header built by GetData reuses this value.
cookie = get_cookie.update_cookies()


class GetData:
    """Scrape COVID-19 bulletin listing pages from www.nhc.gov.cn and save
    each bulletin's first paragraph to ./pagetext/<title><date>.txt."""

    def __init__(self, url):
        """Remember the listing-page URL and build the request headers.

        :param url: listing-page URL (stored for reference; ``get_data``
                    receives the URL to fetch as its own argument).
        """
        self.header = {
            # Bug fix: the original UA string had stray spaces around '/'
            # (e.g. 'Mozilla / 5.0'), which is not a valid User-Agent and
            # some servers reject; normalized to the standard Chrome form.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/80.0.3987.87 Safari/537.36',
            'Host': 'www.nhc.gov.cn',
            'cookie': cookie  # module-level cookie fetched at import time
        }
        self.news_title = ''  # last bulletin title seen (kept for interface compatibility)
        self.news_href = ''   # last bulletin URL built
        self.url = url

    def get_data(self, url):
        """Fetch one listing page, follow every bulletin link on it and write
        each bulletin's first paragraph to a UTF-8 text file in ./pagetext.

        :param url: URL of a listing page such as list_gzbd.shtml.
        """
        response = requests.get(url, headers=self.header)
        print(response.status_code)
        text = etree.HTML(response.text)
        r_date = text.xpath('//li/span/text()')  # publication dates
        r_path = text.xpath('//li/a/@href')      # relative bulletin links
        r_title = text.xpath('//li/a/@title')    # bulletin titles
        print(r_date)

        # Build absolute URLs for every bulletin on the listing page.
        news_list = ['http://www.nhc.gov.cn' + path for path in r_path]
        if news_list:
            # Preserve original side effect: news_href ends up holding the
            # last URL constructed.
            self.news_href = news_list[-1]

        for href, title, date in zip(news_list, r_title, r_date):
            response = requests.get(href, headers=self.header)
            soup = BeautifulSoup(response.text, 'lxml')
            paragraph = soup.find(name='p')
            if paragraph is None:
                # Bug fix: the original raised AttributeError when a page
                # had no <p> tag (e.g. an error/redirect page); skip it.
                print('no <p> tag found in', href)
                continue
            # Bug fix: titles may contain characters that are illegal in
            # filenames (/, :, ?, ...); replace them so open() cannot fail.
            safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)
            filename = './pagetext/{}{}.txt'.format(safe_title, date)
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(paragraph.get_text())

    def get_all_data(self, first_page=2, last_page=5):
        """Scrape listing pages list_gzbd_<n>.shtml for n in
        [first_page, last_page], then print a completion message.

        Defaults reproduce the original hard-coded ``range(2, 6)``.

        :param first_page: first page number to fetch.
        :param last_page: last page number to fetch (inclusive).
        """
        for page in range(first_page, last_page + 1):
            page_url = 'http://www.nhc.gov.cn/xcs/yqtb/list_gzbd_{}.shtml'.format(page)
            # Bug fix: the original used the unidiomatic
            # ``GetData.get_data(self, url2)``; call via the instance.
            self.get_data(page_url)
        print('完成')

if __name__ == '__main__':
    # Scrape the first listing page, then the remaining paginated ones.
    start_url = 'http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml'
    scraper = GetData(start_url)
    scraper.get_data(start_url)
    scraper.get_all_data()

