# coding=utf-8

# Monitor a web page on a schedule: fetch it, compare its <title> against an
# expected value, and send an alert email when they differ.
# (NOTE: an earlier version of this comment described CSDN blog scraping —
# topic/link/date/view-count — which no longer matches the code below.)
from urllib import request
from bs4 import BeautifulSoup
import emailSend44 as email444
from apscheduler.schedulers.blocking import BlockingScheduler

addr = "https://tech.ifeng.com/c/81dln15AOMB"  # 设定抓取网址
hope_title = "嫦五发射成功！创下五个“首次”！一图了解这次“挖土”之旅_凤凰网"  # 预期的标题内容


# addr = "http://www.bjp2p.com.cn/news/20200224001"  # 设定抓取网址
# hope_title = "人民银行：多措并举，彻底化解互联网金融风险"  # 预期的标题内容

class CSDNSpider:
    """Fetch a web page and verify its <title> matches the expected value,
    sending an alert email when it does not.

    NOTE(review): the class name is historical — it now monitors whatever
    URL is passed in (default: module-level ``addr``), not CSDN blogs.
    """

    def __init__(self, url=addr):
        """Store the target URL and a browser-like request header.

        :param url: page to monitor; defaults to the module-level ``addr``.
        """
        self.url = url
        # Browser-like User-Agent so the server does not reject us as a bot.
        self.header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
        }

    def getBeautifulSoup(self):
        """Request ``self.url`` and return the parsed BeautifulSoup document."""
        req = request.Request(self.url, headers=self.header)
        # Use a context manager so the HTTP connection is closed even if
        # parsing raises (the original leaked the response object).
        with request.urlopen(req) as res:
            print(res.status)
            # Parse with the stdlib parser; alternatives include
            # html.parser / lxml / lxml-xml / xml / html5lib.
            soup = BeautifulSoup(res, 'html.parser')
        return soup

    def check(self):
        """Fetch the page and alert by email if its <title> != hope_title."""
        # Bug fix: the original built a brand-new CSDNSpider() here, which
        # silently ignored the URL this instance was constructed with.
        content = self.getBeautifulSoup()
        title = content.find("title").get_text()
        print(title)
        if title != hope_title:
            print("服务器异常")
            email444.sendEmail()


def job():
    """Scheduled entry point: announce the run, then perform one title check."""
    print("定时任务启动")
    CSDNSpider().check()


if __name__ == "__main__":
    # Schedule job() every 5 seconds; start() blocks until interrupted.
    sched = BlockingScheduler()
    sched.add_job(job, 'interval', seconds=5)  # interval unit: seconds
    sched.start()
