# -*- coding: utf-8 -*-
# Time: 2022/6/12 15:11
import requests
from lxml import etree
from pymongo import MongoClient


class DataModel(object):
    """Container for one scraped news article.

    Attributes map 1:1 onto the MongoDB document written by
    ``DBMongo.save_data`` (which serializes ``__dict__``).
    """

    def __init__(self, title, url, auth, date, content):
        self.title = title                  # news headline
        self.url = url                      # article URL
        self.auth = auth                    # publisher / author
        self.date = date                    # publish time (as shown on the page)
        self.content = content              # article body, raw HTML of the #zoom node

    def __repr__(self):
        # content is omitted on purpose: it is a full HTML fragment.
        return (f"{type(self).__name__}(title={self.title!r}, url={self.url!r}, "
                f"auth={self.auth!r}, date={self.date!r})")


class DBMongo(object):
    """Thin MongoDB persistence layer: writes items into python/day6-3."""

    def __init__(self):
        # NOTE(review): hard-coded, credential-less host — move to config
        # before deploying anywhere real.
        self.client = MongoClient("121.41.206.74")
        self.db = self.client["python"]['day6-3']

    def __del__(self):
        # Guard: if __init__ raised before self.client was assigned,
        # __del__ must not raise AttributeError during interpreter teardown.
        client = getattr(self, "client", None)
        if client is not None:
            client.close()

    def save_data(self, data: DataModel):
        """Insert one DataModel as a MongoDB document.

        Insert a *copy* of ``__dict__``: pymongo's ``insert_one`` injects
        the generated ``_id`` into the dict it receives, which would
        otherwise mutate the DataModel instance.
        """
        self.db.insert_one(dict(data.__dict__))
        print(f"[写入MongoDB数据] {data.__dict__}")


class NewsSpider(DBMongo):
    """Spider for the chinanews Henan domestic-news listing page."""

    # Request timeout in seconds — without it a stalled server hangs the run.
    TIMEOUT = 10

    def __init__(self):
        super(NewsSpider, self).__init__()
        self.http = "http://www.hn.chinanews.com.cn"

    @staticmethod
    def first(x):
        """Return the first element of *x* as a stripped str, or "" if empty."""
        return str(x[0]).strip() if x else ""

    def get_url(self):
        """Yield absolute article URLs found in the listing page.

        Bug fix: the original iterated ``<ul class="NLPul">`` elements and
        took only the FIRST ``<a>`` of each list, scraping at most one
        article per list. Now every link inside the list is yielded.
        """
        _index_url = "http://www.hn.chinanews.com.cn/news/gnxw/"
        req = requests.get(_index_url, timeout=self.TIMEOUT)
        req.encoding = "gbk"  # the site serves GBK-encoded pages
        html = etree.HTML(req.text)
        for href in html.xpath('//ul[@class="NLPul"]//a/@href'):
            _uri = str(href).strip()
            if not _uri:
                continue
            if _uri.startswith(self.http):
                yield _uri
            else:
                # Relative link — prefix with the site root.
                yield f"{self.http}{_uri}"

    def get_page_data(self, url):
        """Fetch one article page and pack it into a DataModel.

        ``content`` stores the raw HTML of the ``#zoom`` element;
        raises IndexError if the page has no ``#zoom`` node.
        """
        req = requests.get(url, timeout=self.TIMEOUT)
        req.encoding = "gbk"
        html = etree.HTML(req.text)
        title = self.first(html.xpath('//div[@class="newbiaoti"]//text()'))
        # partition() degrades gracefully when the separator is missing
        # (auth = whole string, date = ""), where split()+unpack raised.
        meta = self.first(html.xpath('//*[@id="anthor"]/text()'))
        auth, _, date = meta.partition(" 发布时间：")
        # Store the article body as raw page source.
        content = etree.tostring(html.xpath('//*[@id="zoom"]')[0], encoding="utf-8").decode()
        return DataModel(title=title, url=url, auth=auth, date=date, content=content)

    @classmethod
    def run(cls):
        """Instantiate the spider and persist every listed article."""
        spider = cls()
        for url in spider.get_url():
            spider.save_data(spider.get_page_data(url))


# Script entry point: crawl the listing and store every article in MongoDB.
if __name__ == '__main__':
    NewsSpider.run()


