# -*- coding: utf-8 -*-
# Time: 2022/6/11 17:28
import requests
from lxml import etree
from pymongo import MongoClient


class DBMongo(object):
    """Holds the MongoDB connection shared by the spider classes.

    :param host: MongoDB host to connect to. Defaults to the original
                 hard-coded address for backward compatibility.
    """

    def __init__(self, host="121.41.206.74"):
        self.client = MongoClient(host)
        # Collection 'day6-2' inside database 'python'.
        self.db = self.client["python"]['day6-2']

    def __del__(self):
        # Guard: if __init__ raised before self.client was assigned,
        # __del__ would otherwise fail with AttributeError.
        client = getattr(self, "client", None)
        if client is not None:
            client.close()


class DataModel(object):
    """Plain record for one scraped complaint entry.

    All five fields are stored as-is; the scraper passes stripped strings
    extracted from the listing page.
    """

    def __init__(self, number, state: str, title: str, response_time, political_time):
        self.number = number                  # entry number
        self.state = state                    # processing state
        self.title = title                    # entry title
        self.response_time = response_time    # response time
        self.political_time = political_time  # time the complaint was filed

    def __repr__(self):
        # Debug-friendly representation showing all stored fields.
        return (f"{type(self).__name__}(number={self.number!r}, state={self.state!r}, "
                f"title={self.title!r}, response_time={self.response_time!r}, "
                f"political_time={self.political_time!r})")


class PoliticalSpider(DBMongo):
    """Scraper for the sun0769 politics complaint board.

    Fetches paginated listing pages, parses each entry into a DataModel
    and stores it in MongoDB (connection inherited from DBMongo).
    """

    # Seconds before a hung HTTP request is abandoned; requests.get()
    # blocks indefinitely when no timeout is given.
    REQUEST_TIMEOUT = 10

    @staticmethod
    def first(nodes):
        """Return the first xpath result as a stripped string, or "" if empty."""
        return str(nodes[0]).strip() if nodes else ""

    def get_data(self, page: int):
        """Yield DataModel items for listing pages 1..page.

        :param page: number of pages to crawl; falsy values mean 1 page.
        """
        last_page = page or 1
        for p in range(1, last_page + 1):
            url = f"http://wzzdg.sun0769.com/political/index/politicsNewest?id=1&page={p}"
            yield from self._get_data(url)

    def _get_data(self, url):
        """Fetch one listing page and yield a DataModel per <li> entry."""
        req = requests.get(url, timeout=self.REQUEST_TIMEOUT)
        # Fail fast on HTTP errors instead of silently parsing an error page.
        req.raise_for_status()
        html = etree.HTML(req.text)
        for li in html.xpath('//ul[@class="title-state-ul"]/li'):
            yield DataModel(
                self.first(li.xpath('./span[@class="state1"]/text()')),    # number
                self.first(li.xpath('./span[@class="state2"]/text()')),    # state
                self.first(li.xpath('./span[@class="state3"]/a/text()')),  # title
                self.first(li.xpath('./span[@class="state4"]/text()')),    # response time
                self.first(li.xpath('./span[5]/text()')),                  # political time
            )

    def save_data(self, data: DataModel):
        """Insert one scraped record into MongoDB and log it."""
        self.db.insert_one(data.__dict__)
        print(f"[写入MongoDB数据] {data.__dict__}")

    @classmethod
    def run(cls, num: int):
        """
        Run the spider.

        :param num: number of listing pages to crawl
        """
        spider = cls()
        for _data in spider.get_data(num):
            spider.save_data(_data)

if __name__ == '__main__':
    PoliticalSpider.run(10)  # crawl 10 pages of data


