import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class SunCrawlSpider(CrawlSpider):
    """Crawl complaint posts from the wz.sun0769.com supervision board.

    Follows pagination links on the index page (truncated by
    ``process_pagination_links``) and extracts post number, URL, title,
    and content from each post detail page.
    """

    name = "SunCrawlSpider"
    allowed_domains = ["wz.sun0769.com"]
    start_urls = ["https://wz.sun0769.com/political/index/supervise"]

    rules = (
        # Pagination links: no callback, so follow=True by default.
        Rule(LinkExtractor(allow=r'/political/index/supervise\?page=\d+'), process_links='process_pagination_links'),
        # Post detail links: parsed by parse_item.
        Rule(LinkExtractor(allow=r'/political/politics/index\?id=\d+'), callback='parse_item')
    )

    def process_pagination_links(self, links):
        """Keep only the first two pagination links extracted from each response.

        NOTE(review): this truncates per response, not globally — with
        deduplication it effectively limits the crawl to the first pages.
        """
        return links[:2]

    def parse_item(self, response):
        """Extract one complaint post's number, URL, title, and content.

        Yields a dict with Chinese keys (编号/链接/标题/内容) suitable for
        JSON feed export.
        """
        # .get() returns None when the node is absent; guard before
        # splitting, otherwise a missing 编号 span raises AttributeError.
        number_text = response.xpath('//span[contains(text(), "编号")]/text()').get()
        # The span text looks like "编号：12345"; keep only the part after the
        # full-width colon.
        post_number = number_text.split("：")[-1] if number_text else None
        post_url = response.url
        post_title = response.xpath('//p[@class="focus-details"]/text()').get()
        post_content = response.xpath('//div[@class="details-box"]/pre/text()').get()

        # Emit the scraped record (serialized to JSON by the feed exporter).
        yield {
            "编号": post_number,
            "链接": post_url,
            "标题": post_title,
            "内容": post_content
        }