import scrapy
class Sun(scrapy.Spider):
    """Crawl citizen-complaint posts from the wz.sun0769.com supervision board.

    ``parse`` walks the listing pages (at most ``limit`` of them) and
    dispatches each post's detail URL to ``parse_item``, which yields one
    dict per post with its number, URL, title and body text.
    """

    name = "Sun"
    allowed_domains = ["wz.sun0769.com"]
    start_urls = ["https://wz.sun0769.com/political/index/supervise"]
    page = 1   # count of listing pages processed so far (starts at first page)
    limit = 2  # stop pagination after this many listing pages

    def parse(self, response):
        """Parse one listing page: follow every post link, then the next page.

        :param response: listing-page response from wz.sun0769.com.
        :yields: Requests for each post detail page (handled by
                 ``parse_item``) and, while under ``limit``, a Request
                 for the next listing page (handled by ``parse`` again).
        """
        # Follow each post's detail link and hand it to parse_item.
        post_links = response.xpath('//a[@class="color-hover"]/@href')
        for link in post_links:
            yield response.follow(link, self.parse_item)

        # Follow the next-page link recursively until the page limit is hit.
        next_page_url = response.xpath('//a[@class="arrow-page prov_rota"]/@href').get()
        self.page += 1
        if next_page_url and self.page <= self.limit:
            yield response.follow(next_page_url, self.parse)

    def parse_item(self, response):
        """Extract one complaint post's number, URL, title and content.

        :param response: post detail-page response.
        :yields: one dict with keys 编号 (number), 链接 (URL),
                 标题 (title), 内容 (content); a field is ``None`` when
                 its selector finds nothing.
        """
        # .get() returns None when the selector misses (e.g. layout change
        # or removed post); guard before splitting so one bad page doesn't
        # crash the callback with AttributeError and lose the item.
        number_text = response.xpath('//span[contains(text(), "编号")]/text()').get()
        post_number = number_text.split("：")[-1] if number_text else None
        post_url = response.url
        post_title = response.xpath('//p[@class="focus-details"]/text()').get()
        post_content = response.xpath('//div[@class="details-box"]/pre/text()').get()

        # Emit the scraped record as a plain dict (serialized by feed exports).
        yield {
            "编号": post_number,
            "链接": post_url,
            "标题": post_title,
            "内容": post_content
        }