import scrapy
from latest_politics.items import LatestPoliticsItem
MAX_PAGE = 100

class GetLatestPoliticsSpider(scrapy.Spider):
    """Crawl the newest political-inquiry listings from wz.sun0769.com.

    ``parse`` extracts each listing row into a ``LatestPoliticsItem`` and
    schedules one detail-page request per row; ``parse_details`` fills in
    the complaint body text and yields the finished item.
    """

    name = "get_latest_politics"
    allowed_domains = ["wz.sun0769.com"]
    start_urls = ["https://wz.sun0769.com/political/index/politicsNewest?id=1&page=1"]

    def parse(self, response, **kwargs):
        """Parse one listing page.

        Yields one detail-page Request per listing row, then schedules the
        next listing page exactly once (bounded by the module-level
        MAX_PAGE; the site exposes the last page number in its pager, so
        MAX_PAGE could be replaced by parsing that link — TODO confirm the
        pager selector before relying on it).
        """
        # Current page number, taken from the 'page=' query parameter.
        page = int(response.url.split('page=')[1])

        for row in response.xpath("//ul[@class='title-state-ul']//li"):
            item = LatestPoliticsItem()
            # Inquiry serial number.
            item["number"] = row.xpath("./span[1]/text()").extract_first()
            # Processing status; extract_first() returns None for a missing
            # node, so guard with '' before calling strip().
            item["status"] = (row.xpath("./span[2]/text()").extract_first() or '').strip()
            # Inquiry title.
            item["political_title"] = (row.xpath("./span[3]/a/text()").extract_first() or '').strip()
            # Response time.
            item["response_time"] = (row.xpath("./span[4]/text()").extract_first() or '').strip()
            # Inquiry time.
            item["political_inquiry_time"] = row.xpath("./span[5]/text()").extract_first()
            # Absolute URL of the detail page.
            item["details_url"] = (
                "https://wz.sun0769.com"
                + row.xpath("./span[3]/a[@class='color-hover']/@href").extract_first()
            )

            # Pass the partially-filled item to the detail callback; meta
            # carries it across the request/response cycle.
            yield scrapy.Request(url=item["details_url"],
                                 callback=self.parse_details,
                                 meta={'item': item})

        # BUG FIX: this used to sit inside the row loop, yielding the same
        # next-page request once per <li> (silently absorbed by Scrapy's
        # dedup filter). Schedule it exactly once per page instead.
        if page < MAX_PAGE:
            next_url = response.url.replace(f'page={page}', f'page={page + 1}')
            yield scrapy.Request(next_url, callback=self.parse)

    def parse_details(self, response):
        """Parse a detail page and emit the completed item."""
        # First text node of the complaint body; may be absent if the page
        # layout differs, so fall back to '' before strip() instead of
        # crashing on None.
        detail = response.css("div[class='details-box'] pre::text").extract_first()

        # Retrieve the item handed over by parse() and complete it.
        item = response.meta["item"]
        item["detailed_information"] = (detail or '').strip()
        yield item
