import scrapy
from urllib.parse import urljoin
from ..items import SunpoliticItem


class SunSpider(scrapy.Spider):
    """Spider for the Sunshine Hotline (wz.sun0769.com) political inquiry board.

    Crawls the "newest politics" listing pages, yields one ``SunpoliticItem``
    per complaint row, and follows pagination until the page budget
    (``allowed_try_times``) is exhausted.
    """

    name = 'sun'
    allowed_domains = ['wz.sun0769.com']
    start_urls = ['https://wz.sun0769.com/political/index/politicsNewest']
    # Remaining number of follow-up listing pages this spider may request.
    allowed_try_times = 101
    # Page number used for the next pagination request
    # (start_urls[0] itself is page 1, so the first follow-up is page 2).
    page_increment = 1

    @staticmethod
    def _clean(text):
        """Strip newlines and surrounding whitespace; pass ``None`` through.

        ``extract_first()`` returns ``None`` when the xpath matches nothing,
        so guard against it instead of raising ``AttributeError``.
        """
        return text.replace("\n", "").strip() if text else text

    def parse(self, response, **kwargs):
        """Extract complaint rows from one listing page, then paginate.

        :param response: listing-page response for ``//li[@class='clear']`` rows
        :yields: ``SunpoliticItem`` per row, then (while the budget lasts)
                 a ``scrapy.Request`` for the next listing page.
        """
        # NOTE: attributes set in a pipeline's open_spider() are reachable
        # on self here as well.
        self.page_increment += 1
        self.allowed_try_times -= 1

        for row in response.xpath("//li[@class='clear']"):
            href = row.xpath(".//span[@class='state3']/a/@href").extract_first()
            yield SunpoliticItem(
                series_no=row.xpath(".//span[@class='state1']/text()").extract_first(),
                status=self._clean(
                    row.xpath(".//span[@class='state2']/text()").extract_first()),
                rest_time=self._clean(
                    row.xpath(".//span[@class='state4']/text()").extract_first()),
                title=row.xpath(".//span[@class='state3']/a/text()").extract_first(),
                # href is relative; resolve it against the site base URL.
                url=urljoin(self.start_urls[0], href),
            )

        self.logger.debug(
            "before pagination check: allowed_try_times=%s page=%s",
            self.allowed_try_times, self.page_increment,
        )
        if self.allowed_try_times > 0:
            next_url = f"{self.start_urls[0]}?id=1&page={self.page_increment}"
            self.logger.debug(
                "allowed_try_times=%s page=%s next url=%s",
                self.allowed_try_times, self.page_increment, next_url,
            )
            yield scrapy.Request(
                url=next_url,
                callback=self.parse,
            )

