from datetime import datetime

import scrapy

from FirstScrapy.items import PageScrapyItem


class PagescripySpider(scrapy.Spider):
    """Crawl joke entries page-by-page from xiaohua.com.

    Follows `?page=N` pagination starting at page 1, yielding one
    `PageScrapyItem` per joke, until a page with at most one matching
    div is returned (the site's last-page sentinel).
    """

    name = 'PageScripy'
    start_urls = ['https://www.xiaohua.com/duanzi?page=1']

    def __init__(self, **kwargs):
        # Initialize the base Spider first, then our crawl state.
        super().__init__(**kwargs)
        self.PageNumber = 1          # current page being crawled
        self.DateTime = datetime.now()  # crawl start time, for elapsed-seconds reporting

    def parse(self, response, **kwargs):
        """Extract jokes from one listing page and schedule the next page.

        Yields:
            PageScrapyItem: one per joke on the page.
            scrapy.Request: a request for the following page, when more remain.
        """
        joke = response.xpath("""/html/body/div[1]/div[8]/div[2]/div[2]/div""")
        # On the last page the selector matches only a single div,
        # so a count <= 1 means the crawl is complete.
        joke_count = len(joke.getall())

        # Guard clauses below reproduce the original exhaustive branch
        # logic (its trailing `else` was unreachable and has been dropped).
        if response.status != 200:
            print(
                f'''"error":{response.url}, "status":{response.status}, "message":‘页面丢失，请联系管理员,花费 {(datetime.now() - self.DateTime).seconds} s’''')
            return
        if joke_count <= 1:
            print(
                f'''"error":{response.url}, "status":{response.status}，"message":‘全部信息爬取成功，请前往查看,花费 {(datetime.now() - self.DateTime).seconds} s’''')
            return

        for i in joke:
            _joke_controller = i.xpath("""div/div/a/i/text()""").get()
            _joke_text = i.xpath("""p/a/text()""").get()
            item = PageScrapyItem(joke_controller=_joke_controller, joke_text=_joke_text,
                                  joke_create_time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            yield item
        print(f""""successfully_url":{response.url}, "status":{response.status}, "message":‘第{self.PageNumber}页信息爬取成功，共爬取{joke_count}条信息记录’""")

        # Derive the next-page URL by stripping the trailing "=1" from the
        # seed URL and appending the incremented page number.
        self.PageNumber += 1
        next_page = f"{self.start_urls[0][:-2]}={self.PageNumber}"
        yield scrapy.Request(next_page, callback=self.parse)
