import re

import scrapy


class YouxinSpider(scrapy.Spider):
    """Spider that scrapes used-car listings from xin.com (Suqian region).

    Crawls result pages 1 through 5, yielding one dict per car with the
    listing title, model/type prefix, year, and price.
    """

    name = 'youxin'
    allowed_domains = ['xin.com']
    start_urls = ['https://www.xin.com/suqian/i1/']
    page = 1  # current page number being crawled

    # Shared request headers: the site expects a browser-like User-Agent,
    # so every request (including pagination) must carry these.
    HEADERS = {
        "Host": "www.xin.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.9171"
    }

    def start_requests(self):
        """Issue the first request.

        Overridden (instead of relying on the default implementation)
        solely so the custom headers are attached to the request.
        """
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse,
                             headers=self.HEADERS)

    def parse(self, response):
        """Extract car listings from one result page and follow pagination.

        Yields one item dict per listing, then requests the next page
        until page 5 has been crawled.
        """
        li_lists = response.xpath('//div[@class="carlist-show"]//ul/li')
        for temp in li_lists:
            title = temp.xpath('.//h2/span/text()').extract_first()
            if not title:
                # Skip malformed entries instead of crashing on None.
                continue

            # The model/type is the title text preceding the model year
            # ("...20xx"). Fall back to the full title if no year appears,
            # rather than raising AttributeError on a failed match.
            type_match = re.match(r"(.*?)20", title)
            type_ = type_match.group(1) if type_match else title

            # extract_first() returns None when the XPath matches nothing;
            # guard before calling .strip() to avoid AttributeError.
            year_raw = temp.xpath('.//div[@class="pad"]/span/text()[1]').extract_first()
            year = (year_raw or "").strip()
            year_match = re.search(r"(.*?)年", year)
            if year_match:
                year = year_match.group(1)

            price_raw = temp.xpath('.//div[@class="pad"]/p/em/text()[1]').extract_first()
            price = (price_raw or "").strip()[:5]

            yield {
                "type": type_,
                "title": title,
                "year": year,
                "price": price,
                "time": "2020年12月10日",
            }

        self.page += 1
        if self.page <= 5:
            next_page_url = "https://www.xin.com/suqian/i%s/" % self.page
            # Bug fix: the pagination request previously omitted the custom
            # headers, so pages 2-5 were fetched without the required
            # User-Agent that start_requests was overridden to provide.
            yield scrapy.Request(url=next_page_url, callback=self.parse,
                                 headers=self.HEADERS)
