# -*- coding: utf-8 -*-
import scrapy
from ..items import GuaziItem

class GuaziSpider(scrapy.Spider):
    """Spider that scrapes used-car listings from guazi.com (Beijing).

    Crawl flow:
      1. ``start_requests`` enqueues the first 5 list pages up front.
      2. ``parse_first_page`` extracts per-car summary fields (href, title,
         price) and follows each detail link.
      3. ``parse_two_page`` fills in the detail fields (km, cc, typ) and
         yields the completed item to the pipelines.
    """
    name = 'guazi'
    allowed_domains = ['www.guazi.com']

    # start_urls is intentionally omitted; start_requests() is overridden
    # to generate every list-page request in one pass for the scheduler.
    def start_requests(self):
        """Yield requests for list pages 1-5, routed to parse_first_page."""
        url = 'https://www.guazi.com/bj/buy/o{}/#bread'
        for page in range(1, 6):
            yield scrapy.Request(url=url.format(page), callback=self.parse_first_page)

    def parse_first_page(self, response):
        """Parse one list page: build a GuaziItem per car and follow its detail link."""
        li_list = response.xpath('//ul[@class="carlist clearfix js-top"]/li')
        for li in li_list:
            href = li.xpath('./a[1]/@href').get()
            if not href:
                # .get() returns None when the anchor is missing; skip the
                # entry instead of raising TypeError on string concatenation.
                continue
            item = GuaziItem()
            # urljoin handles both relative and absolute hrefs, unlike a
            # hard-coded 'https://www.guazi.com' + href prefix.
            item['href'] = response.urljoin(href)
            item['title'] = li.xpath('.//h2[@class="t"]/text()').get()
            item['price'] = li.xpath('.//div[@class="t-price"]/p/text()').get()

            # meta carries the partially-filled item across to the
            # detail-page callback.
            yield scrapy.Request(url=item['href'], meta={'item': item}, callback=self.parse_two_page)

    def parse_two_page(self, response):
        """Parse a detail page, complete the item, and hand it to the pipelines."""
        # meta set on the request is exposed as an attribute of the response.
        item = response.meta['item']
        item['km'] = response.xpath('//ul[@class="assort clearfix"]/li[2]/span/text()').get()
        item['cc'] = response.xpath('//ul[@class="assort clearfix"]/li[3]/span/text()').get()
        item['typ'] = response.xpath('//ul[@class="assort clearfix"]/li[4]/span/text()').get()

        # One complete car record is ready; yield it to the item pipelines.
        yield item














