# -*- coding: utf-8 -*-
import scrapy


class SpiderCarBrandSpider(scrapy.Spider):
    """Scrape car brand -> country-of-origin pairs from xl.16888.com.

    Pagination links are collected once from the first page, then the pages
    are requested one at a time in a chain, accumulating both the remaining
    URLs and the scraped rows in ``response.meta`` until every page has been
    consumed.
    """

    name = 'spider_car_origin_add_national'
    base_url = 'https://xl.16888.com'
    start_urls = ['https://xl.16888.com/brand.html']

    def url_solver(self, response):
        """Extract pagination hrefs from *response* into ``meta['next_urls']``.

        :param response: page response whose pagination links are harvested.
        """
        next_urls = response.xpath('//a[@class="lineBlock num"]/@href').extract()
        # Merge with any URLs already queued in meta.
        response.meta['next_urls'] = response.meta.get('next_urls', []) + next_urls

    def data_solver(self, response):
        """Extract (brand, national) rows from *response* into ``meta['data']``.

        :param response: page response containing the brand table.
        """
        rows = response.xpath('//table/tr')
        msgs = []
        for row in rows:
            msg = Item(row, 'brandNational')
            msg.add_path('brand', 'td[3]/a/text()')
            msg.add_path('national', 'td[4]/text()')
            # The leading header row has no matching text nodes, so
            # add_path stores its 0 sentinel — skip that row.
            if msg['brand'] == 0:
                continue
            # Scrapy convention: use the spider's logger instead of print().
            self.logger.debug('scraped item: %s', msg)
            msgs.append(msg)

        # Merge this page's rows with what earlier pages accumulated.
        response.meta['data'] = response.meta.get('data', []) + msgs

    def next_urls_handler(self, response):
        """Store this page's rows, then chain to the next queued page.

        Holds two pieces of state in ``response.meta``: ``next_urls`` (pages
        still to fetch) and ``data`` (rows accumulated so far).

        :returns: a follow-up :class:`scrapy.Request` while pages remain,
            otherwise the accumulated list of items.
        """
        # Harvest the rows of the current response first.
        self.data_solver(response)

        next_urls = response.meta.get('next_urls', [])
        if next_urls:
            next_url = self.base_url + next_urls.pop()
            return scrapy.Request(next_url, meta={
                'next_urls': next_urls,
                'data': response.meta['data'],
            }, callback=self.next_urls_handler)
        # No pages left: hand the accumulated items to the engine.
        return response.meta.get('data', [])

    def parse(self, response):
        """Entry point: gather the pagination URLs, then walk them in a chain."""
        self.url_solver(response)
        return self.next_urls_handler(response)


class Item(dict):
    """Dict-based scraped item that pulls its values from a Scrapy selector.

    ``class_name`` tags the item with a logical record type
    (e.g. ``'brandNational'``).
    """

    class_name = ''

    def __init__(self, selector, class_name=''):
        super().__init__()
        # Selector the xpath queries in add_path() run against.
        self._selector = selector
        self.class_name = class_name

    def add_path(self, attr, xpath, begin=0, end=None):
        """Evaluate *xpath* against the selector and store the result at *attr*.

        With ``end is None`` a single value is stored: the match at index
        *begin*, or *begin* itself (``0`` by default) when no such match
        exists — callers use that ``0`` as a "no match" sentinel.  With *end*
        given, the inclusive slice ``[begin:end]`` of all matches is stored.

        BUG FIX: the original passed *begin* as the ``default`` argument of
        ``SelectorList.get()`` (``.get(begin)``), so it always returned the
        FIRST match and silently ignored *begin* as an index.  We now index
        explicitly, keeping the original fallback value (*begin*) when the
        index is out of range so the ``== 0`` sentinel check still works.
        """
        matches = self._selector.xpath(xpath).getall()
        if end is None:
            self[attr] = matches[begin] if begin < len(matches) else begin
            return
        self[attr] = matches[begin:end + 1]
