#!/usr/bin/env python
# -*- coding: utf-8 -*-

import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.settings import Settings
from scrapy.utils.response import get_base_url
from car58.items import EasySpiderItem


def make_rules(rule_list, rule_detail):
    """Build the pair of CrawlSpider rules from the given config dicts.

    *rule_list* / *rule_detail* are dicts with 'allow' and 'deny' keys.
    The first rule only follows matching listing links (no callback, so
    ``follow`` defaults to True); the second parses matching detail pages
    with the spider's ``parse_item`` method.
    """
    # Follow links matching the listing patterns (no callback => follow=True).
    follow_rule = Rule(
        LinkExtractor(allow=rule_list['allow'], deny=rule_list['deny']))

    # Parse links matching the detail patterns with parse_item.
    detail_rule = Rule(
        LinkExtractor(allow=rule_detail['allow'], deny=rule_detail['deny']),
        callback='parse_item')

    return (follow_rule, detail_rule)


class EasySpider(CrawlSpider):
    """A generic CrawlSpider configured entirely at runtime.

    Domains, start URLs and crawl rules are read from the crawler's
    ``SPIIDER_SETTINGS`` dict in ``from_crawler``.  Expected keys:
    'allowed_domains', 'start_urls', 'rule_list', 'rule_detail'
    (the two rule dicts each carry 'allow' and 'deny' patterns).
    """

    name = 'easy-spider'
    custom_settings = {}

    @classmethod
    def update_settings(cls, settings):
        # Hook kept so per-spider overrides can be injected here, e.g.:
        # cls.custom_settings = {'DEPTH_LIMIT': 1, 'DOWNLOAD_DELAY': 1}
        super(EasySpider, cls).update_settings(settings)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Configure the spider class from project settings, then build it."""
        # NOTE(review): 'SPIIDER_SETTINGS' (double I) looks like a typo for
        # 'SPIDER_SETTINGS', but it is a runtime key that must match the
        # project's settings.py -- confirm before renaming either side.
        spider_settings = crawler.settings.getdict('SPIIDER_SETTINGS')
        cls.allowed_domains = spider_settings['allowed_domains']
        cls.start_urls = spider_settings['start_urls']
        cls.rules = make_rules(spider_settings['rule_list'],
                               spider_settings['rule_detail'])
        return super(EasySpider, cls).from_crawler(crawler, *args, **kwargs)

    def process_links(self, links=None):
        # Placeholder: a CrawlSpider process_links hook receives the list of
        # extracted links and must return the (possibly filtered) list.
        # Defaulted parameter added so the signature matches the hook while
        # staying backward compatible; not wired into any Rule yet.
        pass

    @staticmethod
    def extract(response, xpath, re):
        """Extract text from *response* at *xpath*.

        If *re* is truthy it is applied as a regex filter to the selection.
        Returns '' when nothing matched, the stripped string for a single
        match, and the stripped matches joined with '-' for several.
        """
        selected = response.xpath(xpath)
        # BUG FIX: SelectorList.re() already returns a plain list of strings,
        # so the original code's .extract() on that result raised
        # AttributeError whenever a regex was supplied.
        if re:
            items = selected.re(re)
        else:
            items = selected.extract()
        # Drop empty / whitespace-only matches.  list() keeps len() working
        # on Python 3, where filter() returns a lazy iterator (no-op on Py2).
        items = list(filter(lambda x: x and x.strip(), items))
        if not items:
            return ''
        if len(items) == 1:
            return items[0].strip()
        return '-'.join(i.strip() for i in items)

    def parse_item(self, response):
        """Build an EasySpiderItem from each field's xpath/re spec.

        Fields marked 'strict' are required: if one extracts nothing the
        whole page is dropped (the loop breaks, skipping the for-else yield).
        """
        print('*****************************parse_item*********************************')
        results = dict()
        for field_name, spec in EasySpiderItem.fields.items():
            results[field_name] = EasySpider.extract(
                response, spec['xpath'], spec['re'])
            if spec['strict'] and not results[field_name]:
                break
        else:
            # Only reached when no strict field was missing.
            yield EasySpiderItem(results)

        





