import json
import re

import scrapy

from nlproject.items import NewsItem

class keySpider(scrapy.Spider):
    """
    Keyword-filtering spider: crawl the given pages, select container nodes
    with a configurable XPath, and emit one item per node whose text contains
    any of the given keywords.

    Usage (mirrors singleSpider):
        scrapy crawl keySpider -a start_urls='["http://news.baidu.com/"]' \
            -a xpaths='{"first": "//a", "link": "@href", "title": "text()"}' \
            -a keys='[""]'
    """
    name = "keySpider"

    def __init__(self, start_urls, xpaths, keys, *args, **kwargs):
        """
        :param start_urls: JSON-encoded list of URLs to crawl.
        :param xpaths: JSON-encoded dict. Must contain the key ``'first'``
            (XPath selecting the container nodes, fixed by the front end);
            every other entry maps an output field name to an XPath relative
            to a container node.
        :param keys: JSON-encoded list of keyword strings; an empty string
            matches every node.
        :raises KeyError: if ``xpaths`` has no ``'first'`` entry.
        """
        super(keySpider, self).__init__(*args, **kwargs)
        self.resList = []

        self.start_urls = json.loads(start_urls)  # type: list
        print(self.start_urls)

        self.xpaths = json.loads(xpaths)  # type: dict
        print(self.xpaths)

        # Pop 'first' exactly once here. The old code popped it inside
        # parse(), mutating self.xpaths per response — with more than one
        # start URL the second parse() call raised KeyError: 'first'.
        self.first_xpath = self.xpaths.pop('first')

        self.keys = json.loads(keys)  # type: list
        print(self.keys)

        # Pre-compile one pattern per keyword (hoisted out of parse's hot
        # loop). re.escape keeps keywords containing regex metacharacters
        # from breaking or distorting the match; re.escape('') == '' so an
        # empty keyword still matches everything, as before.
        self.patterns = [
            re.compile(r'[\s\S]*{}[\s\S]*'.format(re.escape(key)))
            for key in self.keys
        ]

    def parse(self, response):
        """Yield one dict per container node whose text matches a keyword.

        Each dict maps the configured field names to the first value
        extracted by the corresponding relative XPath.
        """
        for sel in response.xpath(self.first_xpath):
            # TODO: support and/or combinations of keywords.
            node_text = sel.xpath('text()')
            # any(): yield a node once even when several keywords match
            # (the old per-keyword loop emitted duplicate items).
            if any(node_text.re(pattern) for pattern in self.patterns):
                # extract_first(): exact match, keep only the first result.
                # TODO: confirm dropping extra matches is acceptable.
                yield {
                    field: sel.xpath(xpath).extract_first()
                    for field, xpath in self.xpaths.items()
                }