# -*- coding: utf-8 -*-
import json
from urllib.parse import quote

import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.project import get_project_settings


class TestItem(scrapy.Item):
    """Item holding the product fields scraped from a detail page."""
    # NOTE: `id` shadows the builtin at class scope, but it is the field
    # name the spider fills via item['id'], so it must stay.
    id = scrapy.Field()      # product identifier (currently left as None by parse_item)
    name = scrapy.Field()    # product title, extracted from the sku-name div
    price = scrapy.Field()   # product price (not yet extracted)
    images = scrapy.Field()  # product image URLs (not yet extracted)


class CommonSpider(CrawlSpider):
    """Generic crawl spider: searches JD for a keyword and follows the
    product-detail links matched by ``rules``, extracting a TestItem
    from each detail page.

    Rule attribute reference (kept from the original notes):
        link_extractor: link extraction rules
            allow / deny / allow_domains: URL pattern filters
            restrict_xpaths / restrict_css: limit extraction to a page region
        callback: method invoked with each matched response
        cb_kwargs: extra keyword args passed to the callback
        follow: whether to keep following links from matched pages
        process_links / process_request: hooks to filter or rewrite
            extracted links / outgoing requests
    """
    name = 'common'
    # allowed_domains = ['youhui.pinduoduo.com/']

    # Raw strings so `\d` is a regex digit class, not an invalid string
    # escape (a SyntaxWarning on Python >= 3.12).
    rules = (
        Rule(LinkExtractor(allow=(r'subject/\d+/$',)), callback='parse_item'),
        Rule(LinkExtractor(allow=(r'item.jd.com/\d+\.html$',)), callback='parse_item'),
    )

    def __init__(self, keyword=None, *a, **kw):
        """Build the start URL from *keyword*.

        :param keyword: search term; URL-encoded so non-ASCII terms
            (e.g. Chinese) form a valid URL. ``None`` becomes an empty
            search instead of the literal string "None".

        NOTE: per earlier testing, ``rules`` cannot be (re)defined here —
        Scrapy compiles them at class-definition time.
        """
        super().__init__(*a, **kw)
        encoded = quote('' if keyword is None else str(keyword))
        self.start_urls = ["https://search.jd.com/Search?keyword=%s&enc=utf-8" % encoded]

    def parse_item(self, response):
        """Extract product information from a detail page.

        Yields a TestItem (previously the item was built and then
        discarded, and dumped with print() instead of the logger).
        """
        self.logger.info(response.url)

        item = TestItem()
        # item['id'] = response.url.split('/')[-1].split('.')[-2]
        item['id'] = None
        item['name'] = response.xpath('//div[@class="sku-name"]/text()').extract()
        item['price'] = None
        item['images'] = None
        self.logger.debug('scraped item: %s', item)
        yield item


if __name__ == "__main__":
    settings = get_project_settings()
    settings.set('TEST', 'test common')
    crawl = CrawlerProcess(settings)
    crawl.crawl(CommonSpider, keyword='椅子')
    crawl.start()
