# -*- coding: utf-8 -*-
from scrapy import Request,Spider
from urllib.parse import quote
from jdlist.items import ProductItem


class JdSpider(Spider):
    """Crawl JD.com listing pages and yield one ProductItem per product.

    Search terms come from the project settings:
      KEYWORDS -- iterable of keyword strings to search for.
      MAX_PAGE -- number of listing pages to fetch per keyword.
    """
    name = 'jd'
    allowed_domains = ['list.jd.com']
    base_url = 'https://list.jd.com/list.html?cat='

    def start_requests(self):
        """Schedule one request per (keyword, page) pair.

        Fix: the original stored ``page`` only in ``meta`` and never placed
        it in the URL, so with ``dont_filter=True`` every iteration fetched
        the identical first-page URL. The page number is now appended as a
        query parameter so each request targets a distinct page.
        """
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                # JD listing pages paginate via the `page` query parameter.
                url = '{}{}&page={}'.format(self.base_url, quote(keyword), page)
                self.logger.debug('Scheduling request: %s', url)
                yield Request(url=url, callback=self.parse,
                              meta={'page': page}, dont_filter=True)

    def parse(self, response):
        """Extract product fields from each <li class="gl-item"> node.

        Fix: a fresh ProductItem is created per product. The original reused
        one mutable item across the whole loop, so every yielded reference
        could end up pointing at the last product's data once the items were
        processed asynchronously downstream.
        """
        for product in response.selector.css("li.gl-item"):
            item = ProductItem()
            # The cover image may be lazy-loaded; fall back to data-lazy-img.
            item['image'] = product.css(
                "div.p-img img::attr('src'),div.p-img img::attr('data-lazy-img')"
            ).extract_first()
            item['price'] = product.css("div.p-price i::text").extract_first()
            # Fix: extract_first() may return None; guard before strip()
            # instead of raising AttributeError on products with no name node.
            name = product.css("div.p-name em::text").extract_first()
            item['name'] = name.strip() if name else name
            item['commit'] = product.css("div.p-commit a.comment::text").extract_first()
            item['shop'] = product.css("div.p-shop a::attr(title)").extract_first()
            yield item
