# -*- coding: utf-8 -*-
from scrapy import Request, Spider
from urllib.parse import quote
from jindong.items import JindongItem
import scrapy


class JdSpider(scrapy.Spider):
    """Scrape laptop listings from JD.com category/search list pages.

    Keywords and page depth come from the project settings:
    ``KEYWORDS`` (iterable of search terms) and ``MAX_PAGE`` (int).
    Each parsed listing is emitted as a :class:`JindongItem`.
    """
    name = 'jd'
    allowed_domains = ['list.jd.com']

    def start_requests(self):
        """Yield one request per (keyword, page) combination.

        Bug fix: the original built the same URL for every page — ``page``
        never appeared in the URL, so ``MAX_PAGE`` identical requests were
        issued per keyword and pagination never happened. The page number
        is now embedded in the query string; ``meta['page']`` still carries
        it to the callback, and ``dont_filter=True`` is kept so near-equal
        URLs are not dropped by the dupe filter.
        """
        base_url = 'https://list.jd.com/list.html?cat='
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                # quote() percent-encodes non-ASCII keywords for the URL.
                url = f'{base_url}{quote(keyword)}&page={page}'
                yield Request(url=url, callback=self.parse, meta={'page': page}, dont_filter=True)

    def parse(self, response):
        """Extract title and price from each product card on a list page.

        Yields:
            JindongItem: with ``title`` (joined <em> text fragments,
            whitespace-stripped) and ``price`` (raw text of the first
            ``J_price`` node, or ``None`` when the node is absent).
        """
        laptops = response.xpath('//div[@id="plist"]//li[@class="gl-item"]')
        for laptop in laptops:
            item = JindongItem()
            # Titles are split across multiple text nodes (search-term
            # highlighting wraps fragments), so join before stripping.
            item['title'] = ''.join(laptop.xpath('.//div[@class="p-name"]/a/em/text()').extract()).strip()
            item['price'] = laptop.xpath('.//strong[@class="J_price"]/i/text()').extract_first()
            yield item
