# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, Spider
from urllib.parse import quote
from jdspider.items import JdspiderItem

class JdgoodsSpider(scrapy.Spider):
    """Crawl JD.com search-result pages for configured keywords.

    Run with:  scrapy crawl jdgoods

    Reads two values from the project settings:
      * KEYWORDS - iterable of search terms to query.
      * MAX_PAGE - number of result pages to fetch per keyword.
    Yields one ``JdspiderItem`` per product found on each page.
    """
    name = 'jdgoods'
    # Use the registrable domain so requests to search.jd.com (and any
    # follow-up links) are not dropped by the offsite middleware; the
    # original 'www.jd.com' did not match the actual request host.
    allowed_domains = ['jd.com']
    base_url = 'https://search.jd.com/Search?keyword='

    def start_requests(self):
        """Yield one search request per (keyword, page) pair.

        Bug fix: the original never put ``page`` into the URL, so all
        MAX_PAGE requests fetched the same first page.  JD paginates with
        the ``page`` query parameter using odd numbers (UI page 1 -> 1,
        UI page 2 -> 3, ...), hence the 2*page - 1 mapping below.
        """
        print("请求request给到schedule")
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                url = (self.base_url + quote(keyword)
                       + '&enc=utf-8&page=' + str(2 * page - 1))
                print("调用解析函数", url)
                # dont_filter: keyword queries may legitimately repeat.
                yield Request(url=url, callback=self.parse,
                              meta={'page': page}, dont_filter=True)

    def parse(self, response):
        """Extract one item per product card on a search-result page.

        Missing sub-nodes are tolerated: ``extract_first`` may return
        ``None``, which the original code passed straight into
        ``str.replace`` / string concatenation and crashed on.
        """
        print("解析download传回数据")
        goods = response.xpath(
            ".//div[@id='J_goodsList']/ul[contains(@class, 'gl-warp')]"
            "/li[@class='gl-item']/div[@class='gl-i-wrap']")
        for count, good in enumerate(goods, start=1):
            item = JdspiderItem()
            # default='' so the replace() chain cannot hit None.
            name = (good.xpath(".//div[contains(@class,'p-name')]/a")
                    .xpath('string(.)').extract_first(default=''))
            item['good_name'] = name.replace('\n', '').replace('\t', '')
            item['good_price'] = good.xpath(
                ".//div[contains(@class,'p-price')]/strong/i/text()"
            ).extract_first()
            item['good_commit'] = good.xpath(
                ".//div[contains(@class,'p-commit')]/strong/a/text()"
            ).extract_first()
            href = good.xpath(
                ".//div[contains(@class,'p-name')]/a/@href").extract_first()
            # JD links are protocol-relative (//item.jd.com/...); only
            # prefix a scheme when a link was actually found.
            item['good_url'] = "https:" + href if href else None
            print("商品", count, "===", item)
            yield item

