# -*- coding: utf-8 -*-
from scrapy import Request,Spider
from urllib.parse import quote
from JD_Scrapy_Selenium.items import GoodsInfoItem


class GoodsinfoSpider(Spider):
    """Crawl JD.com category listing pages and yield one GoodsInfoItem per product.

    The category is hard-coded in ``base_url`` (cat=670,671,672); the number
    of pages to fetch comes from the project setting ``MAX_PAGE``.
    """

    name = 'goodsInfo'
    allowed_domains = ['list.jd.com']
    base_url = 'https://list.jd.com/list.html?cat=670,671,672&page='

    def start_requests(self):
        """Yield one listing-page Request for each page 1..MAX_PAGE inclusive.

        ``dont_filter=True`` keeps these seed URLs out of the dupe filter.
        """
        # getint() also tolerates MAX_PAGE being configured as a string;
        # plain .get() would make range() raise TypeError in that case.
        max_page = self.settings.getint('MAX_PAGE')
        for page in range(1, max_page + 1):
            yield Request(url=self.base_url + str(page),
                          callback=self.parse,
                          meta={'page': page}, dont_filter=True)

    def parse(self, response):
        """Extract product fields from every <li> in the #plist listing.

        Missing price/comment nodes fall back to an empty string before the
        suffix is appended (the original raised TypeError on ``None + str``
        and silently yielded a partial item); a missing image leaves
        ``goodsImg`` as None instead of crashing on ``"http:" + None``.
        """
        for product in response.xpath("//div[@id='plist']/ul/li"):
            item = GoodsInfoItem()
            try:
                item['goodsName'] = product.xpath(
                    ".//div[@class='p-name']/a/em/text()").extract_first()
                price = product.xpath(
                    ".//strong[@class='J_price']/i/text()").extract_first(default='')
                item['goodsprice'] = price + "元"
                # @src for eagerly loaded images, @data-lazy-img for lazy ones.
                img = product.xpath(
                    ".//div[@class='p-img']/a/img/@src"
                    " | .//div[@class='p-img']/a/img/@data-lazy-img"
                ).extract_first()
                item['goodsImg'] = "http:" + img if img else None
                comment = product.xpath(
                    ".//a[@class='comment']/text()").extract_first(default='')
                item['goodsComment'] = comment + "评论数"
                item['shop'] = product.xpath(
                    ".//div[@class='p-shop']/span/a/text()").extract_first()
            except Exception:
                # Best-effort scraping: log with traceback (was print(err))
                # and still yield the partial item, as the original did.
                self.logger.exception("failed to parse product on %s", response.url)
            yield item
