# 使用Scrapy框架和Selenium配合爬取京东网站商品列表信息（>=50页）：网址：https://list.jd.com/list.html?cat=670,671,672
# 第十周作业1)
# 班级：Python五期
# 学员：李子坚

# -*- coding: utf-8 -*-
from scrapy import Request, Spider
from urllib.parse import quote
from scrapyseleniumjob.items import JdProductItem


class JdproductSpider(Spider):
    """Spider that crawls JD.com category product-list pages.

    Pagination is carried in ``Request.meta['page']``: the URL is identical
    for every page of a category, and the Selenium downloader middleware is
    expected to use the meta value to load the requested page.
    """

    name = 'jdproduct'
    # BUG FIX: the listing pages live on list.jd.com, which is NOT a
    # subdomain of www.jd.com — with 'www.jd.com' the offsite middleware
    # filters every request.  Allow the registrable domain instead.
    allowed_domains = ['jd.com']
    base_url = 'https://list.jd.com/list.html?cat='

    def start_requests(self):
        """Yield one request per (category, page), up to MAX_PAGE pages each.

        CATEGORIES and MAX_PAGE come from the project settings.  dont_filter
        is required because every page of a category shares one URL — only
        meta['page'] differs, and the dupe filter would drop the repeats.
        """
        for category in self.settings.get('CATEGORIES'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                url = self.base_url + quote(category)
                yield Request(url=url, callback=self.parse,
                              meta={'page': page}, dont_filter=True)

    def parse(self, response):
        """Parse one product-list page and yield a JdProductItem per product.

        Missing nodes yield empty strings instead of crashing the page.
        """
        # Retry a failed fetch.  BUG FIX: the original used the unimported
        # name ``scrapy.Request`` (NameError) and, lacking a ``return``,
        # fell through and tried to scrape the failed response anyway.
        # Preserve meta so the retry targets the same page, and bypass the
        # dupe filter since this URL was already seen.
        if response.status != 200:
            yield Request(url=response.url, callback=self.parse,
                          meta=response.meta, dont_filter=True)
            return

        # Locate every product node on the listing page.
        products = response.xpath('//div[@id="plist"]/ul[@class="gl-warp clearfix"]/li[@class="gl-item"]/div[@class="gl-i-wrap j-sku-item"]')
        for product in products:
            item = JdProductItem()
            # Images may be lazy-loaded: prefer @src, fall back to the
            # @data-lazy-img placeholder attribute.
            image = product.xpath('.//div[@class="p-img"]/a/img/@src').extract_first()
            if not image:
                image = product.xpath('.//div[@class="p-img"]/a/img/@data-lazy-img').extract_first()
            # BUG FIX: guard string concatenation / .strip() against missing
            # nodes — extract_first() returns None, which previously raised
            # TypeError ("https:" + None) or AttributeError (None.strip()).
            item['image'] = "https:" + (image or '')
            item['price'] = product.xpath('.//div[@class="p-price"]/strong[@class="J_price"]/i/text()').extract_first()
            item['title'] = product.xpath('.//div[@class="p-name"]/a/em/text()').extract_first(default='').strip()
            item['promo_words'] = product.xpath('.//div[@class="p-name"]/a/@title').extract_first()
            item['detail_url'] = "https:" + (product.xpath('.//div[@class="p-name"]/a/@href').extract_first() or '')
            item['comment_num'] = product.xpath('.//div[contains(@class, "p-commit")]/strong/a[@class="comment"]/text()').extract_first(default='').strip()
            item['shop'] = product.xpath('.//div[@class="p-shop"]/span/a/text()').extract_first(default='').strip()
            yield item