# -*- coding: utf-8 -*-
import scrapy
from scrapy_selenium.items import ProductItem


class JdSpider(scrapy.Spider):
    """Spider that crawls JD.com category listing pages (cat=670,671,672)
    and yields one ProductItem per product found."""
    name = 'JD'
    allowed_domains = ['list.jd.com']
    # start_urls = ['https://list.jd.com/list.html?cat=670,671,672&page=1']

    pcount = 0  # running counter: index of the current product across all pages

    def start_requests(self):
        '''Generate requests for all 60 listing pages of the category.'''
        print("共爬取60页商品数据")
        # BUGFIX: range(1, 60) only covered pages 1-59 although the message
        # above (and the intent) is 60 pages; range(1, 61) yields 1..60.
        for page in range(1, 61):
            print("正在爬取第", page, "页商品数据")
            next_url = "https://list.jd.com/list.html?cat=670,671,672&page=" + str(page)
            # dont_filter=True: the listing URLs differ only by query string,
            # so make sure the dupe filter never drops them.
            yield scrapy.Request(url=next_url, callback=self.parse_page,
                                 meta={'page': page}, encoding='utf-8',
                                 dont_filter=True)

    def parse_page(self, response):
        '''Parse one listing page: extract name, price, review count and
        promo text for every product tile (.gl-item) and yield an item.

        Missing fields default to the empty string so downstream pipelines
        always see every key populated.
        '''
        productList = response.css(".gl-item")
        for product in productList:
            self.pcount += 1
            item = ProductItem()

            # Product name; strip() handles any mix of leading/trailing
            # whitespace (the old strip("\n").strip(" ") missed e.g. " \n").
            name = product.css(".p-name a em::text").extract_first(default="")
            item["name"] = name.strip()

            # Price (first <i> inside the J_price node)
            item["price"] = product.css(
                ".p-price .J_price:first-child i::text").extract_first(default="")

            # Number of reviews
            item["commitNum"] = product.css(
                ".p-commit strong a::text").extract_first(default="")

            # Promotional blurb shown under the product name
            item["promo"] = product.css(
                ".p-name a .promo-words::text").extract_first(default="")

            print("第", self.pcount, "条商品信息:")
            yield item

