import scrapy


from selenium import webdriver
from selenium.webdriver import ChromeOptions

class CommoditytestSpider(scrapy.Spider):
    """Scrape commodity listings (name, author, link, price) from JD.com.

    A Selenium-driven Chrome browser is created in ``__init__`` so a
    downloader middleware can render JavaScript-heavy list pages; the
    browser is shut down in :meth:`closed` when the crawl ends.
    """

    name = 'commoditytest'
    allowed_domains = ['list.jd.com']
    start_urls = ['https://list.jd.com/list.html?cat=5272,16968']

    def __init__(self, *args, **kwargs):
        # Forward crawler-supplied arguments (e.g. ``-a key=value``) so
        # Scrapy can configure the spider normally; the ``name`` class
        # attribute already identifies it, no need to re-pass it.
        super(CommoditytestSpider, self).__init__(*args, **kwargs)
        option = ChromeOptions()
        # option.headless = True
        # Raw string is required: a plain literal here contains invalid
        # escape sequences ('\P', '\G', '\C', '\A', '\c') in the path.
        self.driver = webdriver.Chrome(
            r'C:\Program Files\Google\Chrome\Application\chromedriver.exe',
            options=option,
        )

    def parse(self, response):
        """Yield one item dict per commodity found on a list page.

        Items are yielded (not just printed) so they actually flow into
        Scrapy's item pipelines and feed exports.
        """
        commodity_list = response.xpath('//*[@id="J_goodsList"]/ul/li/div')
        for commodity in commodity_list:
            item = {}
            item['commodity_name'] = commodity.xpath('./div[3]/a/em/text()').extract_first()
            item['commodity_author'] = commodity.xpath('./div[4]/span[1]/a/text()').extract_first()
            # Listing hrefs are relative/protocol-relative; resolve them
            # against the response URL before storing.
            item['commodity_link_detail'] = response.urljoin(commodity.xpath('./div[3]/a/@href').extract_first())
            item['commodity_price'] = commodity.xpath('./div[2]/strong/i/text()').extract_first()
            yield item

    def closed(self, reason):
        """Quit the Selenium browser when the spider finishes.

        Fixes a resource leak: previously the ChromeDriver/Chrome
        processes were never shut down after the crawl.
        """
        self.driver.quit()
