import scrapy

from jdsc.items import JdscItem


class DcjdSpider(scrapy.Spider):
    """Spider for JD.com listing pages under cat=652,654,831.

    Crawls the category list page, yields one ``JdscItem`` per product
    (price, name, link, shop, reply count), then follows paginated URLs
    until ``page`` reaches 20.
    """

    name = 'dcjd'
    allowed_domains = ['jd.com']
    start_urls = ['https://list.jd.com/list.html?cat=652,654,831']
    # Pagination template: {0} = page number, {1} = 's' offset parameter.
    next_urls = 'https://list.jd.com/list.html?cat=652%2C654%2C831&page={0}&s={1}&click=1'
    page = 1  # advanced by 2 per request (JD list pages use odd page numbers)
    s = -3    # advanced by 60 per request alongside `page`
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en',
        'Referer': 'https://www.jd.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
    }

    def parse(self, response):
        """Extract products from one listing page and schedule the next page.

        Yields one ``JdscItem`` per ``<li>`` in the product grid, then a
        ``scrapy.Request`` for the next page while ``page < 20``.
        """
        # Use the page just crawled as the Referer for the follow-up request.
        # Bug fix: was lowercase 'referer', which added a second key instead
        # of updating the 'Referer' entry declared on the class.
        self.headers['Referer'] = response.url
        root = response.xpath('//ul[contains(@class, "gl-warp")]/li')
        for r in root:
            # Bug fix: instantiate a fresh item per product. The original
            # reused one mutable item across yields, so downstream consumers
            # holding references saw every item overwritten by the last one.
            item = JdscItem()
            item['price'] = r.xpath('.//div/div[2]/strong/i/text()').get()
            item['name'] = r.xpath('.//div/div[3]/a/em/text()').get()
            item['link'] = r.xpath('.//div/div[3]/a/@href').get()
            item['shop'] = r.xpath('.//div/div[5]/span/a/text()').get()
            item['reply'] = r.xpath('.//div/div[4]/strong/a/text()').get()
            # Idiom: use the spider's logger instead of print so output goes
            # through Scrapy's logging configuration.
            self.logger.debug('%s %s %s %s %s', item['name'], item['shop'],
                              item['price'], item['reply'], item['link'])
            yield item

        self.page += 2
        self.s += 60
        if self.page < 20:
            yield scrapy.Request(self.next_urls.format(self.page, self.s),
                                 callback=self.parse, headers=self.headers)
