import scrapy
from ..items import JdCrawlerItem


class JdSearchCrawler(scrapy.Spider):
    """Crawl JD.com search results for a fixed set of keywords.

    For each keyword, requests the first 5 search result pages and yields
    one ``JdCrawlerItem`` per product listed on each page.
    """

    name = 'jd_search'

    def start_requests(self):
        """Yield a GET request for pages 1-5 of each keyword's search results.

        Returns:
            Iterator of ``scrapy.Request`` objects for the search URLs.
        """
        headers = {
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            # Region cookie pins stock/price display to a fixed delivery area.
            'Cookie': 'areaId=17; ipLoc-djd=17-1381-50718-0; rkv=1.0; qrsc=3;',
        }
        keywords = ['洗衣液', '牙膏', '沐浴露']
        for key in keywords:
            for page in range(1, 6):
                url = f'https://search.jd.com/Search?keyword={key}&qrst=1&psort=3&wq={key}&stock=1&psort=3&page={page}&s=61&click=0'
                # Plain Request instead of FormRequest(method='GET'): there is
                # no form data here, so FormRequest added nothing but confusion.
                yield scrapy.Request(url=url, headers=headers)
        # NOTE: removed a stray `break` after the inner loop that stopped the
        # crawl after the first keyword, making the rest of `keywords` dead.

    def parse(self, response, **kwargs):
        """Extract product entries from one search result page.

        Args:
            response: The search-results page response.

        Yields:
            ``JdCrawlerItem`` with ``img``, ``price``, ``name``, ``shop`` and
            ``url`` fields; missing values default to ''.
        """
        for good in response.css('#J_goodsList > ul > li'):
            item = JdCrawlerItem()
            # `or ''` normalizes the None that .get() returns on a miss.
            item['img'] = good.css('div.p-img > a > img::attr(data-lazy-img)').get() or ''
            item['price'] = good.css('div.p-price > strong > i::text').get() or ''
            item['name'] = good.css('div[class^=p-name] > a::attr(title)').get() or ''
            item['shop'] = good.css('div.p-shop > span > a::attr(title)').get() or ''
            item['url'] = good.css('div[class^=p-name] > a::attr(href)').get() or ''
            yield item
