import time

import scrapy
from scrapy import Request, Selector

from JDSpider.items import JdspiderItem


class JdSpider(scrapy.Spider):
    """Spider that crawls JD.com search results for a fixed set of keywords
    and yields one JdspiderItem per product listing."""

    name = 'jd'
    allowed_domains = ['jd.com']
    start_urls = ['https://search.jd.com/']

    def start_requests(self):
        """
        Issue the initial search requests, one per keyword/page combination.

        :yield: Request objects handed to the downloader middleware; each
            carries crawl bookkeeping (keyword, page counts, start time)
            in ``meta`` for downstream components.
        """
        start_time = time.time()
        keywords = ['手机', '笔记本']  # product keywords to crawl
        page_total = 2  # number of result pages to fetch per keyword
        page_count = 60  # maximum number of listings on one result page
        total = page_total * page_count * len(keywords)
        print("正在准备爬取 %d 类 %d 条数据：" % (len(keywords), total), end='')
        for keyword in keywords:
            print(keyword, end=' ')
        for keyword in keywords:
            for page in range(page_total):
                print("\n正在爬取第 %d/%d 页 【%s】 数据..." % (page + 1, page_total, keyword))
                # JD's search paginates in half pages: UI page N corresponds
                # to query parameter page=2N-1.
                url = f'https://search.jd.com/Search?keyword={keyword}&page={2 * (page + 1) - 1}'
                yield Request(url=url, meta={'keywords': keywords, 'keyword': keyword, 'page_total': page_total,
                                             'page_count': page_count, 'start_time': start_time})

    def parse(self, response, **kwargs):
        """
        Parse one search-result page into product items.

        :param response: response returned through the downloader middleware
        :yield: JdspiderItem objects passed on to the item pipelines
        """
        sel = Selector(response)
        selectors = sel.css('#J_goodsList > ul > li')
        for selector in selectors:
            item = JdspiderItem()
            # Bug fix: the original used extract_first() here, which returns
            # None when a listing has no price node, making ''.join(None)
            # raise TypeError. Use the same extract()-then-join pattern as
            # the other fields; it yields the identical string when the node
            # exists and '' when it does not.
            item['price'] = ''.join(selector.css('div.p-price > strong > i::text').extract()).strip()
            item['title'] = ''.join(selector.css('div.p-name.p-name-type-2 > a > em::text').extract()).strip()
            item['comment'] = ''.join(selector.css('div.p-commit > strong > a::text').extract()).strip()
            item['shop'] = ''.join(selector.css('div.p-shop > span > a::text').extract()).strip()
            item['label'] = ''.join(selector.css('div.p-icons > i::text').extract())
            yield item
