# -*- coding: utf-8 -*-
from time import sleep
from urllib import parse

import scrapy
from scrapy import Request

dest = '手机'


# Example of the target URL shape:
# https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&wq=%E6%89%8B%E6%9C%BA&pvid=437813fa882441649140337a8488d965
def item(name=dest, page=1, s=30):
    """Build a JD.com search-results URL for *name* at the given page/offset."""
    encoded = parse.quote(name)  # percent-encode the (possibly CJK) keyword
    return (
        'https://search.jd.com/Search?keyword=' + encoded
        + '&enc=utf-8&qrst=1&rt=1&stop=1&vt=2'
        + '&wq=' + encoded
        + f'&page={page}&s={s}&psort=3&click=0'
    )


class ItcastSpider(scrapy.Spider):
    """Spider that re-requests a single CSDN blog URL up to 200 times.

    Each response schedules another request for the same URL (duplicate
    filtering disabled), so the page is fetched repeatedly.  Throttling is
    delegated to Scrapy's DOWNLOAD_DELAY rather than time.sleep(): calling
    sleep() inside a callback blocks the Twisted reactor and stalls every
    concurrent request in the crawler.
    """
    name = 'scdn'

    custom_settings = {
        'ITEM_PIPELINES': {
            #  'doubanbook.pipelineItcastItem.itcastItemPipeline': 1,
            # 'doubanbook.pipelineImage.cctvImagePipeline': 299,  # enable the image-download pipeline
            'doubanbook.pipelines.DoubanbookPipeline': 1,
        },
        # Configure private proxy-IP / user-agent middlewares
        'DOWNLOADER_MIDDLEWARES': {
            #    'doubanbook.middlewares.DoubanbookSpiderMiddleware': 543,
            'doubanbook.middlewareSpider.Uamid': 543,
            # 'doubanbook.middlewareSpider.IPPOOlS': 3,
        },
        # Replaces the former sleep(0.5) in parse(): same 0.5 s pacing,
        # but handled by the scheduler without blocking the reactor.
        'DOWNLOAD_DELAY': 0.5,
    }
    page = 1  # request counter; crawl stops after 200 re-requests
    s = 1     # offset counter; incremented but not used in the requested URL
    url = 'https://blog.csdn.net/dh1151313194/article/details/103642912'
    start_urls = [url, ]

    def parse(self, response):
        """Schedule the next fetch of the same URL until the cap is reached."""
        # ok = myselenium(item())
        # selector = scrapy.Selector(response)
        # print(selector.extract())

        # dont_filter=True bypasses Scrapy's duplicate-request filter so the
        # identical URL is actually re-fetched instead of being dropped.
        if self.page <= 200:
            self.page += 1
            self.s += 60
            yield Request(url=self.url, dont_filter=True, callback=self.parse)
