import sys
from scrapy.contrib.spiders import CrawlSpider
from scrapy.selector import Selector
from jd_spider.items import JdSpiderItem
import pymysql

"""
在使用此爬虫爬取京东商品信息的时候需要在seting.py中加入如下内容
DOWNLOADER_MIDDLEWARES = {
   'jd_spider.middlewares.middleware.JSMiddleware': 543,
}

开启middleware中间件,使用js解析引擎来爬取
"""
class JdSpider(CrawlSpider):
    """Crawl JD search-result pages and persist product URLs to MySQL.

    Requires the JS-rendering downloader middleware to be enabled in
    settings.py (see the module-level note at the top of this file).
    """

    name = "jd_item_spider"
    allowed_domains = ["jd.com"]

    # JD alternates odd/even page numbers; even pages 2..400 cover result
    # pages 1..200 for the URL-encoded keyword search.
    start_urls = [
        "https://search.jd.com/Search?keyword=%E6%96%87%E8%83%B8&enc="
        "utf-8&qrst=1&rt=1&stop=1&vt=2&offset=4&page={}&s=1&click=0".format(page)
        for page in range(2, 401, 2)
    ]

    # Connection settings for the MySQL instance that stores scraped URLs.
    # NOTE(review): credentials are hard-coded here; consider moving them to
    # settings.py / environment variables.
    DB_CONFIG = {
        'host': "127.0.0.1",
        'port': 3306,
        'user': 'root',
        'password': '',
        'db': 'test',
        'charset': 'utf8',
        'cursorclass': pymysql.cursors.DictCursor,
    }

    def parse(self, response):
        """Extract product links from one search-result page and save them.

        Returns an (empty) JdSpiderItem to satisfy Scrapy's callback
        contract; the real output is the rows written to `jd_bra_url`.
        """
        sel = Selector(response)
        raw_hrefs = sel.xpath(
            ".//*[@id='J_goodsList']/ul/li/div/div[4]/a/@href"
        ).extract()
        print(raw_hrefs)
        print(len(raw_hrefs), "product links extracted")

        # hrefs are protocol-relative ("//item.jd.com/..."); strip the
        # leading double slash before storing.
        bra_list = [str(href).replace("//", "") for href in raw_hrefs]

        if bra_list:
            # One connection per page (previously one per URL), with a
            # single batched insert and a single commit.
            connection = pymysql.connect(**self.DB_CONFIG)
            try:
                with connection.cursor() as cursor:
                    sql = 'INSERT INTO jd_bra_url (url) VALUES (%s)'
                    # Parameters must be tuples; the original passed a bare
                    # parenthesized string.
                    cursor.executemany(sql, [(url,) for url in bra_list])
                # Autocommit is off by default in pymysql; commit explicitly
                # so the inserted rows are persisted.
                connection.commit()
            finally:
                connection.close()

        return JdSpiderItem()


