from scrapy.contrib.spiders import CrawlSpider
from scrapy.spiders import Spider
from scrapy.selector import Selector
import pymysql
from scrapy.http import Request,Response


def get_start_urls():
    """Load product-page URLs from the ``jd_bra_url`` MySQL table.

    Each row's ``url`` column is turned into a JD comment-page start URL
    of the form ``https://<url>#comment``.

    Returns:
        list[str]: the start URLs, or an empty list if the query fails.
    """
    config = {
        'host': "127.0.0.1",
        'port': 3306,
        'user': 'root',
        'password': '',
        'db': 'test',
        'charset': 'utf8',
        # DictCursor so rows are dicts and we can index by column name.
        'cursorclass': pymysql.cursors.DictCursor,
    }
    connection = pymysql.connect(**config)
    try:
        with connection.cursor() as cursor:
            sql = 'SELECT url from jd_bra_url'
            print(sql)
            cursor.execute(sql)
            # Fetch while the cursor is still open: the original code
            # called fetchall() after the `with` block had already
            # closed the cursor.  A read-only SELECT needs no commit().
            rows = cursor.fetchall()
        return ["https://" + row['url'] + "#comment" for row in rows]
    except pymysql.MySQLError as e:
        # Log the failure instead of swallowing it silently, but keep
        # the original best-effort contract of returning no URLs.
        print("get_start_urls failed: %s" % e)
        return []
    finally:
        connection.close()


class jd_comment_spider(Spider):
    """Spider that visits JD product comment pages.

    Start URLs are pulled from MySQL at class-definition time via
    ``get_start_urls()``; ``raw_url`` is the template for JD's paged
    comment API (filled with a product id and a page number).
    """

    name = "jd_comment_spider"
    start_urls = get_start_urls()
    print(start_urls)

    # Template for the comment-API endpoint: productId, then page index.
    raw_url = "https://club.jd.com/comment/productPageComments.action?" \
              "productId={}&score=0&sortType=5&page={}&pageSize=10"

    def parse(self, response):
        """Extract the last-page link text from the comment pager.

        Only prints what it finds; nothing is yielded yet.
        """
        pager_xpath = ".//*[@id='comment-0']/div[11]/div/div/a[7]/text()"
        pages = Selector(response).xpath(pager_xpath).extract()
        print(pages)
        print("=========================================")