from scrapy.contrib.spiders import CrawlSpider
from scrapy.selector import Selector
import pymysql
import requests
from scrapy.http import Request,Response


def get_itemids():
    """Fetch all item ids from the jd_bra_info table.

    Returns:
        list[dict]: rows shaped like {'itemid': ...} (DictCursor);
        an empty list if the query fails for any reason.
    """
    config = {
        'host': "127.0.0.1",
        'port': 3306,
        'user': 'root',
        'password': '',
        'db': 'test',
        'charset': 'utf8',
        'cursorclass': pymysql.cursors.DictCursor,
    }
    connection = pymysql.connect(**config)
    try:
        with connection.cursor() as cursor:
            sql = 'SELECT itemid from jd_bra_info'
            print(sql)
            cursor.execute(sql)
            # Fetch while the cursor is still open: the original code
            # called fetchall() only after the `with` block had already
            # closed the cursor. A SELECT also needs no commit().
            return cursor.fetchall()
    except Exception as e:
        # Best effort: report the failure instead of swallowing it silently.
        print("get_itemids failed:", e)
        return []
    finally:
        connection.close()

# Template for JD's product-comment JSON endpoint; the two positional
# placeholders are (productId, page). pageSize is fixed at 10 per page.
raw_url = "https://club.jd.com/comment/productPageComments.action?" \
                  "productId={}&score=0&sortType=5&page={}&pageSize=10"


def get_start_urls(itemids, page=0):
    """Build comment-API URLs for each item id.

    Args:
        itemids: iterable of JD product ids.
        page: comment page number to request (default 0, the first page,
            matching the original hard-coded behavior).

    Returns:
        list[str]: one comment URL per item id, in input order.
    """
    url_template = "https://club.jd.com/comment/productPageComments.action?" \
                   "productId={}&score=0&sortType=5&page={}&pageSize=10"
    # Comprehension instead of the original append loop; also avoids
    # shadowing the builtin `id`.
    return [url_template.format(itemid, page) for itemid in itemids]

def save_to_mysql(itemid, json_info):
    """Insert one (itemid, raw comment JSON) row into jd_bra_comment."""
    connection = pymysql.connect(
        host="127.0.0.1",
        port=3306,
        user='root',
        password='',
        db='test',
        charset='utf8',
        cursorclass=pymysql.cursors.DictCursor,
    )
    try:
        with connection.cursor() as cursor:
            # Parameterized insert -- the driver escapes both values.
            cursor.execute(
                'INSERT INTO jd_bra_comment (itemid,json_info) VALUES (%s,%s)',
                (itemid, json_info),
            )
        # Autocommit is off by default, so commit explicitly to persist.
        connection.commit()
    finally:
        connection.close()

def wakl_comment(itemid, page):
    """Build the comment URL for (itemid, page) and parse those fields back.

    The original body called ``raw_url.format()`` with no arguments, which
    raises IndexError on a template containing positional ``{}`` placeholders,
    ignored both parameters, and returned nothing.

    Args:
        itemid: JD product id.
        page: comment page number.

    Returns:
        tuple[str, str]: (itemid, page) as string values parsed back out of
        the generated URL.
    """
    url_template = "https://club.jd.com/comment/productPageComments.action?" \
                   "productId={}&score=0&sortType=5&page={}&pageSize=10"
    current_url = url_template.format(itemid, page)
    # Query layout: productId=..&score=0&sortType=5&page=..&pageSize=10
    current_page = current_url.split("&")[3].split("=")[1]
    parsed_itemid = current_url.split("?")[1].split("&")[0].split("=")[1]
    return parsed_itemid, current_page


if __name__ == '__main__':
    # Collect the plain item-id values out of the DictCursor rows.
    # NOTE(review): `id` shadows the builtin here and below.
    itemids_tmp = get_itemids()
    itemids = []
    for id in itemids_tmp:
        info = id['itemid']
        itemids.append(info)


    # Crawl each item's comment pages two at a time until two consecutive
    # pages return identical bodies, which is taken as "past the last page".
    # NOTE(review): presumably JD keeps serving the final page's content for
    # out-of-range page numbers -- confirm, otherwise this never terminates.
    for id in itemids:
        run = True
        # NOTE(review): starts at page 1, but get_start_urls uses page 0 --
        # page 0 is never fetched here; confirm whether that is intended.
        page = 1
        while run:
            current_url = raw_url.format(id,page)
            #print()
            # NOTE(review): requests.get without a timeout can hang forever;
            # there is also no error handling or rate limiting.
            current_text = requests.get(current_url).text

            next_url = raw_url.format(id,page+1)
            next_text = requests.get(next_url).text

            print(current_url)
            print(next_url)
            print("------------------------------------")
            if current_text != next_text:
                # Two distinct pages: persist both and skip ahead by two.
                save_to_mysql(id,current_text)
                save_to_mysql(id,next_text)
                page +=2
            else:
                # Duplicate body: save the last real page once and stop.
                save_to_mysql(id,current_text)
                run = False



    ##  https://club.jd.com/comment/productPageComments.action?productId=10516572711&score=0&sortType=5&page=201&pageSize=10



    # def parse(self, response):
    #     current_url = str(response.url)
    #     current_page = current_url.split("&")[3].split("=")[1]
    #     itemid = current_url.split("?")[1].split("&")[0].split("=")[1]
    #
    #     current_text = response.text
    #     # print("============================================")
    #     # print(requests.get(current_url).text == response.text)
    #     # print("============================================")
    #     next_page = current_page + 1
    #     next_url = raw_url.format(itemid, current_page)
    #     next_text = requests.get(next_url).text
    #
    #     if current_page == next_url:
    #         save_to_mysql(itemid, current_text)
    #         return
    #     else:
    #         yield Request(url=next_url, callback=self.parse)
































