#!/usr/bin/env python
# -*- coding: utf-8 -*-

import scrapy
import re
from scrapy.http import Request

from laptop.items import LaptopComment


class LaptopSpider(scrapy.Spider):
    """Spider that scrapes product reviews for one laptop on JD.com.

    Walks the JSONP comment endpoint page by page (pages 1..``max_page``)
    and yields one :class:`LaptopComment` item per review found in each
    response.
    """

    name = "laptop"
    allowed_domains = ["club.jd.com"]

    # JSONP comment endpoint; only the page number varies between requests,
    # so keep a single template instead of three hand-copied URLs.
    BASE_URL = (
        "https://club.jd.com/comment/productPageComments.action"
        "?callback=fetchJSON_comment98&productId=100010370680&score=0"
        "&sortType=5&page={page}&pageSize=10&isShadowSku=0"
        "&rid=a38999eabe1d7422&fold=1"
    )

    # Pre-compiled pattern: the capture group extracts ONLY the review text
    # out of a '"content":"..."' fragment of the raw JSONP payload.
    CONTENT_RE = re.compile(r'"content":"(.+?)"')

    # Last page to crawl (inclusive). Overridable per run with
    # ``scrapy crawl laptop -a max_page=20``.
    max_page = 160

    start_urls = [BASE_URL.format(page=1)]

    def __init__(self, *args, **kwargs):
        super(LaptopSpider, self).__init__(*args, **kwargs)
        # Page 1 is fetched by start_requests(); this is the next page to
        # schedule once the first response has been processed.
        self.page = 2

    def start_requests(self):
        # Route the first response through process() explicitly instead of
        # relying on the default parse() callback (which is not defined).
        return [Request(self.BASE_URL.format(page=1), callback=self.process)]

    def process(self, response):
        """Extract review texts from one JSONP page and schedule the next.

        :param response: raw HTTP response from the comment endpoint.
        :yields: ``LaptopComment`` items, then a ``Request`` for the next
                 page while ``self.page <= max_page``.
        """
        # JD serves this endpoint GBK-encoded; gb18030 is a strict superset.
        body = response.body.decode(encoding='gb18030', errors='strict')

        # BUG FIX: the original stored the whole '"content":"..."' JSON
        # fragment (key, colon and quotes included) in the item; the capture
        # group yields only the review text itself.
        for text in self.CONTENT_RE.findall(body):
            item = LaptopComment()
            item['content'] = text.strip()
            yield item

        # Schedule the following page until the configured limit is reached.
        # max_page may arrive as a string when set via ``-a``, hence int().
        if self.page <= int(self.max_page):
            next_page_url = self.BASE_URL.format(page=self.page)
            print(next_page_url)
            yield Request(next_page_url, callback=self.process)
            self.page += 1

