import scrapy
from scrapy.http import Request
from urllib import request
import pymysql
import json
import re


class JdSpider(scrapy.Spider):
    """Scrapy spider for JD.com's AI-book listing pages.

    For each listing page it extracts product id, price, title and link,
    fetches the per-product comment count from JD's comment API, and
    stores one row per product in the local MySQL table ``jd``.
    """

    name = 'jd'
    allowed_domains = ['jd.com']
    custom_settings = {
        # Per-spider overrides; these shadow the project-wide settings.
        'DOWNLOAD_DELAY': 1.25, 'DOWNLOAD_TIMEOUT': 60,
    }

    def __init__(self, *args, **kwargs):
        # Call the Spider base initializer so Scrapy's own setup
        # (name/start_urls handling) is not silently skipped.
        super().__init__(*args, **kwargs)
        # Browser-like UA; JD blocks the default Scrapy user agent.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) Gecko/20100101 Firefox/92.0"}
        # Count of listing pages processed so far (for progress logging).
        self.page = 0

    def start_requests(self):
        """Issue the request for the first listing page (called by Scrapy)."""
        init_url = "https://list.jd.com/list.html?cat=1713%2C3287%2C3804&page=1&s=1&click=0"
        return [Request(url=init_url, headers=self.headers, callback=self.parse)]

    def parse(self, response):
        """Handle page 1, then schedule the remaining listing pages.

        The loop is bounded (pages 2..35), not infinite; each scheduled
        request goes straight to ``get_data`` and is not re-parsed here.
        """
        self.get_data(response)
        for j in range(2, 36):
            # JD's listing URLs use odd page numbers and an 's' offset
            # that advances by 60 items per logical page.
            page = 2 * j - 1
            url_append = 57 + 60 * (j - 2)
            url = ("https://list.jd.com/list.html?cat=1713%2C3287%2C3804&page="
                   + str(page) + "&s=" + str(url_append) + "&click=0")
            yield Request(url, callback=self.get_data, headers=self.headers)

    def get_data(self, response):
        """Parse one listing page and persist the extracted products.

        Comment counts are not in the listing HTML (rendered by JS), so
        they are fetched from JD's comment-summary endpoint per product.
        """
        self.page += 1
        print("当前页" + str(self.page))
        # .extract() yields plain strings; without it we get Selector objects.
        product_id = response.xpath("//li[@class='gl-item']/@data-sku").extract()
        prices = response.xpath(
            "//li[@class='gl-item']/div/div[@class='p-price']/strong/i/text()").extract()
        titles = response.xpath("//li[@class='gl-item']/div/div[@class='p-name']/a/em/text()").extract()
        links = response.xpath("//li[@class='gl-item']/div/div[@class='p-name']/a/@href").extract()
        print(f"长度：products-{len(product_id)},prices-{len(prices)}, titles-{len(titles)},links-{len(links)}")
        comments = []
        for i in product_id:
            # NOTE(review): this blocking urllib call runs inside Scrapy's
            # event loop and stalls the reactor; consider yielding a
            # Request instead. Left as-is to preserve behavior.
            comment_url = "https://club.jd.com/comment/productCommentSummaries.action?referenceIds=" + str(i)
            print(comment_url)
            comment_req = request.Request(url=comment_url, headers=self.headers)
            # GBK decoding keeps the Chinese unit suffix (e.g. 万) intact.
            commentString = request.urlopen(comment_req).read().decode("GBK")
            print(commentString)
            pat = '"CommentCountStr":"(.*?)"'
            comment = re.compile(pat).findall(str(commentString))
            print(comment)
            # Guard against an empty match (API shape change / blocked IP):
            # the original comment[0] raised IndexError here.
            comments.append(comment[0] if comment else "")
        if prices:
            self.data_storage(titles, prices, links, comments)

    def data_storage(self, titles, prices, links, comments):
        """Insert one row per product into the ``jd`` table.

        Fixes over the original:
        - one connection for the whole batch (the old code opened a new
          connection per row and closed only the last one — a leak);
        - parameterized INSERT via a cursor (the old string-built SQL was
          injectable and broke on titles containing quotes).
        """
        sql = "insert into jd(title,price,link,comment) values(%s,%s,%s,%s)"
        connect = pymysql.connect(user="root", password="root", db="datamine",
                                  autocommit=True, host="localhost")
        try:
            with connect.cursor() as cursor:
                # zip stops at the shortest list, so a partially-scraped
                # page can no longer raise IndexError.
                for row in zip(titles, prices, links, comments):
                    print(sql, row)
                    cursor.execute(sql, row)
        finally:
            connect.close()
