# -*- coding: utf-8 -*-
import scrapy
import urllib.request
import zlib
import random
import re
from scrapy.http import Request
#from jdbooks.items import JdbooksItem

class BooksSpider(scrapy.Spider):
    """Crawl book listings from JD.com.

    Crawl flow:
      1. ``start_requests`` scrapes the book portal front page for the
         top-level category ("hall") URLs and schedules them.
      2. ``pd_next`` extracts sub-channel category ids from a hall page,
         reads the total page count, and schedules listing pages.
      3. ``parse`` yields one item dict per book on a listing page.

    NOTE(review): crawl breadth is deliberately capped (2 halls, 2
    sub-channels, 2 pages per sub-channel) to avoid getting the IP banned.
    """
    name = 'books'
    allowed_domains = ['jd.com']
    #start_urls = ['http://jd.com/']
    # Rotated User-Agent pool so the requests look less uniform.
    ua = ["Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",  # Chrome
          "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5514.400 QQBrowser/10.1.1614.400",  # QQ Browser
          "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",  # IE
          ]

    def start_requests(self):
        """Fetch the book portal front page and yield requests for the
        first two hall (top-level category) pages."""
        base_url = "https://book.jd.com/"
        base_req = urllib.request.Request(base_url)
        base_req.add_header("User-Agent", random.choice(self.ua))
        # The server answers gzip-compressed; without this header the
        # decompression/decoding below fails.
        base_req.add_header('Accept-Encoding', 'gzip, deflate')
        # 16 + MAX_WBITS tells zlib to expect a gzip wrapper.
        with urllib.request.urlopen(base_req) as fp:
            base_data = zlib.decompress(fp.read(), 16 + zlib.MAX_WBITS).decode('utf-8', 'ignore')

        # First narrow down to the navFirst JSON fragment, then match the
        # individual hall URLs inside it (keeps the patterns unambiguous).
        pat1 = r'navFirst:.*?\{"NAME".*?"URL"(.*?","ANCHOR":"17")\}'
        nav_fragment = re.compile(pat1).findall(base_data)
        if not nav_fragment:
            # Page layout changed (or we were blocked); nothing to crawl.
            # FIX: the original raised IndexError on nav_fragment[0] here.
            return

        pre_url = []
        for anchor in range(1, 18):
            # One pattern per ANCHOR index; the lazy quantifier keeps
            # neighbouring URLs from being matched together.
            pat2 = '"NAME":".*"URL":"(.*?.html)","ANCHOR":"' + str(anchor) + '"'
            for url in re.compile(pat2).findall(nav_fragment[0]):
                # Un-escape the JSON-escaped slashes (\/ -> /).
                pre_url.append('https:' + re.sub(r'\\/', '/', url))

        # Only crawl the first two halls to avoid an IP ban.
        for url in pre_url[:2]:
            yield Request(url, callback=self.pd_next)

    def pd_next(self, response):
        """Parse a hall page: collect sub-channel cat ids, then schedule
        the first two listing pages of the first two sub-channels."""
        pd_data = response.body.decode("utf-8", "ignore")
        # Original href: //list.jd.com/list.html?cat=1713,3259,3334
        # Dots stand in for the slashes and the question mark so the
        # pattern needs no escaping.
        pat3 = '..list.jd.com.list.html.cat=(.*?)" title='
        cat_ids = set(re.compile(pat3).findall(pd_data))  # de-duplicate

        for channel_index, cat in enumerate(cat_ids):
            # Only the first two sub-channels (IP-ban precaution).
            if channel_index > 1:
                break
            first_url = "https://list.jd.com/list.html?cat=" + cat
            first_req = urllib.request.Request(first_url)
            first_req.add_header("User-Agent", random.choice(self.ua))
            first_data = urllib.request.urlopen(first_req).read().decode('utf-8', 'ignore')
            # Total page count is rendered as "<em>共<b>N</b>页"; default
            # to a single page when the marker is missing.
            pat4 = '<em>共<b>(.*?)</b>页'
            total_page = re.compile(pat4).findall(first_data)
            total = int(total_page[0]) if total_page else 1
            # First two pages only (one page lists about 60 books).
            for page in range(1, min(total, 2) + 1):
                page_url = "https://list.jd.com/list.html?cat=" + cat + "&page=" + str(page)
                yield Request(page_url, callback=self.parse)

    def parse(self, response):
        """Yield one item dict per book found on a listing page."""
        # Channel breadcrumb; it lives outside the per-book <li> elements,
        # so it is read once before the loop.
        pd = response.xpath('//span[@class="curr"]/text()').extract()
        # Pad missing breadcrumb parts with the default placeholder.
        if len(pd) == 0:
            pd = ['缺省', '缺省']
        elif len(pd) == 1:
            pd = [pd[0], '缺省']
        pd1 = pd[0]
        pd2 = pd[1]

        # All book entries on this page (~60 per page).
        li_list = response.xpath('//div[@id="plist"]/ul[@class="gl-warp clearfix"]//li[@class="gl-item"]')

        for x1, li in enumerate(li_list, start=1):
            print(pd1)
            print(pd2)
            # normalize-space() strips the surrounding newlines/blanks that
            # a plain text() extraction would keep.
            bookname = li.xpath('normalize-space(.//div[@class="p-name"]/a/em/text())').extract_first()
            print("第" + str(x1) + "本书")
            print(bookname)

            # JD's stable product id (sku); also the key for the price and
            # comment-count endpoints below.
            bookid = li.xpath('.//a[@class="p-o-btn focus J_focus"]/@data-sku').extract_first()
            print(bookid)

            # FIX: guard against a missing sku (the original crashed with
            # TypeError on string concatenation) and against the endpoints
            # returning no match (the original crashed with IndexError).
            price = None
            comments = None
            if bookid:
                # Price comes from a separate JSONP endpoint.
                price_url = 'https://p.3.cn/prices/mgets?callback=jQuery7536137&skuIds=J_' + bookid
                price_req = urllib.request.Request(price_url)
                price_req.add_header("User-Agent", random.choice(self.ua))
                price_data = urllib.request.urlopen(price_req).read().decode("utf-8", "ignore")
                price_found = re.compile('"p":"(.*?)"').findall(price_data)
                if price_found:
                    price = price_found[0]
                print(price)

                # Comment count comes from another JSONP endpoint; the
                # trailing comma in the pattern stops the lazy match.
                comments_url = 'https://club.jd.com/comment/productCommentSummaries.action?my=pinglun&referenceIds=' + bookid + '&callback=jQuery9074510'
                comments_req = urllib.request.Request(comments_url)
                comments_req.add_header("User-Agent", random.choice(self.ua))
                comments_data = urllib.request.urlopen(comments_req).read().decode("utf-8", "ignore")
                comments_found = re.compile('"CommentCount":(.*?),').findall(comments_data)
                if comments_found:
                    comments = comments_found[0]
                print(comments)

            # Authors: a book may have none, one, or several.
            raw_author = li.xpath('.//span[@class="author_type_1"]//a/@title').extract()
            # FIX: the original tested len(author) (always 0 at that point)
            # instead of len(raw_author), so the multi-author branch was
            # dead and such books silently kept only their first author.
            author = "&".join(raw_author) if raw_author else "None"
            print(author)

            publisher = li.xpath('.//span[@class="p-bi-store"]/a/@title').extract_first()
            print(publisher)

            # Seller (store) name.
            seller = li.xpath('.//div[@class="p-shopnum"]/span/text()').extract_first()

            yield {
                "pd1": pd1,
                "pd2": pd2,
                "name": bookname,
                "price": price,
                "comments": comments,
                "author": author,
                "publisher": publisher,
                "seller": seller,
            }

            print("------------------------")


"""
注意1：中括号需要使用转义字符，大括号需要吗？？？？
注意2：正则表达式中最好不要出现反斜杠，会出现很多麻烦
注意3：去重：布隆过滤器/数据库的数据约束/（简单的去重）可以直接使用集合去处理
注意4：extract()返回的所有数据，存在一个list里；extract_first()返回的是一个string，是extract()结果中第一个值。
注意5：（需要使用新的写法了！！）先分组，再找数据要注意XPath表达式里面要打“.”!!!
"""

