# -*- coding: utf-8 -*-
import scrapy
from copy import deepcopy
import re

class SuningSpider(scrapy.Spider):
    """Crawl suning.com book categories, listing pages, and per-book prices.

    Flow: parse (category tree) -> parse_book_list (listing + pagination)
          -> parse_book_price (detail page; yields the finished item).
    """
    name = 'suning'
    allowed_domains = ['suning.com']
    start_urls = ['http://snbook.suning.com/web/trd-fl/999999/0.htm']

    def parse(self, response):
        """Extract every top-level category and its sub-categories, then
        follow each sub-category link to the first page of its listing."""
        # Each <li> under the category wrapper is one top-level category.
        for li in response.xpath("//ul[@class='ulwrap']/li"):
            item = {}
            item["b_cate"] = li.xpath("./div[1]/a/text()").extract_first()
            # Sub-category anchors live in the second <div> of the <li>.
            for a in li.xpath("./div[2]/a"):
                item["s_cate"] = a.xpath("./text()").extract_first()
                item["s_href"] = a.xpath("./@href").extract_first()
                if not item["s_href"]:
                    # Guard: anchor without a target would crash follow().
                    continue
                # deepcopy so concurrently-scheduled callbacks don't share
                # (and clobber) the same mutable dict.
                yield response.follow(
                    item["s_href"],
                    callback=self.parse_book_list,
                    meta={"item": deepcopy(item)},
                )

    def parse_book_list(self, response):
        """Extract book rows from one listing page, request each detail
        page for its price, and follow pagination."""
        item = response.meta["item"]
        # One <li> per book in the listing grid.
        for li in response.xpath("//div[@class='filtrate-books list-filtrate-books']/ul/li"):
            item["book_title"] = li.xpath(".//div[@class='book-title']/a/text()").extract_first()
            item["book_href"] = li.xpath(".//div[@class='book-title']/a/@href").extract_first()
            # FIX: author previously extracted @href; the author *name* is
            # the anchor's text node (matches how title/press are taken).
            item["book_author"] = li.xpath(".//div[@class='book-author']/a/text()").extract_first()
            item["book_press"] = li.xpath(".//div[@class='book-publish']/a/text()").extract_first()
            if not item["book_href"]:
                # No detail URL -> cannot fetch a price page for this row.
                continue
            yield scrapy.Request(
                item["book_href"],
                callback=self.parse_book_price,
                meta={"item": deepcopy(item)},
            )

        # Pagination state is embedded in an inline <script>:
        #   var pagecount=21;
        #   var currentPage=2;
        body = response.body.decode()  # decode once, not once per regex
        page_count = re.findall(r"var pagecount=(.*?);", body)
        current_page = re.findall(r"var currentPage=(.*?);", body)
        # Guard: pages missing the script vars used to raise IndexError.
        if page_count and current_page and int(current_page[0]) < int(page_count[0]):
            next_url = item["s_href"] + "?pageNumber={}&sort=0".format(int(current_page[0]) + 1)
            yield response.follow(
                next_url,
                callback=self.parse_book_list,
                meta={"item": deepcopy(item)},
            )

    def parse_book_price(self, response):
        """Extract the price from a book detail page and emit the item."""
        # FIX: parameter name was misspelled ('resposne').
        item = response.meta["item"]
        # Price appears in inline JSON as "bp":'...'.
        prices = re.findall(r"\"bp\":'(.*?)'", response.body.decode())
        item["book_price"] = prices[0] if prices else None  # tolerate missing price
        # FIX: item was only print()ed before, so it never reached Scrapy's
        # item pipelines; yielding hands it to the engine.
        yield item