# -*- coding: utf-8 -*-
import re
import scrapy
from copy import deepcopy

from suning.items import SuningItem


class SnSpider(scrapy.Spider):
    """Spider for the Suning book store: crawls category tree -> book lists -> book detail pages."""

    name = 'sn'
    allowed_domains = ['suning.com']
    start_urls = ['http://snbook.suning.com/web/trd-fl/999999/0.htm']

    def parse(self, response):
        """Parse the category sidebar and schedule one list-page request per sub-category."""
        # Each <li> is one top-level grouping of sub-categories.
        li_list = response.xpath("//div[@class='sider-sort l']//ul[@class='ulwrap']/li")
        for li in li_list:
            item = SuningItem()
            item["title"] = li.xpath("./div[@class='three-sort']/a/text()").extract()
            item["href"] = li.xpath("./div[@class='three-sort']/a/@href").extract()
            if item["href"]:
                for url in item["href"]:
                    # hrefs are site-relative; build an absolute URL.
                    url = "http://snbook.suning.com/" + url
                    yield scrapy.Request(
                        url,
                        callback=self.parse_detail,
                        # deepcopy: requests are async, so each must carry its own item.
                        meta={"item": deepcopy(item)},
                    )
            else:
                # Category without sub-links: emit the bare item.
                # (Was `return item`, whose value a generator silently discards.)
                self.logger.debug("category without sub-links: %s", item)
                yield item

    def parse_detail(self, response):
        """Parse one book-list page: yield a detail request per book, then the next list page."""
        item = deepcopy(response.meta["item"])
        li_list = response.xpath("//div[@class='filtrate-books list-filtrate-books']/ul/li")
        for li in li_list:
            item["detail_url"] = li.xpath(".//div[@class='book-img']/a/@href").extract_first()
            item["img"] = li.xpath(".//div[@class='book-img']//img/@src").extract_first()
            if item["img"] is None:
                # Lazy-loaded images keep the real URL in @src2.
                item["img"] = li.xpath(".//div[@class='book-img']//img/@src2").extract_first()
            item["alt"] = li.xpath(".//div[@class='book-img']/a/img/@alt").extract_first()
            # Fixed XPath: `text()` is a node test, not an attribute (was `a/@text()`,
            # which is invalid and always yielded None).
            item["author"] = li.xpath(".//div[@class='book-author']/a/text()").extract_first()
            item["jianjie"] = li.xpath(
                "./div[@class='book-detail']/div[@class='book-descrip c6']/text()"
            ).extract_first()
            if item["detail_url"]:  # guard: Request(None) would raise
                # Fetch the detail page for price extraction.
                yield scrapy.Request(
                    item["detail_url"],
                    callback=self.parse_detail1,
                    meta={"item": deepcopy(item)},
                )
        # Pagination: the page embeds `var pagecount=N;` / `var currentPage=M;` in JS.
        body = response.body.decode()  # decode once, reuse for both regexes
        page_count = re.findall(r"var pagecount=(.*?);", body)
        current_page = re.findall(r"var currentPage=(.*?);", body)
        if page_count and current_page:
            page_count = int(page_count[0])
            current_page = int(current_page[0])
            if current_page < page_count:
                # Next page of THIS list, derived from the current URL.
                # (Previously looped over every category href in item["href"],
                # yielding duplicate/wrong next-page requests from every page.)
                base_url = response.url.split("?")[0]
                next_url = "{}?pageNumber={}&sort=0".format(base_url, current_page + 1)
                yield scrapy.Request(
                    next_url,
                    callback=self.parse_detail,
                    meta={"item": response.meta["item"]},
                )

    def parse_detail1(self, response):
        """Parse one book detail page: extract the price from inline JS and yield the item."""
        item = response.meta["item"]
        # The price lives in a JS blob as "bp":'<price>', not in stable HTML.
        prices = re.findall(r"\"bp\":'(.*?)',", response.body.decode())
        item["jiage"] = prices[0] if prices else None
        self.logger.debug("scraped item: %s", item)
        yield item

