import scrapy
from scrapy.http import request
import re

from scrapy.utils.trackref import NoneType

class DangdangSpider(scrapy.Spider):
    """Crawl dangdang.com's category index and yield one dict per product listing.

    ``parse`` walks the three-level category tree on the landing page and
    schedules one ``getshop`` request per leaf category; ``getshop`` scrapes
    the product list and follows pagination.
    """

    # NOTE(review): appears unused in this file — kept for backward compatibility.
    currentPage = 1
    name = 'dangdang'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://category.dangdang.com/?ref=www-0-C']

    def parse(self, response):
        """Extract (root, parent, leaf) category names and schedule a listing
        request per leaf category, passing the names along in ``meta``."""
        try:
            # Each div[@ddt-pit] is one top-level (root) category block.
            for top_block in response.xpath(".//div[@ddt-pit]"):
                rootclass = top_block.xpath(
                    "./div[@class='classify_books_detail']//a/text()").get()
                # The first child div is the detail panel, not a sub-category —
                # skip it. enumerate() replaces the O(n) result.index() lookup.
                for idx, sub_block in enumerate(top_block.xpath("./div")):
                    if idx == 0:
                        continue
                    parentclass = sub_block.xpath("./div[1]/a/text()").get()
                    for leaf in sub_block.xpath(".//ul/li"):
                        nameclass = leaf.xpath("./a/text()").get()
                        urls = leaf.xpath("./a/@href").get()
                        if not urls:
                            # Some leaves carry no link — nothing to crawl.
                            continue
                        meta = {
                            "root": rootclass,
                            "parent": parentclass,
                            "classify": nameclass,
                            "current": 1,
                        }
                        yield scrapy.Request(
                            urls,
                            callback=self.getshop,
                            meta={"meta": meta},
                        )
        except Exception:
            # Log the real traceback instead of swallowing it silently.
            self.logger.exception("parse failed for %s", response.url)

    def getshop(self, response):
        """Yield one item dict per product on a category listing page, then
        follow the next-page link (once per page, not once per item)."""
        try:
            meta = response.meta.get("meta") or {}
            for li in response.xpath(".//div[@id='search_nature_rg']/ul/li"):
                item = {}
                item["root"] = meta.get("root")
                item["parent"] = meta.get("parent")
                item["classify"] = meta.get("classify")
                item["storename"] = li.xpath("./p[@class='search_shangjia']/a[1]/text()").get("none")
                item["title"] = li.xpath("./a[@name='itemlist-picture']/@title").get("none")
                item["shopurl"] = li.xpath("./a[@name='itemlist-picture']/@href").get("none")
                item["price"] = li.xpath("./p[@class='price']/span[@class='search_now_price']/text()").get("none")
                item["prePrice"] = li.xpath("./p[@class='price']/span[@class='search_pre_price']/text()").get("none")
                item["sellNum"] = li.xpath("./p[@class='search_shangjia']/a[2]/text()").get("none")
                # Promo labels, e.g. "new" badges, joined into one display string.
                spanlist = [
                    span.xpath("./text()").get("none")
                    for span in li.xpath(".//span[@class='new_lable']/span")
                ]
                item["span"] = " | ".join(spanlist)
                # Star rating is encoded as an inline width style (e.g. "width: 85%").
                # BUG FIX: style may be None, which used to raise TypeError inside
                # re.search and silently abort the rest of the page.
                style = li.xpath("./p[@class='search_star_line']/span[@class='search_star_black']/span/@style").get()
                match = re.search(r"(\d+)", style) if style else None
                item["startlevel"] = int(match.group(1)) / 10 if match else None
                item["commentNum"] = li.xpath("./p[@class='search_star_line']/a/text()").get("none")
                item["author"] = li.xpath("./p[@class='search_book_author']/span[1]/a/text()").get("none")
                item["time"] = li.xpath("./p[@class='search_book_author']/span[2]/text()").get("none")
                item["titlemade"] = li.xpath("./p[@class='search_book_author']/span[3]/a/text()").get("none")
                item['detail'] = li.xpath("./p[@class='detail']/text()").get("none")
                yield item
            # BUG FIX: this used to sit inside the per-item loop, scheduling the
            # same next-page request once per product on the page.
            nexturl = response.xpath('//div[@class="paging"]/ul//li[@class="next"]/a/@href').get()
            if nexturl:
                yield scrapy.Request(
                    # urljoin handles both relative and absolute hrefs, replacing
                    # the hard-coded "http://category.dangdang.com" prefix.
                    response.urljoin(nexturl),
                    callback=self.getshop,
                    meta={"meta": meta},
                )
        except Exception:
            # Log the real traceback instead of swallowing it silently.
            self.logger.exception("getshop failed for %s", response.url)
