# -*- coding: utf-8 -*-
import scrapy
from amazon.items import CatelogItem
import hashlib

class CatelogSpider(scrapy.Spider):
    """Spider that walks Amazon's category trees (Best Sellers, New
    Releases, Movers & Shakers), yielding one ``CatelogItem`` per category
    node and recursing into each sub-category page.
    """
    name = 'catelog'
    # Spider-specific configuration: route items through this spider's
    # dedicated pipeline only.
    custom_settings = {
        'ITEM_PIPELINES': {
            'amazon.pipelines.CatelogPipeline': 1,
        }
    }

    # Top-level categories to crawl. Kept as a class attribute (constant
    # data) so parse() can read it even when a resumed crawl replays
    # persisted requests without start_requests() having run first.
    cateStarters = ["Appliances","Arts, Crafts & Sewing","Automotive","Baby",
        "Beauty & Personal Care","Camera & Photo","Cell Phones & Accessories","Clothing, Shoes & Jewelry",
        "Electronics","Handmade Products","Home & Kitchen","Kitchen & Dining","Patio, Lawn & Garden",
        "Pet Supplies","Sports & Outdoors","Tools & Home Improvement","Toys & Games"
    ]

    def start_requests(self):
        """Seed one request per catalog listing type.

        The listing type is carried in ``meta['cate_type']`` so parse()
        knows which ``*_url`` item field to populate.
        """
        catelogs = [
            {'cate_type':'bestsellers','url':'https://www.amazon.com/Best-Sellers/zgbs'},
            {'cate_type':'newreleases','url':'https://www.amazon.com/gp/new-releases'},
            {'cate_type':'movers','url':'https://www.amazon.com/gp/movers-and-shakers'}
        ]
        for catelog in catelogs:
            yield scrapy.Request(catelog['url'], callback=self.parse, meta={'cate_type': catelog['cate_type']})

    def parse(self, response):
        """Yield category items found on this page, then recurse into
        each child category whose top-level ancestor is whitelisted.
        """
        cateType = response.meta['cate_type']
        for item in self.handleCatelogTree(response, cateType):
            # Filter: only follow trees rooted at a whitelisted top category.
            if item['cate_top'] in self.cateStarters:
                yield item
                next_url = item[cateType + '_url']
                # Guard: href may be missing; urljoin handles relative links.
                if next_url:
                    yield scrapy.Request(response.urljoin(next_url), callback=self.parse,
                                         meta={'cate_type': cateType})

    def handleCatelogTree(self, response, cateType):
        """Parse the category sidebar of a listing page.

        Returns a list of ``CatelogItem`` for the children of the currently
        selected category, or an empty list when the page layout is
        unexpected (no single selected node).
        """
        items = []
        # The currently selected category node in the sidebar tree.
        selectedNode = response.xpath('//*[@id="zg_browseRoot"]//span[@class="zg_selected"]')
        # Breadcrumb of ancestor category names leading to the current node.
        cateLine = response.xpath('//*[@id="zg_browseRoot"]//li[@class="zg_browseUp"]//a//text()').extract()
        if len(selectedNode) != 1:
            return items
        # extract_first() may return None on a malformed page; default to ''.
        cateName = selectedNode.xpath('.//text()').extract_first() or ''
        cateLine.append(cateName.strip())
        # Child categories: sibling <li> entries under the selected node.
        for node in selectedNode.xpath('../../ul/li'):
            levelCateName = node.xpath('a//text()').extract_first()
            if levelCateName is None:
                # Skip malformed entries with no anchor text.
                continue
            item = CatelogItem()
            # cateLine[1:] copies the breadcrumb (dropping the root label)
            # so each item gets its own independent path list.
            item['cate_line'] = cateLine[1:]
            item['cate_line'].append(levelCateName)
            item['cate_name'] = self.cateName(item)
            item['cate_top'] = item['cate_line'][0]
            item['cate_level'] = len(item['cate_line'])
            item['cate_level_name'] = levelCateName
            item[cateType + '_url'] = node.xpath('a//@href').extract_first()
            item['_id'] = self.cateId(item)
            items.append(item)
        return items

    def cateId(self, catelog):
        """Stable identifier: SHA-1 hex digest of the full category path."""
        return hashlib.sha1(self.cateName(catelog).encode('utf-8')).hexdigest()

    def cateName(self, catelog):
        """Human-readable path, e.g. ``'Electronics > Camera & Photo'``."""
        return " > ".join(catelog['cate_line'])