# -*- coding: utf-8 -*-
import scrapy
from amazon.items import CatelogItem

class CatelogStarterSpider(scrapy.Spider):
    """Seed spider for Amazon category listings.

    Requests the top-level pages of the three Amazon "zeitgeist" lists
    (Best Sellers, New Releases, Movers & Shakers) and yields one
    ``CatelogItem`` per first-level category found on each page.
    """

    name = 'catelog_starter'
    start_urls = ['https://www.amazon.com/gp/bestsellers/']
    # Spider-local settings: route items through the catalog pipeline only.
    custom_settings = {
        'ITEM_PIPELINES': {
            'amazon.pipelines.CatelogPipeline': 1,
        }
    }

    def start_requests(self):
        """Issue one request per list type, tagging each with its cate_type.

        The ``cate_type`` value is carried via ``meta`` so :meth:`parse`
        knows which URL field of the item to populate.
        """
        catelogs = [
            {'cate_type': 'best-sellers', 'url': 'https://www.amazon.com/Best-Sellers/zgbs'},
            {'cate_type': 'new-releases', 'url': 'https://www.amazon.com/gp/new-releases'},
            {'cate_type': 'movers-and-shakers', 'url': 'https://www.amazon.com/gp/movers-and-shakers'}
        ]
        for catelog in catelogs:
            yield scrapy.Request(
                catelog['url'],
                callback=self.parse,
                meta={'cate_type': catelog['cate_type']},
            )

    def parse(self, response):
        """Extract the first-level category tree from a list page.

        Yields one ``CatelogItem`` per ``<li>`` in the ``zg_browseRoot``
        navigation block, storing the category link in the URL field
        matching the request's ``cate_type``.
        """
        cateType = response.meta['cate_type']
        # Map each known list type to the item field holding its URL.
        url_fields = {
            'best-sellers': 'bestsellers_url',
            'new-releases': 'newreleases_url',
            'movers-and-shakers': 'movers_url',
        }
        for node in response.xpath('//*[@id="zg_browseRoot"]/ul/li'):
            item = CatelogItem()
            # Direct child selection: each <li> wraps a single <a> link.
            cateUrl = node.xpath('a/@href').extract_first()
            cateName = node.xpath('a/text()').extract_first()
            # cate_line tracks the breadcrumb path; level 1 has one entry.
            item['cate_line'] = [cateName]
            item['cate_name'] = cateName
            item['cate_top'] = cateName
            item['cate_level_name'] = cateName
            item['cate_level'] = 1
            field = url_fields.get(cateType)
            if field is not None:
                item[field] = cateUrl
            # Unknown cate_type: yield the item without a URL (matches
            # the original behavior of silently skipping the assignment).
            yield item
    


