# -*- coding: utf-8 -*-
import scrapy
from amazon.items import CatelogItem,ProductItem
from scrapy.conf import settings
import requests
import time
import hashlib
import re
import urllib
from amazon.models import db_connect,create_amazon_table,Catelog,Product
from sqlalchemy.orm import sessionmaker

class ProductListSpider(scrapy.Spider):
    """Crawl Amazon category ranking lists and yield one item per product.

    Categories are read from the ``Catelog`` table, PAGE_SIZE rows at a
    time, selected by the 1-based ``page`` spider argument.  For each
    category every available ranking-list URL (bestsellers / newreleases /
    movers) is requested; each list page yields ``ProductItem``s and the
    "next page" link is followed with the same category metadata.
    """
    name = 'product_list'
    custom_settings = {
        'ITEM_PIPELINES': {
            'amazon.pipelines.ProductPipeline': 1,
        }
    }

    # Ranking list types; a Catelog row may carry a '<type>_url' column,
    # and the resulting item records ranks under '<type>_rank'.
    CATE_TYPES = ('bestsellers', 'newreleases', 'movers')
    # Number of Catelog rows crawled per `page` argument.
    PAGE_SIZE = 100

    def __init__(self, page=None, *args, **kwargs):
        """
        :param page: 1-based page number selecting a PAGE_SIZE slice of
            the Catelog table; when None nothing is crawled.
        """
        super(ProductListSpider, self).__init__(*args, **kwargs)
        self.page = page
        engine = db_connect()
        self.Session = sessionmaker(bind=engine)

    def start_requests(self):
        """Yield one request per (category, ranking-list type) pair."""
        if self.page is None:
            # Previously this fell through silently; make the no-op visible.
            self.logger.warning('Spider argument "page" not given; nothing to crawl.')
            return

        page = int(self.page)
        session = self.Session()
        try:
            catelogs = (session.query(Catelog)
                        .limit(self.PAGE_SIZE)
                        .offset(self.PAGE_SIZE * (page - 1))
                        .all())
            for catelog in catelogs:
                record = catelog.to_dict()
                # Downstream code addresses the category by '_id'.
                record['_id'] = record['id']
                for cate_type in self.CATE_TYPES:
                    url = record.get(cate_type + '_url')
                    if url:
                        yield scrapy.Request(
                            url,
                            callback=self.parse,
                            meta={'catelog': record, 'cate_type': cate_type})
        finally:
            # Release the DB session even when the query raises.
            session.close()

    def parse(self, response):
        """Parse one ranking-list page and follow its pagination link."""
        current_catelog = response.meta['catelog']
        cate_type_key = response.meta['cate_type']
        for item in self.handleProductList(response, current_catelog, cate_type_key):
            yield item
            # Per-product detail parsing intentionally disabled:
            # yield scrapy.Request(item['product_url'],
            #                      callback=self.parseProductDetail,
            #                      meta={'product': item})
        next_page_url = response.xpath(
            '//*[@id="zg-center-div"]//li[@class="a-last"]/a//@href').extract_first()
        if next_page_url is not None:
            yield scrapy.Request(
                response.urljoin(next_page_url),
                callback=self.parse,
                meta={'catelog': current_catelog, 'cate_type': cate_type_key})

    # Scan one page of the top-100 list (50 products per page).
    def handleProductList(self, response, currentCatelog, cateTypeKey):
        """Extract ProductItems from a ranking-list page.

        :param currentCatelog: dict with '_id', 'cate_name', 'cate_top',
            'cate_level' describing the category being crawled.
        :param cateTypeKey: one of CATE_TYPES; selects the rank field.
        :returns: list of populated ProductItem objects.
        """
        cate_id = currentCatelog['_id']
        cate_name = currentCatelog['cate_name']
        cate_top = currentCatelog['cate_top']
        cate_level = currentCatelog['cate_level']
        items = []
        for node in response.xpath('//*[@id="zg-ordered-list"]/li'):
            item = ProductItem()
            item['product_url'] = response.urljoin(
                node.xpath('span/div/span/a//@href').extract_first())
            item['product_img'] = node.xpath('span/div/span/a//img//@src').extract_first()
            item['product_name'] = node.xpath('span/div/span/a//img//@alt').extract_first()
            item['product_rating'] = node.xpath(
                'span/div/span/div[contains(@class,"a-icon-row")]/a[1]//@title').extract_first()
            item['product_reviews'] = node.xpath(
                'span/div/span/div[contains(@class,"a-icon-row")]/a[2]//text()').extract_first()
            item['price_amazon'] = node.xpath(
                'span/div/span/div[@class="a-row"]/a/span/span//text()').extract_first()
            item['product_price'] = item['price_amazon']
            item['cate_ref'] = [cate_name]
            item['cate_top'] = cate_top
            # The rank badge may be missing; the old code crashed with a
            # TypeError on re.sub(None). Fall back to None instead.
            rank_text = node.xpath(
                'span//span[@class="zg-badge-text"]/text()').extract_first() or ''
            digits = re.sub(r'\D', '', rank_text)
            rank = {
                'cate_id': cate_id,
                'cate_name': cate_name,
                'cate_top': cate_top,
                'cate_level': cate_level,
                'rank': int(digits) if digits else None,
            }
            item[cateTypeKey + '_rank'] = {cate_id: rank}
            items.append(item)
        return items
