# -*- coding: UTF-8 -*-
# Spider for 'http://www.zappos.com/'
# Author: Denis <sinedone@gmail.com>

from urllib2 import unquote
from urlparse import urljoin

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, Join
from scrapy.shell import inspect_response

from asos.items import ClothItem

class Zappos(BaseSpider):
    """Crawl www.zappos.com department pages down to individual products.

    Crawl flow:
        parse (department sidebar categories)
          -> list_subcategories
          -> list_items (paginated product grids)
          -> get_item (one product page)
          -> get_images (optional multiview page for image URLs)

    Yields ``ClothItem`` instances populated through ``XPathItemLoader``.
    The human-readable category path is threaded through each Request's
    ``meta['category']``.
    """

    name = 'zappos'
    start_urls = ['http://www.zappos.com/beauty',
                  'http://www.zappos.com/housewares',
                  'http://www.zappos.com/bags',
                  'http://www.zappos.com/clothing',
                  'http://www.zappos.com/shoes']

    # Site root against which the relative hrefs found in pages are resolved.
    URL = 'http://www.zappos.com/'

    def _abs_url(self, href):
        """Resolve *href* against the site root.

        ``urljoin`` avoids the double-slash URLs that plain string
        concatenation produced for hrefs beginning with '/', and leaves
        already-absolute hrefs untouched.
        """
        return urljoin(self.URL, href)

    def parse(self, response):
        """Follow each sidebar category link, skipping 'Popular' entries."""
        hxs = HtmlXPathSelector(response)
        for cat in hxs.select("//div[@class='sideColumn']/h4/a"):
            title = cat.select("text()").extract()[0]
            if u'Popular' in title:
                continue
            url = self._abs_url(cat.select("@href").extract()[0])
            yield Request(url, meta={"category": title},
                          callback=self.list_subcategories)

    def list_subcategories(self, response):
        """Follow subcategory links; meta carries 'Category - Subcategory'."""
        hxs = HtmlXPathSelector(response)
        category = response.request.meta['category']
        for subcat in hxs.select("//div[contains(@id, 'Select') and contains(@id, 'zc')]/a"):
            title = subcat.select("text()").extract()[0]
            url = self._abs_url(subcat.select("@href").extract()[0])
            yield Request(url, meta={'category': u"%s - %s" % (category, title)},
                          callback=self.list_items)

    def list_items(self, response):
        """Queue every product link on the page, then follow pagination."""
        hxs = HtmlXPathSelector(response)
        category = response.request.meta['category']
        for item in hxs.select("//div[@id='searchResults']/a"):
            url = self._abs_url(item.select("@href").extract()[0])
            yield Request(url, meta={'category': category},
                          callback=self.get_item)
        # The '»' anchor is the "next page" control of the pagination bar.
        next_page = hxs.select("//div[@class='pagination']/a[contains(text(), '»')]/@href").extract()
        if next_page:
            yield Request(self._abs_url(next_page[0]),
                          meta={'category': category},
                          callback=self.list_items)

    def get_item(self, response):
        """Scrape one product page into a ClothItem.

        Returns a follow-up Request to the multiview page when an image
        gallery link exists (the loaded item travels in ``meta['item']``),
        otherwise returns the finished item directly.
        """
        hxs = HtmlXPathSelector(response)
        l = XPathItemLoader(ClothItem(), hxs)
        l.add_value('url', [unicode(response.url)])
        l.add_value('category', [response.request.meta['category']])
        l.add_xpath('designer', "//span[@class='prName']/a/text()", TakeFirst())
        l.add_xpath('title', "//span[@class='prName']/text()", TakeFirst())
        l.add_xpath('price', "//span[@id='price']/text()", TakeFirst())
        # Flatten each <li> of the description block into one whitespace-
        # normalized line; bullets are joined with CRLF for the field value.
        desc = []
        for row in hxs.select("//div[@id='prDesc']//li"):
            desc.append(u' '.join(i.strip()
                                  for i in row.select(".//text()").extract()
                                  if i.strip()))
        l.add_value('description', u'\r\n'.join(desc))
        l.add_xpath('facebookURL', "//meta[@property='og:url']/@content", TakeFirst())
        item = l.load_item()

        image_url = hxs.select("//a[@id='multiview']/@href").extract()
        if image_url:
            return Request(self._abs_url(image_url[0]),
                           meta={'item': item},
                           callback=self.get_images)
        return item

    def get_images(self, response):
        """Attach up to four image URLs (fields image1..image4) parsed out
        of the multiview page's inline JavaScript, then return the item."""
        hxs = HtmlXPathSelector(response)
        item = response.request.meta['item']
        normals = hxs.select("//script/text()").re(r"'normal': '(.+?)'")
        for i, image in enumerate(normals[:4]):
            item['image%d' % (i + 1)] = [image]
        return item
            
        
