# -*- coding: UTF-8 -*-
# Spider for 'threadless.com'
# Author: Denis <sinedone@gmail.com>

from urlparse import urljoin

from scrapy.spider import BaseSpider
from scrapy.http import  Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, Join
from scrapy.shell import inspect_response
from asos.items import ClothItem

class Threadless(BaseSpider):
    """Crawl threadless.com: shop windows -> sub-categories -> product
    listings -> individual product pages, emitting one ClothItem per product.

    Flow: parse (shop front page) -> list_categories (per shop window)
    -> list_items (paginated product grid) -> get_item (product page).
    """

    name = 'threadless'
    start_urls = ['http://www.threadless.com/shop']
    URL = 'http://www.threadless.com'  # base for site-relative hrefs

    def parse(self, response):
        """Front shop page: yield one Request per shop-window category."""
        hxs = HtmlXPathSelector(response)
        for cat in hxs.select("//ul[@class='shop_windows']/li"):
            try:
                category = cat.select("a/h2/text()").extract()[0]
            except IndexError:
                # Decorative <li> entries have no <h2> title -- skip them.
                continue
            url = urljoin(response.url, cat.select("a/@href").extract()[0])
            yield Request(url, meta={"category": category},
                          callback=self.list_categories)

    def list_categories(self, response):
        """Category page: fan out to sub-categories, or scrape in place.

        If the page has a sub-category menu, yield one list_items Request
        per sub-category (category name becomes "parent - child").
        Otherwise the page lists products directly, so delegate to
        list_items and also follow any 'shop_links' anchors deeper.
        """
        hxs = HtmlXPathSelector(response)
        # [1:] skips the first menu entry (the "all items" pseudo-category).
        categories = hxs.select("//ul[@class='category']//a")[1:]
        for cat in categories:
            category = cat.select("text()").extract()[0]
            url = self.URL + cat.select("@href").extract()[0]
            yield Request(url,
                          meta={'category': u"%s - %s" % (response.request.meta['category'],
                                                          category)},
                          callback=self.list_items)
        if not categories:
            # No sub-category menu: this page is itself a product listing.
            for request in self.list_items(response):
                yield request
            for link in hxs.select("//a[@class='shop_links']"):
                # BUGFIX: the original referenced the loop-local 'category'
                # here, which is unbound when 'categories' is empty
                # (NameError); the category travels in the request meta.
                # Also urljoin the href for consistency with parse(),
                # since it may be site-relative.
                url = urljoin(response.url, link.select("@href").extract()[0])
                yield Request(url,
                              meta={'category': response.request.meta['category']},
                              callback=self.list_categories)

    def list_items(self, response):
        """Product grid: yield one get_item Request per priced product,
        then follow the 'next' pagination link if present."""
        hxs = HtmlXPathSelector(response)
        category = response.request.meta['category']
        for item in hxs.select("//dd[@class='product_item']"):
            url = self.URL + item.select("a/@href").extract()[0]
            try:
                price = item.select(".//dd[@class='sale_price']/text()").extract()[0]
            except IndexError:
                # No sale_price node -> product not purchasable; skip it.
                continue
            yield Request(url, meta={'category': category, 'price': price},
                          callback=self.get_item)

        next_page = hxs.select("//ul[@class='pagination']/li[@class='next']/a/@href").extract()
        if next_page:
            yield Request(self.URL + next_page[0],
                          meta={'category': category},
                          callback=self.list_items)

    def get_item(self, response):
        """Product page: populate and return a ClothItem.

        url/category/price come from the request meta; the remaining
        fields are scraped from the page, plus up to four image URLs
        stored as image1..image4.
        """
        hxs = HtmlXPathSelector(response)
        l = XPathItemLoader(ClothItem(), hxs)
        l.add_value('url', [unicode(response.url)])
        l.add_value('category', [response.request.meta['category']])
        l.add_value('price', [response.request.meta['price']])
        l.add_xpath('sex', "//ul[@class='product_tabs']//a/text()", Join(u', '))
        l.add_xpath('title', "//h1[@class='title']/text()", TakeFirst())
        l.add_xpath('designer', "//span[@id='designed_by']/a/text()", Join(u', '))
        l.add_xpath('description', "//p[@class='select_desc']/text()", TakeFirst())
        # BUGFIX: TakeFirst() was outside the add_xpath() call (a stray
        # tuple), so facebookURL was loaded without its output processor.
        l.add_xpath('facebookURL', "//section[@class='social']/like/@href",
                    TakeFirst())
        for i, image in enumerate(hxs.select("//div[@class='product_view']/a/@href").extract()[:4]):
            l.add_value('image%s' % str(i + 1), [image])

        return l.load_item()