# -*- coding: UTF-8 -*-
# Spider for 'crookedtongues.com'
# Author: Denis <sinedone@gmail.com>

from urlparse import urljoin

from scrapy.spider import BaseSpider
from scrapy.http import  Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, Join
from scrapy.shell import inspect_response
from asos.items import ClothItem

class Crookedtongues(BaseSpider):
    """Spider for crookedtongues.com: walks the category menu, paginates
    each product list, and loads every product page into a ClothItem."""

    name = 'crookedtongues'
    start_urls = ['http://www.crookedtongues.com/store/clothing',
                  'http://www.crookedtongues.com/store/sneakers']
    URL = 'http://www.crookedtongues.com'

    def parse(self, response):
        """Follow every category link found in the side-menu block."""
        sel = HtmlXPathSelector(response)
        for link in sel.select("//div[@class='block']/ul//a"):
            label = link.select("text()").extract()[0]
            href = link.select("@href").extract()[0]
            yield Request(self.URL + href,
                          meta={'category': label},
                          callback=self.list_items)

    def list_items(self, response):
        """Request each product on the listing page, then follow the
        'next' pagination link when one exists."""
        sel = HtmlXPathSelector(response)
        category = response.request.meta['category']

        products = sel.select("//div[@id='product-list-results']/div[@class='item_text']")
        for product in products:
            href = product.select("a/@href").extract()[0]
            yield Request(self.URL + href,
                          meta={'category': category},
                          callback=self.get_item)

        # Pagination: a single "next" anchor, relative to the current URL.
        next_hrefs = sel.select("//a[@class='pagination-controller next']/@href").extract()
        if next_hrefs:
            yield Request(urljoin(response.url, next_hrefs[0]),
                          meta={'category': category},
                          callback=self.list_items)

    def get_item(self, response):
        """Scrape one product page into a ClothItem and return it."""
        sel = HtmlXPathSelector(response)
        loader = XPathItemLoader(ClothItem(), sel)
        loader.add_value('category', [response.request.meta['category']])
        loader.add_value('url', [unicode(response.url)])
        loader.add_xpath('designer', "//a[@class='brand']/text()", TakeFirst())
        loader.add_xpath('title', "//strong[@class='product_name fn']/text()", TakeFirst())
        loader.add_xpath('price', "//span[@class='price']/text()", TakeFirst())

        # Description paragraphs, whitespace-trimmed and space-joined.
        paragraphs = sel.select("//div[@class='scroll-pane description']/p/text()").extract()
        description = u' '.join(p.strip() for p in paragraphs if p.strip())
        loader.add_value('description', [description])

        loader.add_xpath('facebookURL', "//a[@class='share_link utility']/@href", TakeFirst())

        # At most four gallery images, stored in fields image1..image4.
        images = sel.select("//div[@id='image']/a/@href").extract()[:4]
        for idx, href in enumerate(images, start=1):
            loader.add_value('image%s' % idx, [self.URL + href])

        return loader.load_item()