# -*- coding: UTF-8 -*-
# Spider for 'topshop.com'
#Author: Denis <sinedone@gmail.com>,

from urlparse import urljoin

from scrapy.spider import BaseSpider
from scrapy.http import  Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, Join
from scrapy.shell import inspect_response
from asos.items import ClothItem

class TopShop(BaseSpider):
    """Crawl topshop.com: top menu -> categories -> subcategories -> items.

    Each callback forwards the accumulated human-readable category path in
    ``request.meta['category']`` ("Menu - Category - Subcategory").
    """

    name = 'topshop'
    start_urls = ['http://www.topshop.com/']
    URL = 'http://www.topshop.com'
    # Subcategory/pagination hrefs are relative to this servlet root.
    SERVLET_URL = 'http://www.topshop.com/webapp/wcs/stores/servlet/'
    # Catalog image URL prefix; filenames are built from the product code.
    MEDIA_URL = 'http://media.topshop.com/wcsstore/TopShop/images/catalog/'
    # Top-menu entries that only duplicate or promote the regular catalog.
    SKIP_MENUS = (u'New In', u'Sale', u'Lookbook', u'Features')

    def parse(self, response):
        """Walk the top navigation menu and schedule each catalog section."""
        hxs = HtmlXPathSelector(response)
        for menu in hxs.select("//ul[@id='nav_catalog_menu']/li"):
            title = menu.select("a/text()").extract()[0].strip()
            if title in self.SKIP_MENUS:
                continue
            url = menu.select("a/@href").extract()[0]
            yield Request(url, meta={'category': title},
                          callback=self.list_categories)

    def list_categories(self, response):
        """Follow every category link in the section's feature navigation."""
        hxs = HtmlXPathSelector(response)
        section = response.request.meta['category']
        for cat in hxs.select("//ul[@id='featNav']//a"):
            title = cat.select("text()").extract()[0]
            # Hrefs here are site-absolute paths; join against the site root
            # (urljoin is equivalent to the old string concatenation for
            # '/'-prefixed paths and also tolerates absolute hrefs).
            url = urljoin(self.URL, cat.select("@href").extract()[0])
            yield Request(url,
                          meta={'category': u"%s - %s" % (section,
                                                          title.strip())},
                          callback=self.list_subcategories)

    def list_subcategories(self, response):
        """Follow each subcategory filter link, skipping 'View All'."""
        hxs = HtmlXPathSelector(response)
        category = response.request.meta['category']
        for subcat in hxs.select("//div[@class='block_search_filter']"
                                 "//li[contains(@class, 'category')]/a"):
            title = subcat.select("text()").extract()[0].strip()
            if title == u'View All':
                # The aggregate view duplicates the individual subcategories.
                continue
            url = self.SERVLET_URL + subcat.select("@href").extract()[0]
            yield Request(url,
                          meta={'category': u"%s - %s" % (category, title)},
                          callback=self.list_items)

    def list_items(self, response):
        """Schedule every product on a listing page, then paginate."""
        hxs = HtmlXPathSelector(response)
        category = response.request.meta['category']
        for item in hxs.select("//ul[@class='product']"):
            url = item.select(".//a/@href").extract()[0]
            yield Request(url, meta={'category': category},
                          callback=self.get_item)
        next_page = hxs.select("//li[@class='show_next']/a/@href").extract()
        if next_page:
            yield Request(self.SERVLET_URL + next_page[0],
                          meta={'category': category},
                          callback=self.list_items)

    def get_item(self, response):
        """Scrape one product page into a ClothItem and return it."""
        hxs = HtmlXPathSelector(response)
        l = XPathItemLoader(ClothItem(), hxs)
        l.add_value('category', [response.request.meta['category']])
        l.add_value('url', [unicode(response.url)])
        l.add_xpath('title', "//h1/text()", TakeFirst())
        l.add_xpath('price', "//li[@class='product_price']/span/text()",
                    TakeFirst())
        description = u'\r\n'.join(
            line.strip() for line in
            hxs.select("//p[@class='product_description']/text()").extract()
            if line.strip())
        l.add_value('description', [description])
        l.add_xpath('facebookURL', "//like/@href", TakeFirst())
        # BUG FIX: the old pattern r"images: (\d+?)" was lazy with nothing
        # following it, so it always captured only the FIRST digit of the
        # image count (e.g. "12" -> "1"). Greedy \d+ captures the full count.
        images = hxs.select("//script/text()").re(r"images: (\d+)")[0]
        image_id = hxs.select("//script/text()").re(r'code: "(.+?)"')[0]
        l.add_value('image1', self.MEDIA_URL + '%s_large.jpg' % image_id)
        for i in range(2, int(images) + 1):
            try:
                l.add_value('image%d' % i,
                            [self.MEDIA_URL + '%s_%d_large.jpg'
                             % (image_id, i)])
            except KeyError:
                # ClothItem declares a fixed number of imageN fields; skip
                # indices the item does not define rather than crash.
                continue
        return l.load_item()
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        