# -*- coding: UTF-8 -*-
# Spider for 'accessorize.com'
# Author: Denis <sinedone@gmail.com>

from urllib2 import unquote
from collections import defaultdict


from scrapy.spider import BaseSpider
from scrapy.http import  Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, Join
from scrapy.shell import inspect_response
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.project import crawler
from scrapy import log
from scrapy.http.cookies import CookieJar
from scrapy.contrib.downloadermiddleware.cookies import CookiesMiddleware

from asos.items import ClothItem

class Accessorize(BaseSpider):
    """Spider for accessorize.com.

    Crawls the site's top navigation menu, then processes one category at a
    time: the category listing requests are queued in ``syncRequests`` and
    dispatched from the ``spider_idle`` signal handler, with all cookie jars
    reset between categories so every category starts a fresh session.
    """

    name = 'accessorize'
    start_urls = ['http://www.accessorize.com/en/restofworld/page/home/']
    URL = 'http://www.accessorize.com'
    # Give up on a listing page after this many "currently unavailable" retries.
    MAX_RETRIES = 3
    # Kept at class level for backward compatibility; shadowed by a
    # per-instance list in __init__ (see BUGFIX note there).
    syncRequests = []

    def __init__(self):
        super(Accessorize, self).__init__()
        # BUGFIX: a mutable class attribute is shared by every instance of
        # the spider, so two instances would feed each other's queues.
        # Each instance now gets its own request queue.
        self.syncRequests = []
        dispatcher.connect(self.spider_idle, signal=signals.spider_idle)

    def spider_idle(self, spider):
        """On idle, reset cookies and feed the next queued category request.

        This serializes the category crawls: a new category listing is only
        started once the engine has fully drained the previous one.
        """
        if spider is self and self.syncRequests:
            # Wipe the cookie jars so session state from the previous
            # category cannot leak into the next one.
            for mid in crawler.engine.downloader.middleware.middlewares:
                if isinstance(mid, CookiesMiddleware):
                    mid.jars = defaultdict(CookieJar)
            self.log('Spider is idle with %d sync requests in the queue.'
                     % len(self.syncRequests), level=log.INFO)
            crawler.engine.crawl(self.syncRequests.pop(0), self)

    def parse(self, response):
        """Parse the home page menu and queue one request per category.

        Sale / "New In" / "All ..." entries are skipped; the last two
        top-level menu items are not product categories.
        """
        hxs = HtmlXPathSelector(response)
        for menu in hxs.select("//ul[@id='singlecolumn']/li")[:-2]:
            menu_title = menu.select("a/span/text()").extract()[0]
            if 'Sale' in menu_title:
                continue
            for submenu in menu.select(".//li/a"):
                entry = submenu.select("text()").extract()[0]
                # Skip non-category links (sale, new arrivals, aggregates).
                if (' Sale' in entry) or ('New In' in entry) or ('All ' in entry):
                    continue
                self.syncRequests.append(Request(
                    submenu.select("@href").extract()[0],
                    priority=3,
                    meta={'category': u"%s - %s" % (menu_title, entry)},
                    callback=self.list_items))

    def list_items(self, response):
        """Parse a category listing: one request per product, plus pagination.

        Pages that the site reports as temporarily unavailable are retried,
        but only up to ``MAX_RETRIES`` times (tracked via request meta).
        """
        hxs = HtmlXPathSelector(response)
        category = response.request.meta['category']
        if 'The page you are looking for is currently unavailable.' in response.body:
            retries = response.request.meta.get('retry_times', 0)
            # BUGFIX: the original retried forever; a permanently broken
            # page could stall the whole crawl. Cap the retries.
            if retries >= self.MAX_RETRIES:
                self.log("giving up on %s after %d retries"
                         % (response.url, retries), log.ERROR)
                return []
            req = response.request.replace(dont_filter=True, priority=1)
            req.meta['retry_times'] = retries + 1
            self.log("retrying %s" % response.url, log.ERROR)
            return req
        reqs = []
        for url in hxs.select("//div[@class='prods']//form//div[@class='image']/a/@href").extract():
            reqs.append(Request(url, meta={'category': category}, priority=1,
                                callback=self.get_item))
        next_page = hxs.select("//span[@class='pagnNext']/a/@href").extract()
        if next_page:
            # Pagination hrefs are site-relative; prepend the base URL.
            reqs.append(Request(self.URL + next_page[0],
                                meta={'category': category},
                                priority=3, callback=self.list_items))
        return reqs

    def get_item(self, response):
        """Scrape a single product page into a ClothItem."""
        hxs = HtmlXPathSelector(response)
        l = XPathItemLoader(ClothItem(), hxs)
        l.add_value('url', [unicode(response.url)])
        l.add_value('category', [response.request.meta['category']])
        l.add_xpath('title', "//h1/text()", TakeFirst())
        l.add_xpath('price', "//span[contains(@id, 'PriceDiv_')]/text()", TakeFirst())
        # Collapse the description block into one CRLF-joined string of
        # non-blank, stripped text fragments.
        l.add_value('description', [u'\r\n'.join(
            i.strip() for i in
            hxs.select("//div[@class='tabcolleft']//text()").extract()
            if i.strip())])
        l.add_xpath('facebookURL', "//like/@href", TakeFirst())
        # Product image URLs live inside inline <script> blocks; keep at
        # most the first three as image1..image3.
        for i, image in enumerate(hxs.select("//script/text()").re(r'"(http://.+?_L\.jpg)"')[:3]):
            l.add_value('image%d' % (i + 1), image)

        return l.load_item()
        
            
