# -*- coding: UTF-8 -*-
# Spider for 'asos.com'
#Author: Denis <sinedone@gmail.com>,

from urlparse import urljoin

from scrapy.spider import BaseSpider
from scrapy.http import  Request
from scrapy.selector import HtmlXPathSelector
from littlecrawler.items import ScrapedItem

class Netaporter(BaseSpider):
    """Spider for riverisland.com men's and women's catalogues.

    Crawls the category tree from the two start URLs, walks every
    product listing page (following "next" pagination) and emits one
    ScrapedItem per product page.

    Run with:
        scrapy crawl riverisland --set FEED_URI=/Sitemaps/riverisland.json --set FEED_FORMAT=json
    """
    # NOTE(review): class is named Netaporter but the spider crawls
    # riverisland -- kept as-is so any external imports do not break.

    name = 'riverisland'
    start_urls = [
        'http://www.riverisland.com/Online/men',
        'http://www.riverisland.com/Online/women',
    ]
    URL = 'http://www.riverisland.com'
    # Class-level set of hrefs already scheduled, shared by all callbacks
    # to avoid requesting the same page twice.
    visited = set()

    def parse(self, response):
        """Schedule unvisited category links, then list this page's products."""
        page = HtmlXPathSelector(response)

        for url in page.select("//div[@class='options']//div//ul//@href").extract():
            # Only follow relative category links we have not seen yet.
            if url not in self.visited and not url.startswith(self.URL):
                self.visited.add(url)
                yield Request(self.URL + url, callback=self.parse)
        # BUG fix: the original wrapped this in a for/else, but without a
        # `break` the else-branch always runs, so it was a misleading no-op.
        for item in self.list_items(response):
            yield item

    def list_items(self, response):
        """Yield a Request per unvisited product link, then follow pagination."""
        page = HtmlXPathSelector(response)

        for url in page.select('//div[@class="productsContainer"]//@href').extract():
            if url not in self.visited:
                self.visited.add(url)
                # BUG fix: the original passed the raw href, which breaks on
                # relative URLs; urljoin leaves absolute hrefs untouched.
                yield Request(urljoin(response.url, url),
                              callback=self.extract_data, encoding='latin-1')

        next_page = page.select('//div[@class="pages"]//ul//li//a[contains(text(), "next")]//@href').extract()
        if next_page:
            yield Request(urljoin(response.url, next_page[0]),
                          callback=self.list_items, encoding='latin-1')

    def extract_data(self, response):
        """Scrape a single product page into a ScrapedItem."""
        productPage = HtmlXPathSelector(response)

        source = 'River Island'
        designer = 'river island'

        # Breadcrumb texts: presumably [0] home, [1] sex, [2] type,
        # [3] category (optional) -- TODO confirm against live markup.
        # Hoisted: the original evaluated this selector four times.
        crumbs = productPage.select('//div[@id="breadcrumbTrail"]//ul//li//text()').extract()
        sex = crumbs[1] if len(crumbs) > 1 else None
        # `type_` avoids shadowing the builtin `type`; the item key stays 'type'.
        type_ = crumbs[2] if len(crumbs) > 2 else None
        # BUG fix: original tested len >= 3 but indexed [3], raising
        # IndexError on pages with exactly three breadcrumb texts.
        category = crumbs[3] if len(crumbs) > 3 else None

        name = productPage.select('//h1[@class = "productName"]//text()').extract()[0].strip()

        prices = productPage.select('//p[@class = "price"]//text()').extract()
        # Only a single unambiguous price node is trusted (original behavior).
        price = prices[0].strip() if len(prices) == 1 else None

        image1 = productPage.select('//div[@id = "productInteractions"]//div[@id="productPic"]//@src').extract()

        # Original behavior preserved: description only set when the page has
        # MORE than one text node under productInfo, and then node [0] is used.
        descriptions = productPage.select('//div[@id="productInfo"]//p//text()')
        if len(descriptions) > 1:
            description = descriptions.extract()[0].strip()
        else:
            description = None

        item = ScrapedItem()
        item['sex'] = sex
        item['name'] = name
        item['price'] = price
        item['image1'] = image1
        item['source'] = source
        item['category1'] = category
        item['description'] = description
        item['designer'] = designer
        item['type'] = type_
        item['facebookURL'] = response.url
        item['url'] = response.url
        item['image2'] = None
        item['image3'] = None
        item['image4'] = None
        yield item
        