from scrapy.utils.url import urljoin_rfc 
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from extremefunnypictures.items import ExtremefunnypicturesItem

class ExtremeFunnyPicturesSpider(CrawlSpider):
    """Crawl extremefunnypictures.com's paginated listing and scrape the
    featured photo (URL + alt text) from each page."""

    name = "efc"
    allowed_domains = ["extremefunnypictures.com"]
    start_urls = [
        "http://www.extremefunnypictures.com/",
    ]
    rules = (
        # Follow pagination links of the form
        # http://www.extremefunnypictures.com/?&page=2
        Rule(SgmlLinkExtractor(allow=[r'/\?&page=\d+']), 'parse_item'),
    )

    def parse_item(self, response):
        """Build one ExtremefunnypicturesItem per <img id='photo'> on the page.

        Returns a list of items, each with:
          - 'url': the image src resolved to an absolute URL against
            response.url (relative src attributes would otherwise be broken)
          - 'alt': the image's alt text
        """
        hxs = HtmlXPathSelector(response)
        srcs = hxs.select("//img[@id='photo']/@src").extract()
        alts = hxs.select("//img[@id='photo']/@alt").extract()

        items = []
        # zip() stops at the shorter list, avoiding the IndexError the old
        # index-based loop could raise when src/alt counts differ.
        for src, alt in zip(srcs, alts):
            item = ExtremefunnypicturesItem()
            item['url'] = urljoin_rfc(response.url, src)
            item['alt'] = alt
            items.append(item)

        return items
