from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from quotes.items import QuotesItem

class Spider(CrawlSpider):
    """Crawl brainyquote.com author pages and scrape quote/author pairs."""

    name = "brainyquote.com"
    allowed_domains = ["brainyquote.com"]
    start_urls = [
        "http://www.brainyquote.com/quotes/authors/a/albert_einstein.html",
        # Broader entry points (enable to crawl a whole letter index):
        # "http://www.brainyquote.com/quotes/a.html",
        # "http://www.brainyquote.com/quotes/x.html",
    ]
    rules = (
        # Paginated author pages end in digits
        # (e.g. .../authors/a/albert_einstein_2.html): scrape them.
        Rule(
            SgmlLinkExtractor(allow=[r'quotes/authors/[a-zA-Z]/[a-zA-Z_]+\d+\.html']),
            callback='parse_item',
        ),
        # First author page has no trailing digits
        # (e.g. .../authors/a/aaliyah.html): follow only, so the
        # pagination links above get discovered.
        Rule(SgmlLinkExtractor(allow=[r'quotes/authors/[a-zA-Z]/[a-zA-Z_]+\.html'])),
    )

    def parse_item(self, response):
        """Extract quote/author pairs from one author page.

        :param response: the downloaded author-page response.
        :returns: a list of ``QuotesItem``, one per quote found.

        Pairing the two node lists with ``zip`` truncates to the shorter
        one, which avoids the ``IndexError`` the original index-based
        loop raised whenever fewer author links than quote bodies were
        present on the page.
        """
        hxs = HtmlXPathSelector(response)
        quotes = hxs.select(
            "//span[@id='aptureStartContent']/span[@class='body']/text()"
        ).extract_unquoted()
        authors = hxs.select(
            "//span[@id='aptureStartContent']/span[@class='bodybold']/a/text()"
        ).extract_unquoted()
        items = []
        for quote, author in zip(quotes, authors):
            item = QuotesItem()
            item['quote'] = quote
            item['author'] = author
            items.append(item)
        return items
