from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from torrentzap.items import TorrentzapItem
import re
class torrentzapSpider(CrawlSpider):
    """Crawl torrentzap.com category listings and scrape torrent rows.

    Starts on page 1 of each category listing, follows the paginated
    "clean-browse-torrents" links, and yields one TorrentzapItem per
    table row: detail link, title, added date, size, seeds, leeches.
    """
    name = "torrentzap"
    allowed_domains = ["torrentzap.com"]
    # One start URL per category id (1-9), each beginning at page 1.
    # Order kept identical to the original hand-written list.
    start_urls = [
        "http://www.torrentzap.com/clean-browse-torrents/%d/added/1" % cat
        for cat in (4, 1, 7, 8, 9, 6, 5, 3, 2)
    ]
    rules = (
        # Follow pagination links like /clean-browse-torrents/4/added/4
        # and run parse_item on every listing page reached.
        Rule(SgmlLinkExtractor(allow=[r'/clean-browse-torrents/[0-9]+/added/[0-9]+']),
             'parse_item', follow=True),
    )

    def parse_item(self, response):
        """Extract one TorrentzapItem per torrent row on a listing page.

        :param response: Scrapy response for a category listing page.
        :returns: list of TorrentzapItem with url, turl, title, addedDate,
            size (value + unit), seeds and leaches fields populated.
        """
        hxs = HtmlXPathSelector(response)
        page_url = response.url

        # Parallel per-row columns; all selected from the same table.
        title_links = hxs.select("//td[@class='second']/a")
        added_dates = hxs.select("//td[@class='first']/text()").extract()
        sizes = hxs.select("//span[@id='size']/../text()").extract()
        size_units = hxs.select("//span[@id='size']/text()").extract()
        seeds = hxs.select("//td[@class='s']/text()").extract()
        leaches = hxs.select("//td[@class='l']/text()").extract()

        items = []
        # zip truncates to the shortest column, so one malformed/short row
        # can no longer raise IndexError the way manual indexing could.
        for link, added, size, unit, seed, leach in zip(
                title_links, added_dates, sizes, size_units, seeds, leaches):
            item = TorrentzapItem()
            item['url'] = page_url
            item['turl'] = link.select("./@href").extract()[0]
            item['title'] = "".join(link.select("./text()").extract())
            item['addedDate'] = added
            item['size'] = size + " " + unit
            item['seeds'] = seed
            item['leaches'] = leach
            items.append(item)
        return items