from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from torrentbit.items import TorrentbitItem

class torrentbitSpider(CrawlSpider):
    """Crawl torrentbit.net category listing pages and scrape torrent rows.

    Each start URL is a category's "?list=all" listing page; the crawl rule
    below follows the numbered pagination links, and ``parse_item`` turns
    every torrent row of a listing page into a ``TorrentbitItem``.

    NOTE(review): ``scrapy.contrib``, ``SgmlLinkExtractor`` and
    ``HtmlXPathSelector`` are deprecated Scrapy APIs (removed in modern
    releases); kept here for compatibility with the rest of this project.
    """
    name = "torrentbit"
    allowed_domains = ["torrentbit.net"]
    start_urls = [
        "http://www.torrentbit.net/cat/Movies/?list=all&page=1",
        "http://www.torrentbit.net/cat/Anime%20/%20Hentai/?list=all",
        "http://www.torrentbit.net/cat/Music/?list=all",
        "http://www.torrentbit.net/cat/Games/?list=all",
        "http://www.torrentbit.net/cat/Books/?list=all",
        "http://www.torrentbit.net/cat/Apps/?list=all",
        "http://www.torrentbit.net/cat/Series%20/%20TV%20Shows/?list=all",
        "http://www.torrentbit.net/cat/Pictures/?list=all",
        "http://www.torrentbit.net/cat/Other/?list=all",
        "http://www.torrentbit.net/cat/Adult/?list=all",
        "http://www.torrentbit.net/cat/iPod/?list=all",
    ]
    # Follow pagination links of the form "?list=all&page=N" and hand every
    # matched listing page to parse_item.
    rules = (
        Rule(SgmlLinkExtractor(allow=[r'\?list=all\&page=[0-9]+']),
             'parse_item'),
    )

    def parse_item(self, response):
        """Extract one TorrentbitItem per torrent row on a listing page.

        :param response: the listing-page HTTP response supplied by Scrapy.
        :returns: list of ``TorrentbitItem`` populated from the row columns.
        """
        hxs = HtmlXPathSelector(response)
        # Each extract() yields one entry per table row; the lists are
        # column-parallel (index i across all of them describes row i).
        title = hxs.select("//td[@class='title']/a/text()").extract()
        turl = hxs.select("//td[@class='title']/a/@href").extract()
        addedDate = hxs.select("//td[@class='firstcol']/span/text()").extract()
        size = hxs.select("//td[@class='size']/text()").extract()
        seeds = hxs.select("//tr/td[6]/span/text()").extract()
        leaches = hxs.select("//tr/td[7]/span/text()").extract()
        dl = hxs.select("//tr/td[8]/span/text()").extract()
        subcat = hxs.select("//tr/td[9]/text()").extract()

        items = []
        # zip truncates to the shortest column list, so a partially-parsed
        # row can no longer raise IndexError the way the old manual
        # index-based while loop could when the columns differed in length.
        for row in zip(title, turl, addedDate, size, seeds, leaches, dl,
                       subcat):
            item = TorrentbitItem()
            item['url'] = response.url
            item['title'] = row[0]
            item['turl'] = row[1]
            item['addedDate'] = row[2]
            item['size'] = row[3]
            item['seeds'] = row[4]
            item['leaches'] = row[5]
            item['dl'] = row[6]
            item['subcat'] = row[7]
            items.append(item)
        return items