# Usage/help banner, printed unconditionally at startup (Python 2 print statement).
# NOTE(review): this string is runtime output; left byte-identical.
print """Pirate Bay Torrent Downloader - Command Line Interface
Program Written by: Daniel Folkes
website: http://danfolkes.com
email: danfolkes @t gmail dot c0m
written and tested in: Python 2.6.6

Props: http://www.hellboundhackers.org/articles/841-using-python-39;s-htmlparser-class.html

        
    - Usage -
    -a[n] = auto downloads first [n] results (default:10)
    -o[/path/] = path that points to the folder where .torrent's go (default, current dir)
	-t[trackerurl] = the tracker search url (default: http://btjunkie.org/search?c=1&o=52&t=1&q=)
	-f[feedurl] = RSS Feed with title links (example: http://feeds2.feedburner.com/PitchforkBestNewAlbums)
	-c[allowed chars] = Characters that are not stripped in RSS Titles (default: "abcd...xyzABC..XYZ1234567890 " example: http://feeds2.feedburner.com/PitchforkBestNewAlbums)
	-d[disallowed chars] = Characters that are stripped in RSS Titles (default: example: http://feeds2.feedburner.com/PitchforkBestNewAlbums)
	-s[strip] = Will call the strip command on the RSS Titles (default: True)
	-h[n] = auto searches newest [n] results (default:1)
"""


import urllib
import sys
from elementtree import ElementTree
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError
import re

# --- Default configuration, overridable by the command-line flags parsed below ---
autodownload = 0        # -a: how many of the first results to auto-download
outputpath = "./"       # -o: directory that receives downloaded .torrent files
trackerurl = "http://btjunkie.org/search?c=1&o=52&t=1&q="  # -t: tracker search URL
feedurl = ""            # -f: RSS feed with title links ("" means use built-in default)
ignoredargs = ""        # accumulates unrecognised arguments, "|"-separated, for reporting
charsallowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890 "  # -c: title whitelist
charsdisallowed = ""    # -d: characters stripped out of RSS titles
striptitle = True       # -s: whether to strip() the cleaned RSS titles
autosearch = 1          # -h: how many newest feed entries to search for


# Dead duplicate assignment to the Pirate Bay search URL removed: `url` was
# immediately overwritten, and the script actually queries via `trackerurl`.
url = 'http://btjunkie.org/search?c=1&o=52&t=1&q='

# Parse command-line flags of the form -X[value] (see the usage banner above).
# Anything unrecognised is collected into `ignoredargs` and reported later.
for arg in sys.argv[1:]:
	if arg.startswith("-a"):
		autodownload = int(arg[2:])
	elif arg.startswith("-o"):
		outputpath = arg[2:]
	elif arg.startswith("-t"):
		trackerurl = arg[2:]
	elif arg.startswith("-f"):
		feedurl = arg[2:]
	elif arg.startswith("-c"):
		charsallowed = arg[2:]
	elif arg.startswith("-d"):
		charsdisallowed = arg[2:]
	elif arg.startswith("-s"):
		# Bug fix: the old code stored the raw string slice, and any non-empty
		# string (including "False") is truthy, so -sFalse could never disable
		# stripping.  Interpret common false-y spellings explicitly; a bare
		# "-s" stays falsy, matching the old behaviour for that input.
		striptitle = arg[2:].lower() not in ("", "false", "0", "no")
	elif arg.startswith("-h"):
		autosearch = int(arg[2:])
	else:
		ignoredargs += "|" + arg
		
print "autodownload: " , autodownload
print "outputpath:   " , outputpath
print "trackerurl:   " , trackerurl
print "feedurl:      " , feedurl
print "charsallowed: " , charsallowed
print "charsdisalwed:" , charsdisallowed

		
if ignoredargs:
	print ignoredargs

class Spider(HTMLParser): 
	def __init__(self, url): 
		HTMLParser.__init__(self) 
		req = urllib.urlopen(url) 
		self.feed(req.read())

		
	def handle_starttag(self, tag, attrs): 
		try:
			if tag == 'a' and attrs: 
				print "Found link => %s" % attrs[0][1]
		except HTMLParseError:
			print "error"
		
	
class ElementWrapper:
    """Expose an Element's child-element text content as simple attributes.

    ``wrapper.title`` returns ``element.findtext("title")`` -- the text of
    the first matching child, or None when no such child exists.
    """

    def __init__(self, element):
        self._element = element

    def __getattr__(self, tag):
        # Refuse dunder lookups so introspection/copy/pickle machinery
        # gets the AttributeError it expects instead of a findtext result.
        if tag[:2] == "__":
            raise AttributeError(tag)
        return self._element.findtext(tag)
class RSSWrapper(ElementWrapper):
    """Wrap an RSS tree root: channel-level text via attributes, items by index."""

    def __init__(self, feed):
        # Delegate channel-level attribute lookups to the base wrapper.
        ElementWrapper.__init__(self, feed.find("channel"))
        self._items = self._element.findall("item")

    def __getitem__(self, index):
        # The IndexError raised by the list terminates old-style iteration,
        # which is what makes ``for item in feed`` work.
        return ElementWrapper(self._items[index])
		
# Collect up to `autosearch` titles from the RSS feed.
# Bug fix: the feed URL was hard-coded, silently ignoring the -f flag; now
# -f is honoured, falling back to the historic Pitchfork default when unset.
rssfeed = feedurl or "http://feeds2.feedburner.com/PitchforkBestNewAlbums"
rsstitles = []
if rssfeed:
	tree = ElementTree.parse(urllib.urlopen(rssfeed))
	feed = RSSWrapper(tree.getroot())
	for i, item in enumerate(feed):
		# Stop once we have enough titles instead of idling through the rest.
		if i >= autosearch:
			break
		titleo = item.title or ""  # findtext may return None for a titleless item
		title = ""
		for ch in titleo:
			# Keep whitelisted characters only.  Bug fix: the -d blacklist
			# (charsdisallowed) was parsed but never applied; with its ""
			# default this filter behaves exactly as before.
			if ch in charsallowed and ch not in charsdisallowed:
				title += ch
		if striptitle:
			title = title.strip()
		rsstitles.append(title)
			
for title in rsstitles:
	print "Searching ["+trackerurl+"] for: " + str(title)
	srchurl = trackerurl + str(title)
	resp = urllib.urlopen(srchurl).read()
	#print resp
	for m in re.finditer('href="(.*)"', resp):
		if m.group(0).find(".torrent") > -1:
			print '%02d-%02d: %s' % (m.start(), m.end(), m.group(0))
	#tree = TidyHTMLTreeBuilder.parse(resp)
	# p = tree.xpath("\\a")
	# links = list(p.iter("a"))
	# for i in p:
		# if i.find(".torrent") > -1:
			# print i
	
# o = 0;
# for ar in searchFor:
    # if o != 0:
        # url+="%20?"    
    # url+=str(ar)
    # o +=1
# print "URL: " + url
# req = urllib2.Request(url)
# response = urllib2.urlopen(req)
# the_page = response.read()

# parser = URLLister()
# parser.feed(the_page)
# parser.close()
# i=0
# results = 15
# listing = []
# for urls in parser.urls: 
	# if len(listing) < results:
		# if urls.count(".torrent"):
			# print len(listing), urls
			# listing.append(urls)

# if autoprint==0:
    # download = raw_input("Which ones to download?(sep by commas) :").split(',')
# else:
    # download = range(0,autoprint)
# print download
# for j in download:
	# g = int(j)
	# if (g < results) and (g >=0):
		# #- Strip Characters Off
		# localPath = listing[g]
		# for c in ".:/%!@#$^&*()_-=+~`[]{}|;',.":
			# localPath = localPath.replace(c,"")
		# localPath = localPath[(len(localPath)/2):]
		# localPath += ".torrent"
		# if outgoing:
			# localPath = outgoing+localPath
		# #-
		# print "Downloading: "+ listing[g] + " -> " + str(localPath)
		# open(localPath, 'wb').write(urllib2.urlopen(listing[g]).read())
print "...Download Complete" 
