# -*- coding:utf-8 -*-

__author__ = 'nameoff'

from BeautifulSoup import BeautifulSoup
from models import Entry
import httplib2
import threading

# Module-level accumulator shared by the parser threads: reset by
# MultiThreadParser.startThreads() and appended to by Parser.Parse* methods.
ListOfItem = []

class MultiThreadParser():
	"""Fans one search query out to per-site parser threads and collects
	the combined results from the shared module-level ListOfItem list."""

	def __init__(self, searchString):
		# Query string forwarded verbatim to every parser thread.
		self.searchString = searchString

	def startThreads(self):
		"""Start one worker thread per torrent site, wait for all of them
		to finish, and return the combined list of Entry results."""
		global ListOfItem
		ListOfItem = []  # reset the shared accumulator for this search
		threads = [
			ThreadRutor(self.searchString),
			# ThreadOpenSharing stays disabled, as in the original code.
			# ThreadOpenSharing(self.searchString),
			ThreadTorrentino(self.searchString),
		]
		for t in threads:
			t.start()
		# BUG FIX: the original only polled isAlive() twice per thread,
		# which does not actually wait — startThreads() could return a
		# partially filled ListOfItem. join() blocks until each worker
		# has terminated.
		for t in threads:
			t.join()

		return ListOfItem

class ThreadRutor(threading.Thread):
	"""Worker thread that runs the rutor.org parser for one query."""

	def __init__(self, searchString):
		threading.Thread.__init__(self)
		# Query to scrape for once the thread is started.
		self.searchString = searchString

	def run(self):
		# All the scraping work is delegated to Parser.
		Parser().ParseRutor(self.searchString)

class ThreadOpenSharing(threading.Thread):
	"""Worker thread that runs the opensharing.org parser for one query."""

	def __init__(self, searchString):
		threading.Thread.__init__(self)
		# Query to scrape for once the thread is started.
		self.searchString = searchString

	def run(self):
		# All the scraping work is delegated to Parser.
		Parser().ParseOpenSharing(self.searchString)

class ThreadTorrentino(threading.Thread):
	"""Worker thread that runs the torrentino.com parser for one query."""

	def __init__(self, searchString):
		threading.Thread.__init__(self)
		# Query to scrape for once the thread is started.
		self.searchString = searchString

	def run(self):
		# All the scraping work is delegated to Parser.
		Parser().ParseTorrentino(self.searchString)

class Parser():
	"""Screen-scraping helpers for the supported torrent sites.

	Each Parse* method downloads a search-results page, extracts the
	matching rows with BeautifulSoup and appends Entry objects to the
	module-level ListOfItem list. A failed download or parse is retried
	until it succeeds (same retry-forever policy as the original code).
	"""

	def CutString(self, string, mode = 0):
		"""Normalize a scraped string.

		mode 0: return the prefix of *string* up to (not including) the
		        first '/' or '|' character.
		mode 1: return *string* with spaces replaced by '+' (URL-style).
		Any other mode returns *string* unchanged.
		"""
		if mode == 0:
			# Take everything up to the first '/' or '|'.
			newstring = ""
			for ch in string:
				if ch == "/" or ch == "|":
					break
				newstring = newstring + ch
			return newstring
		elif mode == 1:
			# BUG FIX: the original appended '+' only for spaces and
			# dropped every other character; the intent was clearly to
			# replace spaces with '+'.
			return string.replace(" ", "+")
		# BUG FIX: the original returned the undefined name `newString`
		# here (NameError for any unknown mode); fall back to the input.
		return string

	def ParseRutor(self, title):
		"""Search rutor.org for *title* and append results to ListOfItem.

		Returns (0, 'nothing') when the site reports zero results,
		otherwise returns None after appending the Entry objects.
		"""
		global ListOfItem
		error = 1
		while error == 1:
			try:
				h = httplib2.Http()
				response, content = h.request(('http://rutor.org/search/%s' % title).replace(' ', '+'))
				html_file = content
			except:
				# BUG FIX: the original fell through after a failed
				# request and used html_file, which may be unbound.
				error = 1
				continue

			if html_file.find('Результатов поиска 0') > 0:
				return 0, 'nothing'

			try:
				soup = BeautifulSoup(html_file)
				# Hoisted: the original re-ran this findAll on every
				# loop iteration.
				result_rows = soup.findAll("tr", {"class": "gai"})
				count = len(result_rows)
				error = 0
			except:
				error = 1
				continue

			for i in range(count):
				rows = result_rows[i]
				seed = rows.findAll("span", {"class": "green"})
				cols = rows.findAll("td")
				entry = Entry()
				entry.seed = seed[0].contents[1]
				entry.link = 'http://rutor.org' + cols[1].contents[0]['href']
				entry.title = cols[1].contents[3].string
				# Rows sometimes carry an extra cell; the size lives in
				# whichever of cols[3]/cols[2] holds text.
				if cols[3].string is None:
					entry.size = cols[2].string
				else:
					entry.size = cols[3].string
				ListOfItem.append(entry)

	def ParseTorrentino(self, title):
		"""Search torrentino.com for *title* and append results to ListOfItem."""
		global ListOfItem
		error = 1
		while error == 1:
			try:
				h = httplib2.Http()
				response, content = h.request(('http://www.torrentino.com/search?kind=0&search=%s' % title).replace(' ', '+'))
				html_file = content
			except:
				# BUG FIX: retry instead of using a possibly-unbound
				# html_file after a failed request.
				error = 1
				continue

			try:
				soup = BeautifulSoup(html_file)
				rang = soup.find("div", {"class": "results"})
				items = rang.findAll("li")
				count = len(items)
				error = 0
			except:
				error = 1
				continue

			# Skip the first <li> and the trailing 14 non-result items,
			# as in the original page layout handling.
			i = 1
			while i < count - 14:
				rows = items[i]
				entry = Entry()

				hh = rows.findAll("h4", {"class": "h4 col1"})
				entry.title = hh[0].contents[1].string
				# Download link is routed through the proxy app, as in
				# the original code.
				entry.link = "http://3.firstappz.appspot.com/?d=1&l=%s" % hh[0].contents[1]['href']

				hh = rows.findAll("p", {"class": "size"})
				entry.size = hh[0].contents[0].string

				hh = rows.findAll("span", {"class": "s"})
				entry.seed = hh[0].contents[0].string

				ListOfItem.append(entry)
				i += 1

	def _EntryFromOpenSharingCols(self, cols):
		# Build one Entry from the <td> cells of an opensharing.org row.
		entry = Entry()
		entry.date = cols[0].string
		entry.link = 'http://opensharing.org' + cols[1].contents[2]['href']
		entry.title = self.CutString(cols[1].contents[4].text)
		try:
			entry.size = cols[4].string
		except IndexError:
			entry.size = cols[5].string
		return entry

	def ParseOpenSharing(self, title):
		"""Search opensharing.org for *title* and append results to ListOfItem.

		Result rows alternate between the "gai" and "tum" CSS classes;
		when there are more "gai" rows than "tum" rows the last "gai"
		row has no "tum" partner. Returns (0, 'nothing') when the site
		reports zero results.
		"""
		global ListOfItem
		error = 1
		while error == 1:
			try:
				h = httplib2.Http()
				response, content = h.request(('http://opensharing.org/c.php?search=%s&make_search=1&method=0' % title).replace(' ', '+'))
				html_file = content
			except:
				# BUG FIX: retry instead of using a possibly-unbound
				# html_file after a failed request.
				error = 1
				continue

			if html_file.find('К сожалению, по вашему запросу ничего не найдено') > 0:
				return 0, 'nothing'

			try:
				soup = BeautifulSoup(html_file)
				gai_rows = soup.findAll("tr", {"class": "gai"})
				tum_rows = soup.findAll("tr", {"class": "tum"})
				count = len(gai_rows)
				# cpo: there are more "gai" rows than "tum" rows.
				cpo = count > len(tum_rows)
				error = 0
			except:
				error = 1
				continue

			for i in range(count):
				cols = gai_rows[i].findAll("td")
				ListOfItem.append(self._EntryFromOpenSharingCols(cols))
				# Interleaved "tum" partner row. BUG FIX: the original's
				# if/else branches were byte-identical and both indexed
				# tum_rows[i] unconditionally, which raised IndexError
				# on the last row whenever cpo was true; guard on the
				# actual length instead.
				if cpo and i < len(tum_rows):
					colst = tum_rows[i].findAll("td")
					ListOfItem.append(self._EntryFromOpenSharingCols(colst))
