import re

from BeautifulSoup import BeautifulSoup

from datamodel import Calendar, Downloads
import bot
import tvdb

class Downloader(object):
    """Find download links (torrent or direct) for a calendar entry.

    Scrapes ezrss.it for torrents and filestube.com for direct-download
    links, then persists whatever it finds through ``tvdb.store_download``.
    """

    # Search endpoints scraped by this class.
    torrentPage = 'http://ezrss.it/search/?mode=simple&show_name='
    filestubePage = 'http://www.filestube.com/search.html?q='
    # Filestube hosting-filter ids, keyed by the user-visible hosting name.
    hostings = {'All':0, 'Megaupload':3, 'Rapidshare':1, 'Hotfile':27, 'Mediafire':15}

    def __init__(self):
        # self.links  -- comma-separated link string once a match is found
        # self.source -- page the direct-download links were scraped from
        self.links = None
        self.source = ''

    def obtain_links(self, cal, direct=False, host='All', format='all'):
        """Search for download links for *cal* and persist any match.

        cal    -- Calendar entry; uses .serie, .season and .nroEp.
        direct -- True searches filestube, False (default) searches ezrss.
        host   -- key into Downloader.hostings (direct search only).
        format -- file-format filter passed to filestube (direct only).

        Returns the value of tvdb.store_download for a hit, or None when
        no link was found.
        """
        self.robot = bot.Bot('', '')
        search = cal.serie.replace(' ', '+')
        if direct:
            self.obtain_direct(cal, search, Downloader.hostings[host], format)
        else:
            self.obtain_torrent(cal, search)
        value = None
        if self.links is not None:
            value = tvdb.store_download(cal.serie, self.links, direct, host,
                                        format, self.source)
            # Reset scratch state so a stale hit is never reused next call.
            self.links = None
            self.source = ''
        return value

    def obtain_torrent(self, c, search):
        """Scrape ezrss for a torrent matching serie/season/episode.

        When fewer than two result rows come back, retries once with the
        '+' separators stripped (some shows are listed without spaces).
        Stores the first acceptable href in self.links.
        """
        soup = self.robot.read_page(Downloader.torrentPage + search)
        table = soup.findAll('table')
        soup = BeautifulSoup(str(table[0]))
        rows = soup.findAll('tr')
        del rows[0]  # drop the header row
        if len(rows) < 2:
            # Too few results: retry once with the spaces removed.  The
            # compact != search guard fixes two bugs in the original: the
            # retry never fired on an empty result set (check lived inside
            # the loop), and it recursed forever once the search term had
            # no '+' left to strip.
            compact = search.replace('+', '')
            if compact != search:
                temp = c.serie
                c.serie = compact
                self.obtain_torrent(c, compact)
                c.serie = temp
            return
        # Patterns are loop-invariant; compile them once, up front.
        title_pat = re.compile(c.serie + '.*', re.IGNORECASE)
        episode_pat = re.compile(r'\d+x\d+')  # e.g. '3x12' = season 3, ep 12
        for row in rows:
            cells = row.findAll('td')
            anchor = cells[1].find('a')
            if not title_pat.match(str(anchor.string)):
                continue
            m = episode_pat.search(str(anchor.nextSibling.string))
            if m is None:
                # Bug fix: the original raised AttributeError on rows
                # without an NxM episode tag.
                continue
            sea, epi = m.group().split('x')
            if int(sea) == c.season and int(epi) >= c.nroEp:
                self.links = str(anchor['href'])
                break

    def obtain_direct(self, c, search, host, format):
        """Scrape filestube for direct-download links for one episode.

        c      -- Calendar entry; uses .serie, .season and .nroEp.
        search -- serie name with spaces already replaced by '+'.
        host   -- filestube hosting id (0 means no hosting filter).
        format -- file-format filter appended to the query.

        On success stores the comma-separated links in self.links and the
        source-page URL in self.source.
        """
        # Filestube expects zero-padded season/episode tags (e.g. s03e07).
        query = (Downloader.filestubePage + search
                 + '+s' + '%02d' % c.season + 'e' + '%02d' % c.nroEp
                 + '&select=' + format)
        if host != 0:
            query += '&hosting=' + str(host)
        # Filter result anchors on the longest word of the serie name --
        # the one least likely to match unrelated results by accident.
        word = max(c.serie.split(' '), key=len)
        try:
            soup = self.robot.read_page(query)
            for div in soup.findAll('div', id="newresult"):
                link = div.find('a', href=re.compile(r'.*' + word + r'.*',
                                                     re.IGNORECASE))
                if link is None:
                    continue
                page = self.robot.read_page(link['href'])
                ddLinks = page.find('pre', id="copy_paste_links")
                if ddLinks is None:
                    continue
                # The <pre> holds one link per line; store comma-separated.
                links = ddLinks.next.replace('\n', ',')
                # The source page is the first <a> inside the div that
                # follows the 'Link(s) source:' label.
                marker = '<span>Link(s) source:</span>'
                html = str(page)
                pos = html.find(marker) + len(marker)
                end = html.find('</div>', pos) + len('</div>')
                anchor = BeautifulSoup(html[pos:end]).find('a')
                if anchor is None:
                    # Bug fix: the original raised an uncaught TypeError
                    # here when the source anchor was missing.
                    continue
                self.links = links
                self.source = anchor['href']
                break
        except IndexError:
            # Best effort: malformed result markup just means "no link".
            pass
