import re
import threading
import urllib

from BeautifulSoup import BeautifulSoup

import datamodel
import language
import tvdb
import notificator

class Bot(threading.Thread):

    #only support ten seasons in literal, to be improved later
    literalNumber = {'one':1, 'two':2, 'three':3, 'four':4, 'five':5, 'six':6, 'seven':7, 'eight':8, 'nine':9, 'ten':10}
    numberLiteral = {1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven', 8:'eight', 9:'nine', 10:'ten'}

    def __init__(self, search=None, serie=None):
        threading.Thread.__init__(self)
        self._browser = Browser()
        self._notif = notificator.Notificator()
        self._search = search
        self._serie = serie
        self._number = True
        self._lang = language.setup()
        self._check_new_data = False

    def run(self):
        try:
            link = self._search_google()
            #unable to parse IMDB page with BeautifulSoup
            self._explore_wikipedia(link)
            if self._actual == 0:
                self._notif.error_message(self._serie)
            elif not self._check_new_data:
                self._notif.show_message(self._serie.upper(), self._lang['calSave'])
            self._browser.close()
        except:
            self._notif.error_message(self._serie)

    def _search_google(self):
        #Search for Wikipedia or IMDB link
        link = None
        soup = self.read_page(self._search)
        alinks = soup.findAll('a', href=re.compile('^http://en.wikipedia(.)+\\episodes(.)*', re.IGNORECASE))
        if len(alinks) == 0:
            alinks = soup.findAll('a', href=re.compile('http://en.wikipedia(.)+\\episodes(.)*', re.IGNORECASE))
            link = str([a['href'] for a in alinks][0])
            link = link[7:link.find('&')]
        else:
            link = [a['href'] for a in alinks][0]
        return link

    def _explore_wikipedia(self, wiki):
        #look for the last season
        print 'Reading: ' + wiki
        soup = self.read_page(wiki)
        table = soup.find('table', id='toc')
        seasonsNumeric = []
        seasons = table.findAll(text=re.compile('^(Season|Series) (\d)+'))
        if seasons == []:
            seasons = table.findAll(text=re.compile('^(Season|Series) (\w)+'))
            for season in seasons:
                n = season.find(':')
                if n == -1:
                    pat = re.compile('^(Season|Series) (\w)+\ ')
                    num = pat.search(season).group().split(' ')[1]
                    seasonsNumeric.append(Bot.literalNumber[num.lower()])
                else:
                    seasonsNumeric.append(Bot.literalNumber[str(sea[7:n]).lower()])
            self._number = False
        else:
            for season in seasons:
                n = season.find(':')
                if n == -1:
                    pat = re.compile('^(Season|Series) (\d)+')
                    num = pat.search(season).group().split(' ')[1]
                    seasonsNumeric.append(int(num))
                else:
                    seasonsNumeric.append(int(season[7:n]))
        seasonsNumeric.sort()
        self._actual = seasonsNumeric[-1]
        self._obtain_calendar_wiki(str(soup))

    def _obtain_calendar_wiki(self, html):
        while self._actual > 0:
            try:
                #find Season section
                pat = re.compile('<h.+\ (Season|Series) ' + str(self._actual) + '.+\</h', re.IGNORECASE)
                if not self._number:
                    pat = re.compile('<h.+\ (Season|Series) ' + str(Bot.numberLiteral[self._actual]) + '.+\</h', re.IGNORECASE)
                    self._number = True
                pos = html.find(pat.search(html).group(0))
                #find Table from Season
                posNextTitle = html.upper().find('<H', pos+5)
                if posNextTitle != -1:
                    posTableBegin = html.upper().find('<TABLE', pos, posNextTitle)
                else:
                    posTableBegin = html.upper().find('<TABLE', pos)
                posTableEnd = html.upper().find('</TABLE', pos) + 8
                soup = BeautifulSoup(html[posTableBegin:posTableEnd])
                #read table header
                trs = soup.findAll('tr') #Rows
                pos = 0
                ths = trs[pos].findAll('th')    #Headers
                while len(ths) < 2:
                    pos += 1
                    ths = trs[pos].findAll('th')
                posTitle = [n for n in range(len(ths)) if str(ths[n]).upper().find('TITLE') > -1
                                or str(ths[n]).upper().find('EPISODE') > -1]
                posTitle = int(posTitle[0])
                posDate = [n for n in range(len(ths)) if str(ths[n]).upper().find('DATE') > -1]
                posDate = int(posDate[0])
                #read table data
                calendars = []
                nroEp = 1
                patDate = re.compile('(\w)+\ (\d){,2}, (\d){4}')
                patTitle = re.compile('(")*((\w)+(\s)*)+')
                for tr in trs:
                    td = tr.findAll('td')
                    if len(td) >= posDate:
                        w = patDate.search(str(td[posDate]))
                        if w is None:
                            patDate = re.compile('(\d){1,2} (\w)+\ (\d){4}')
                            w = patDate.search(str(td[posDate]))
                        strDate = ''
                        strTitle = ''
                        if w is not None:
                            strDate = str(w.group())
                        elif len(calendars) == 0:
                            raise StandardError()
                        else:
                            break
                        next_ = td[posTitle].next
                        while patTitle.match(str(next_)) is None:
                            next_ = next_.next
                        strTitle = unicode(str(next_))
                        cal = datamodel.Calendar(self._serie, strTitle, self._actual, nroEp, strDate)
                        nroEp += 1
                        calendars.append(cal)
                db = tvdb.TvDB()
                db.store_calendar(calendars)
                self._actual = -1
            except:
                self._actual -= 1

    def read_page(self, link):
        page = self._browser.open(link)
        soup = BeautifulSoup(page.read())
        return soup

    def link_imdb(self, serie):
        self._search = 'http://www.google.com/search?hl=en&q='+serie+'+imdb'
        #Search for IMDB link        
        soup = self.read_page(self._search)
        alinks = soup.findAll('a', href=re.compile('^http://www.imdb.com/title/.+'))
        link = [a['href'] for a in alinks]
        return link[0]

    def obtain_image(self):
        self._search = 'http://images.google.com/images?q=' + self._serie + '+serie'
        page = self._browser.open(self._search)
        page = page.read()
        posBegin = page.find('<div style="display:none')
        posEnd = page.find('</div>', posBegin) + 6
        html = page[posBegin:posEnd]
        soup = BeautifulSoup(html)
        imgs = soup.findAll('img')
        content = self._browser.open(str(imgs[0]['src']))
        return content.read()


class Browser(urllib.FancyURLopener):
    """URL opener that identifies itself as Firefox.

    FancyURLopener sends the class attribute `version` as the User-Agent
    header, so overriding it makes requests look like a regular browser
    (presumably to avoid bot blocking by Google/Wikipedia — unverified).
    """
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'
