__type__ = "rlslog"
__instance__ = "RlsLog"

import urllib
import urllib2
import urlparse
import logging
import re
import yaml

# BeautifulSoup is optional: users who don't use the http-based modules are
# not forced to install it. soup_present records availability for later checks.
soup_present = True
soup_err = "Module rlslog requires BeautifulSoup. Please install it from http://www.crummy.com/software/BeautifulSoup/ or from your distribution repository."

try:
    from BeautifulSoup import BeautifulSoup
except ImportError:
    # Catch only ImportError: a bare except would also hide unrelated
    # failures such as KeyboardInterrupt or SystemExit.
    logging.warning(soup_err)
    soup_present = False


class NewTorrents:
    """NewTorrents parsing utilities.

    Resolves an actual torrent download url from either a NewTorrents
    search-results url or a direct download-page url.
    """

    # Matches the javascript copy('<torrent url>') snippet on download pages.
    # Compiled once here instead of on every page parse.
    _COPY_RE = re.compile(r"copy\('(.*)'\)", re.IGNORECASE)

    def __init__(self, raw_url, title):
        # raw_url: NewTorrents page or search url as found on rlslog
        # title: release title used to match search results (may be None)
        self.raw_url = raw_url
        self.title = title

    def request_torrent_url(self):
        """Returns torrent from either search url or download page."""
        is_search = (self.raw_url.startswith("http://www.newtorrents.info/?q=")
                     or self.raw_url.startswith("http://www.newtorrents.info/search"))
        if is_search and self.title is not None:
            return self.__get_torrent_url_from_search(self.raw_url, self.title)
        return self.__get_torrent_url_from_page(self.raw_url)

    def __get_torrent_url_from_page(self, url):
        """Parses torrent url from newtorrents download page.

        Returns None when the page lacks the expected copy('<url>') snippet.
        """
        data = urllib2.urlopen(url).read()
        found = self._COPY_RE.search(data)
        if found is None:
            logging.debug("NewTorrents get_torrent_url_from_page failed")
            return None
        return found.group(1)

    def __get_torrent_url_from_search(self, url, name):
        """Parses torrent download url (requires release name) from search results.

        Only results whose title (dots treated as spaces) equals *name*
        case-insensitively are accepted; the first match wins.
        """
        soup = BeautifulSoup(urllib2.urlopen(url))
        torrents = []
        for link in soup.findAll('a', attrs={'href': re.compile('down.php')}):
            torrent_url = "http://www.newtorrents.info%s" % link.get('href')
            release_name = link.parent.next.get('title')
            if release_name.replace('.', ' ').lower() == name.lower():
                torrents.append(torrent_url)
            else:
                logging.debug("NewTorrents rejecting search result: '%s' != '%s'" % (release_name, name))

        # choose the torrent
        if not torrents:
            logging.debug("NewTorrents did not found any matches in search result")
            return None
        if len(torrents) == 1:
            logging.debug("NewTorrents found only one matching search result.")
        else:
            logging.debug('NewTorrents search results contains multiple matches, using first occurence from: %s' % torrents)
        return torrents[0]

class PirateBay:
    """Piratebay parsing utilities"""

    def __init__(self, raw_url, title):
        # page url and release title as found on rlslog
        self.raw_url = raw_url
        self.title = title

    def request_torrent_url(self):
        """Fetch the piratebay page and return the torrent link inside its
        download div."""
        document = BeautifulSoup(urllib2.urlopen(self.raw_url))
        download_div = document.find("div", attrs={"class": "download"})
        anchor = download_div.find("a")
        return anchor.get('href')

class RlsLog:
    """Adds support for rlslog.net as a source.

    If an rlslog entry has a NewTorrents download link then the torrent url is
    parsed from there. If it has a NewTorrents search link, the search results
    are checked for a title matching the entry. Multiple NewTorrents links per
    entry have unknown effects ...

    Module caches all successful NewTorrents 'download torrent' parses, hence
    the module makes only one request per rlslog entry to NewTorrents, thus
    eliminating any potential DDOS effect and/or bandwidth wasting.

    NEW: Supports also piratebay links

    In case of movies the module supplies pre-parsed IMDB details (helps when
    chaining with filter_imdb).
    """

    # matches e.g. "(1234 votes)"; compiled once instead of per call
    _RE_VOTES = re.compile(r"\((\d*).votes\)", re.IGNORECASE)
    # matches e.g. "7.8" or "7/10"; tried in order, first hit wins
    _RE_SCORE = [re.compile(r"(\d\.\d)"), re.compile(r"(\d)\/10")]

    def parse_imdb(self, s):
        """Parse (score, votes) out of a raw imdb rating string.

        Returns a (float or None, str or None) tuple; an element is None
        when the corresponding piece could not be found in *s*.
        """
        score = None
        votes = None
        for pattern in self._RE_SCORE:
            found = pattern.search(s)
            if found is not None:
                score = float(found.group(1))
                break
        found = self._RE_VOTES.search(s)
        if found is not None:
            votes = found.group(1)
        logging.debug("RlsLog: parse_imdb returning score: '%s' votes: '%s' from: '%s'" % (str(score), str(votes), s))
        return (score, votes)

    def parse_rlslog(self, config):
        """Parse configured url and return releases array.

        Each release dict carries: title, imdb_url, imdb_score, imdb_votes and
        site (a NewTorrents or PirateBay helper). Returns an empty list when
        BeautifulSoup is unavailable -- the previous bare `return` yielded
        None, which made run() crash iterating it.
        """
        if not soup_present:
            logging.error(soup_err)
            return []
        soup = BeautifulSoup(urllib2.urlopen(config['url']))
        releases = []
        for entry in soup.findAll('div', attrs={"class": "entry"}):
            release = {}
            h3 = entry.find('h3', attrs={"class": "entrytitle"})
            release['title'] = h3.a.string.strip()
            entrybody = entry.find('div', attrs={"class": "entrybody"})
            if not entrybody:
                logging.debug("No entrybody")
                continue

            logging.debug("Processing title %s" % (release['title']))

            # an "imdb rating:" label may directly precede the score text
            rating = entrybody.find('strong', text=re.compile(r'imdb rating\:', re.IGNORECASE))
            if rating is not None:
                score_raw = rating.next.string
                if score_raw is not None:
                    release['imdb_score'], release['imdb_votes'] = self.parse_imdb(score_raw)

            for link in entrybody.findAll('a'):
                link_name = link.string
                if link_name is None:
                    continue
                link_name = link_name.strip().lower()
                link_href = link['href']
                # handle imdb link; rating text may follow right after it
                if link_name == "imdb":
                    release['imdb_url'] = link_href.encode()  # removes unicode (yaml problem)
                    score_raw = link.next.next.string
                    if 'imdb_score' not in release and 'imdb_votes' not in release and score_raw is not None:
                        release['imdb_score'], release['imdb_votes'] = self.parse_imdb(score_raw)
                # handle newtorrents link
                if link_href.startswith('http://www.newtorrents.info'):
                    release['site'] = NewTorrents(link_href, release['title'])
                # handle piratebay link
                if link_href.startswith('http://thepiratebay.org'):
                    release['site'] = PirateBay(link_href, release['title'])

            # add empty imdb keys if not found
            release.setdefault('imdb_url', None)
            release.setdefault('imdb_score', None)
            release.setdefault('imdb_votes', None)

            # reject if no torrent link
            if 'site' in release:
                releases.append(release)
            else:
                logging.info('RlsLog: %s rejected due missing torrents-link' % (release['title']))

        return releases

    def _cached_torrent_url(self, site, session):
        """Resolve the torrent url for *site*, consulting and updating the
        session['urlcache'] so each site url is fetched at most once."""
        session.setdefault('urlcache', {})
        # limit cache size, clearing has no dangerous effects, besides
        # that all urls are fetched from sites upon clear :)
        if len(session['urlcache']) > 1500:
            session['urlcache'].clear()
        cache_key = site.raw_url.encode()
        torrent_url = session['urlcache'].get(cache_key, None)
        if torrent_url is None:
            # find out actual torrent link from site (requests page and parses it)
            torrent_url = site.request_torrent_url()
            # add to cache if found
            if torrent_url is not None:
                session['urlcache'][cache_key] = torrent_url.encode()  # removes unicode (yaml problem)
        else:
            logging.debug('Got newtorrents url from cache %s' % torrent_url)
        return torrent_url

    def run(self, source, patterns, config, session):
        """Match releases parsed from config['url'] against *patterns*.

        Returns a list of pattern copies augmented with torrent url, title
        and imdb details for every release whose title matches the pattern
        and whose torrent link could be resolved.
        """
        releases = self.parse_rlslog(config)

        # now loop trough releases and see what matches
        matches = []
        for release in releases:
            for pattern in patterns:
                regexp = str(pattern['pattern'])
                # check if pattern matches
                if not re.search(regexp, release['title'], re.IGNORECASE | re.UNICODE):
                    continue
                torrent_url = self._cached_torrent_url(release['site'], session)
                if torrent_url is None:
                    logging.debug("RlsLog: Unable to get torrent url for '%s' from newtorrents" % (release['title']))
                    continue
                match = pattern.copy()
                match['torrent'] = torrent_url.encode()             # removes unicode (yaml problem)
                match['title'] = release['title'].encode()          # removes unicode (yaml problem)
                match['imdb_url'] = release['imdb_url']
                match['imdb_score'] = release['imdb_score']
                match['imdb_votes'] = release['imdb_votes']
                matches.append(match)
        return matches

if __name__ == '__main__':
    # Manual smoke test: parse the rlslog page url given as argv[1] with a
    # catch-all pattern and dump every resulting match as YAML.
    import sys
    logging.basicConfig(level=logging.DEBUG)

    """
    n = NewTorrents()
    p2 = None
    if len(sys.argv) > 2:
        p2 = sys.argv[2]
    else:
        p2 = None
    url = n.get_torrent_url(sys.argv[1], p2)
    print yaml.dump(url)

    """

    r = RlsLog()
    config = {}
    config['url'] = sys.argv[1]

    # single ".*" pattern so every parsed release is reported
    patterns = []
    pattern = {}
    pattern['pattern'] = ".*"
    patterns.append(pattern)
    matches = r.run("source", patterns, config, {})
    print yaml.dump(matches)
