"""
This module scans HTML pages for .torrent links
"""

__author__    = "Roee Shlomo"
__version__   = "2.0"
__date__      = "$2007/02/16$"
__license__   = 'MIT license'

import wx
import re
import urlparse
from types import UnicodeType, StringType
from HTMLParser import HTMLParser
from threading import Thread
from BitTornado.zurllib import urlopen

class UrlFinder(HTMLParser):
    """
    HTML parser that harvests hyperlinks from a page.

    Links whose target ends with ".torrent" are collected in
    self.torLinks; every other link goes into self.allLinks, with
    links containing one of self.keywords inserted first (they are
    the most likely download links).  Relative links are resolved
    against the base url given to __init__.

    Several HTMLParser/markupbase internals are overridden with more
    forgiving versions so that real-world, slightly broken HTML can
    still be scanned instead of raising HTMLParseError.
    """

    # Patterns for the overridden tag parsing, compiled once at class
    # creation instead of on every call.
    _tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
    _attrfind = re.compile(
        r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
        r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
    _locatestarttagend = re.compile(r"""
      <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
      (?:\s+                             # whitespace before attribute name
        (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
          (?:\s*=\s*                     # value indicator
            (?:'[^']*'                   # LITA-enclosed value
              |\"[^\"]*\"                # LIT-enclosed value
              |[^'\">\s]+                # bare value
             )
           )?
         )
       )*
      \s*                                # trailing whitespace
    """, re.VERBOSE)
    _endendtag = re.compile('>')
    _endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
    # Closers for <![keyword]...]> marked sections.  The previous code
    # referenced module globals of these names that were never defined
    # here (they live in markupbase), causing a NameError whenever a
    # marked section was encountered.
    _markedsectionclose = re.compile(r']\s*]\s*>')
    _msmarkedsectionclose = re.compile(r']\s*>')

    def __init__(self, link):
        """
        link -- base url of the page being parsed; used to resolve
                relative hrefs via urlparse.urljoin.
        """
        HTMLParser.__init__(self)

        self.allLinks   = []                    # all non-.torrent links, keyword hits first
        self.torLinks   = []                    # links that end with .torrent
        self.link       = link                  # the base url
        self.keywords   = ("/get", "download")  # hrefs containing these are put first

    def parse_starttag(self, i):
        """
        Override of HTMLParser.parse_starttag that tolerates junk
        characters before the closing '>' instead of raising an error.

        i is the index of '<' in self.rawdata; returns the index just
        past the tag, or a negative value for incomplete input.
        """
        # NOTE: name-mangled to _UrlFinder__starttag_text, so the base
        # class never sees it; it is only read within this method.
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and endpos into a tag and attrs
        attrs = []
        match = self._tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()

        while k < endpos:
            m = self._attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        # Unlike the stock parser, junk characters before the closing
        # '>' are deliberately ignored rather than reported as errors.
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode()
        return endpos

    def check_for_whole_start_tag(self, i):
        """
        Check whether self.rawdata starting at i holds a complete start
        tag.  Returns the index past the end of the tag, or -1 when
        more input is needed (buffer boundary / end of input).
        """
        rawdata = self.rawdata
        m = self._locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input; swallow the malformed empty start tag
                self.updatepos(i, j + 1)
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            # malformed start tag: accept it anyway instead of erroring
            self.updatepos(i, j)
            return j + 1
        raise AssertionError("we should not get here!")

    def parse_endtag(self, i):
        """
        Override of HTMLParser.parse_endtag that tolerates malformed
        end tags: a bad tag is reported to handle_endtag as "" instead
        of raising an error.  Returns the index past '>', or -1.
        """
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = self._endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        j = match.end()
        match = self._endtagfind.match(rawdata, i) # </ + tag + >
        if match:
            tag = match.group(1)
        else:
            # Bad end tag (e.g. "</>" or stray characters): keep going.
            tag = ""
        self.handle_endtag(tag.lower())
        self.clear_cdata_mode()
        return j

    def parse_marked_section(self, i, report=1):
        """
        Override of markupbase.ParserBase.parse_marked_section that also
        accepts the MS-Word extension syntax <![if word]>content<![endif]>.
        Returns the index past the section, or -1 for incomplete input.
        """
        rawdata = self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name(i+3, i)
        if j < 0:
            return j
        if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
            # look for standard ]]> ending
            match = self._markedsectionclose.search(rawdata, i+3)
        elif sectName in ("if", "else", "endif"):
            # look for MS Office ]> ending
            match = self._msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        return match.end(0)

    def handle_starttag(self, tag, attrs):
        """
        Collect the href of every <a> tag.  .torrent targets go to
        self.torLinks; everything else to self.allLinks, with keyword
        hits inserted at the front.  Relative hrefs are resolved
        against self.link.
        """
        attrs = dict(attrs) # Convert from list of tuples to dict
        if 'a' == tag and 'href' in attrs:
            href = attrs['href']
            if href.startswith("http://"):
                target = href
            else:
                # Best-effort decode so urljoin works on non-ascii hrefs
                try:
                    href = href.decode("utf_8")
                except Exception:
                    pass
                # Bug fix: the resolved absolute URL is now used for
                # .torrent links too (the relative href was appended
                # before, which urlopen cannot fetch).
                target = urlparse.urljoin(self.link, href)
            if href.endswith('.torrent'):
                self.torLinks.append(target)
            elif self.keywords[0] in href or self.keywords[1] in href:
                self.allLinks.insert(0, target)
            else:
                self.allLinks.append(target)
    
class HtmlTorrentScanner(Thread):
    """
    Scans all links from UrlFinder and
    checks if they are .torrent links
    """
    def __init__(self, parent, article, rule = None, single = False, allowBrowser = False):
        Thread.__init__(self)
        self.setDaemon(True)

        self.parent      = parent
        self.article     = article
        self.rule        = rule
        self.allowBrowser= allowBrowser
        
        if StringType != type(article) != UnicodeType:
            self.cookies = self.article.feeder.getCookie()
            self.url = article.url
            self.single = True
        else:
            self.cookies = None
            self.url = article
            self.single = single
            
        self.parser      = UrlFinder(self.url)
        self.stopall = False
        
    def stopAll(self):
        self.stopall = True

    def run(self):
        try:
            self.GetTheData()
        except wx.PyDeadObjectError:
            pass
    
    def GetTheData(self):
        
        add = None
            
        # if user called stop: Stop!
        if self.stopall:
            self.failed(_("User stopped HTML scan"))
            return

        # Check the original URL
        try:
            answer = urlopen(self.url, encoding = None, cookies = self.cookies)
        except IOError:
            answer = None
        if not answer:
            self.failed(_("Could not open URL"))
            return
        if "application/x-bittorrent" in answer.getHeader("Content-Type"):
            self.success(answer)
            return
        if not "text/html" in answer.getHeader("Content-Type"):
            if self.allowBrowser:
                wx.LaunchDefaultBrowser(self.url)
            else:
                self.failed(_("Invalid HTML page"))
            return

        try:
            self.parser.feed(answer.read())
        except Exception, e:
            self.failed(str(e))
            return
            
        # if user called stop: Stop!
        if self.stopall:
            self.failed(_("User stopped HTML scan"))
            return

        # check to see any direct .torrent links
        for link in self.parser.torLinks:
            if self.stopall:
                break
            try:
                answer = urlopen(link, encoding = None, cookies = self.cookies)
                if answer:
                    contenttype = answer.getHeader("Content-Type")
                    if "application/x-bittorrent" in contenttype:     
                        self.success(answer)
                        if self.single:
                            return
                    elif self.allowBrowser:
                        wx.LaunchDefaultBrowser(link)
                        return
            except Exception, e:
                self.failed(str(e))
                return
            
        # if user called stop: Stop!
        if self.stopall:
            self.failed(_("User stopped HTML scan"))
            return
        
        #if there aren't any- check the Content-Type
        for link in self.parser.allLinks:
            if self.stopall:
                break
            try:
                answer = urlopen(link, encoding = None, cookies = self.cookies)
                if answer:
                    contenttype = answer.getHeader("Content-Type")
                    if "application/x-bittorrent" in contenttype:     
                        self.success(answer)
                        if self.single:
                            return
            except Exception, e:
                self.failed(str(e))
                return
            
        self.failed(_("Couldn't find .torrent links"))
        
    def failed(self, message):
        self.parent.HtmlTorrentScannerFailed(self.article, message)
        
    def success(self, data):
        self.parent.HtmlTorrentScannerSucceeded(self.article, self.rule, data)        
