# TODO:
# - Apply Python naming conventions.
# - Log all errors to errors.log
# - Stream title RE replacement
# - Alphabetic sort (sort key: maybe lower(), then restrict to [a-z0-9])
# - Avoid non-termination: DL size limit, async cancellation option
# - allow protocol filters for files? (e.g. http, rtsp, ...)
# E.g. http://www.liveradio.de/playlist.php?id=2768 uses a .php script to
# return a playlist of "content-type: application/octetstream", and also
# "Content-Disposition: attachment; filename=listen.pls"
#
# "base href" is used on http://www.polskastacja.pl/

from asxparser import AsxParser
from m3uparser import M3uParser
from plsparser import PlsParser
import BeautifulSoup
import copy
import logging
import re
import sys
import urllib2
import urlparse
import utils

def _same_domain(site1, site2):
    site1 = site1.lower()
    site2 = site2.lower()
    if site1.startswith('www.'):
        site1 = site1[4:]
    if site2.startswith('www.'):
        site2 = site2[4:]
    return site1 == site2

def _compare_streams(stream1, stream2):
    t1 = stream1['title'].lower()
    t2 = stream2['title'].lower()
    f1 = stream1['file'].lower()
    f2 = stream2['file'].lower()
    if t1 < t2:
        return -1
    elif t1 > t2:
        return 1
    elif f1 < f2:
        return -1
    elif f1 > f2:
        return 1
    return 0

# Used when logging is disabled.
class NoLogger:
    '''Null-object stand-in for a logging.Logger.

    Implements exactly the logger calls SuckPls makes and silently
    discards every message.
    '''

    def debug(self, message):
        return None

    def info(self, message):
        return None

    def warning(self, message):
        return None

    def error(self, message):
        return None

    def critical(self, message):
        return None

    def setLevel(self, level):
        return None

class SuckPls:
    ''' Suck playlists from the web. '''

    # Matches URL paths that end in a supported playlist extension;
    # group(1) captures the extension itself ('pls', 'm3u', or 'asx').
    _pls_filename_pattern = re.compile(r'.+\.(pls|m3u|asx)$', re.I)
    # Path extensions ('' = no extension) considered HTML-like and thus
    # worth downloading and scanning for further links.
    _crawl_extensions = [ '', 'htm', 'html', 'php', 'asp', 'aspx', 'cfm',
                          'jsp' ]
    # Browser-like User-Agent sent with every request; presumably some
    # sites reject the stock urllib2 agent string.
    _user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.2.2) ' \
                  'Gecko/20100316 Firefox/3.6.2 (.NET CLR 3.5.30729)'
    # Maps user-supplied loglevel strings (lowercased) to logging levels.
    _log_levels = { 'debug': logging.DEBUG,
                    'info': logging.INFO,
                    'warning': logging.WARNING, 'warn': logging.WARNING,
                    'error': logging.ERROR,
                    'critical': logging.CRITICAL, 'fatal': logging.CRITICAL }

    def __init__(self, writer_factory):
        # writer_factory may be None (used by tests); see _initialize.
        self._writer_factory = writer_factory

    def suck(self, seed, **kwargs):
        '''
        Positional argument:
        seed -- The website or playlist URL.

        Keyword arguments:
        writers -- Writers created by WriterFactory. For playlist output.
                   Defaults to [ ], i.e. no output.
        depth -- Crawl depth (number of links to follow). This is 0 if the seed
                 is already the playlist, 1 if it's a direct link on the seed
                 website, etc. Default: 1
        name -- The output playlist's name. Set to the seed URL if omitted. Used
                for filename generation when writing the playlist.
        leavedomain -- Allow leaving the seed's domain when crawling.
                       Default: True
        loglevel -- Controls logging verbosity. One of 'DEBUG', 'INFO',
                    'WARNING', 'ERROR', 'CRITICAL'. Case-insensitive.
                    Default: 'WARNING'

        Several filters (see below) can be specified. By default, no filters are
        applied.

        To control crawling:
        hostfilter -- Applied to a URL's host part. Overridden by
                      leavedomain=False.
        pathfilter -- Applied to a URL's path part.

        To control playlist reading:
        plsfilter -- Applied to the full playlist URL.

        To filter individual streams in all found playlists:
        titlefilter -- Applied to a stream's title.
        urlfilter -- Applied to a stream's URL.

        A filter is a list of 2-tuples of the form (<expression>, <verdict>).
        <expression> is a case-insensitive regular expression as a string.
        <verdict> is a bool specifying the verdict in case <expression> matches.
        In case of multiple matches, the total verdict is that of the last
        match.
        Examples:
        plsfilter = [ (r'\.m3u$', False) ] -- exclude .m3u playlists
        plsfilter = [ ('', False),
                      (r'\.m3u$', True) ] -- include only .m3u playlists
        '''
        self._kwargs = kwargs  # for logging
        self._initialize(seed, **kwargs)
        self._crawl(0)
        self._retrieve_and_write()

    def _initialize(self,
                    seed,
                    writers = [],
                    depth = 1,
                    name = '',
                    leavedomain = True,
                    hostfilter = [],
                    pathfilter = [],
                    plsfilter = [],
                    titlefilter = [],
                    urlfilter = [],
                    loglevel = 'WARNING'):
        '''
        Validate the suck() arguments and set up all crawl state.

        Exits the process via sys.exit(1) if the seed cannot be parsed or
        uses a scheme other than http.

        NOTE(review): the mutable list defaults are shared across calls;
        they are never mutated here, so this is harmless, but `[]`
        defaults are a known Python pitfall — consider None sentinels.
        '''
        # Parse the seed value, automatically prefixing an omitted 'http://'.
        try:
            self._seed = urlparse.urlsplit(seed);
            if not self._seed.scheme:
                self._seed = urlparse.urlsplit('http://' + seed);
        except:
            print >> sys.stderr, ('failed to parse "%s":\n%s'
                                  % (seed, sys.exc_info()))
            sys.exit(1)
        if self._seed.scheme != 'http':
            print >> sys.stderr, ('unsupported scheme: "%s"'
                                  % self._seed.scheme)
            sys.exit(1)

        # Set name, stripping 'www.' if generated from seed.
        if not name:
            name = self._seed.netloc
            if name.lower().startswith('www.'):
                name = name[4:]
        self._pls_name = name

        # Generate matching log name.
        # loglevel 'none' disables logging entirely via the NoLogger stub.
        if loglevel.lower() == 'none':
            self._log = NoLogger()
        else:
            self._initialize_logging()
            # Unknown level strings silently fall back to INFO.
            self._log.setLevel(SuckPls._log_levels.get(loglevel.lower(),
                                                       logging.INFO))

        if not self._writer_factory:  # may be None for testing
            self._writers = []
        else:
            if not writers:
                self._log.warning('no writers specified')
                self._writers = []
            else:
                self._writers = self._writer_factory.get_writers(writers)

        self._log.info('---------- start sucking from %s, name: %s' %
                        (self._seed.geturl(), self._pls_name))
        self._log.info('arguments: %s' % self._kwargs)

        # Split URLs already handled by _crawl (never revisited).
        self._visited = set()
        # Maps split playlist URL -> extension string ('pls'/'m3u'/'asx').
        self._found = {}

        # Nodes at current crawling depth.
        self._nodes = set()
        self._nodes.add(self._seed)

        self._depth = int(depth)  # cfg parser passes a str
        assert self._depth >= 0, 'invalid depth: %i' % depth

        self._leave_domain = bool(int(leavedomain))  # cfg parser passes a str

        # Compile all user-supplied filter rule lists once, up front.
        self._host_filter = self._compile_filter(hostfilter)
        self._path_filter = self._compile_filter(pathfilter)
        self._pls_filter = self._compile_filter(plsfilter)
        self._title_filter = self._compile_filter(titlefilter)
        self._url_filter = self._compile_filter(urlfilter)

        self._opener = urllib2.build_opener()

        # Build dict of reusable _parsers.
        self._parsers = { 'asx': AsxParser(),
                          'm3u': M3uParser(),
                          'pls': PlsParser() }

    def _initialize_logging(self):
        '''Create a per-seed file logger whose filename is derived from
        the playlist name.'''
        # log_filename may not exactly match the pls filename that the writer
        # will later generate, but that's OK.
        log_filename = utils.sanitize_filename(self._pls_name) + '.log'
        self._log = logging.getLogger(self._seed.geturl())
        # "delay" avoids empty log files.
        handler = logging.FileHandler(log_filename, delay = True)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)-8s - %(message)s'))
        # A logger that already has handlers means this seed is being
        # sucked twice concurrently, which is unsupported.
        assert not self._log.handlers, ('concurrent sucks: %s' % self._pls_name)
        self._log.addHandler(handler)

    def _get_found(self):
        '''Return the found playlist URLs as a set of strings.
        NOTE(review): not called within this class — presumably a
        test/inspection helper; confirm before removing.'''
        return set(url.geturl() for url in self._found.keys())

    def _compile_filter(self, filter):
        '''Compile a list of (regexp-string, verdict) rules into
        (compiled-pattern, verdict) tuples; matching is case-insensitive.
        NOTE(review): parameter name shadows the builtin filter().'''
        compiledFilter = []
        for rule in filter:
            compiledFilter.append((re.compile(rule[0], re.I), rule[1]))
        return compiledFilter

    def _crawl(self, current_depth):
        '''
        Crawl all nodes in self._nodes at the given depth, recording
        playlist URLs in self._found, then recurse one level deeper with
        the outgoing links collected along the way.

        Frames/iframes are appended to the current level's node list
        (i.e. crawled at the same depth as their parent page); <a href>
        targets go one level deeper.
        '''
        assert current_depth <= self._depth
        self._log.debug('crawling %i nodes at depth %i' %
                        (len(self._nodes), current_depth))
        self._log.debug('nodes are: %s' % [ x.geturl() for x in self._nodes ])

        next_nodes = set()
        # Convert to a list, so we can append URLs of frames we find.
        nodes_list = list(self._nodes)
        for node in nodes_list:
            # Remember/skip visited nodes.
            if node in self._visited:
                continue;
            self._visited.add(node)

            # Found a playlist?
            match = SuckPls._pls_filename_pattern.match(node.path)
            if (match and not self._found.has_key(node)):
                if self._qualifies(self._pls_filter, node.geturl()):
                    self._log.info('found playlist: %s' % node.geturl())
                    # match.group(1) is the playlist extension.
                    self._found[node] = match.group(1)
                else:
                    self._log.info('rejected playlist: %s' % node.geturl())
                continue

            # Stop crawling at depth limit.
            if current_depth >= self._depth:
                continue

            self._log.debug('considering node: %s' % node.geturl())

            # Restrict to seed domain?
            if not self._leave_domain and not _same_domain(self._seed.netloc,
                                                           node.netloc):
                self._log.debug('not leaving %s: %s'
                                % (self._seed.netloc, node.netloc))
                continue

            # Apply general URL filters.
            if (current_depth > 0 and  # seed and its frames always qualify
                    (not self._qualifies(self._host_filter, node.netloc)
                     or not self._qualifies(self._path_filter, node.path))):
                self._log.debug('rejected URL: %s' % node.geturl())
                continue;

            # Crawl only selected file extensions.
            if (current_depth > 0 and  # seed and its frames always qualify
                    not self._should_crawl_extension(node.path)):
                self._log.debug('rejected extension: %s' % node.path)
                continue

            self._log.info('crawling at depth %i: %s'
                           % (current_depth, node.geturl()))

            # Download and parse.
            try:
                response = self._urlopen(node.geturl())
                self._log.debug('document info: %s'
                                % str(response.info()).replace('\r\n', '|'))
                soup = BeautifulSoup.BeautifulSoup(response)
            except AssertionError:
                raise  # tests expect this
            except:
                self._log.error('failed to retrieve %s: %s'
                                % (node.geturl(), sys.exc_info()))
                continue

            # Is this an "unnamed" playlist? TODO test this
            # A playlist served without a playlist file extension can
            # still be recognized by its Content-Type header.
            # NOTE(review): the second check should probably be 'elif'
            # for symmetry; behavior is unaffected since gettype()
            # returns a single value, so at most one branch can match.
            found_by_mime_type = False
            if response.info().gettype() == 'audio/x-mpegurl':
                self._found[node] = 'm3u'
                found_by_mime_type = True
            if response.info().gettype() == 'audio/x-scpls':
                self._found[node] = 'pls'
                found_by_mime_type = True
            elif response.info().gettype() == 'video/x-ms-asf':
                self._found[node] = 'asx'
                found_by_mime_type = True
            if found_by_mime_type:
                self._log.info('found playlist: %s' % node.geturl())
                continue

            # TODO base href... ignore these? (assert looks for an example)
            # assert not soup.findAll(name='base')

            # Add frames at the current parsing level.
            # src=re.compile('') matches any value, i.e. selects only
            # tags that actually carry a src attribute.
            frames = soup.findAll(name='frame', src=re.compile(''))
            frames.extend(soup.findAll(name='iframe', src=re.compile('')))
            self._log.debug('found %i frames/iframes' % len(frames))
            for frame in frames:
                assert frame.has_key('src'), frame
                url = self._get_absolute_url(node, frame.get('src'))
                if url:
                    nodes_list.append(url)

            # Find links.
            hrefs = soup.findAll(name='a', href=re.compile(''))
            self._log.debug('outgoing links: %i' % len(hrefs));
            for href in hrefs:
                self._log.debug('a href: %s' % href)
                assert href.has_key('href'), href
                url = self._get_absolute_url(node, href.get('href'))
                if url:
                    next_nodes.add(url)

        # Continue as long as we find further nodes.
        if next_nodes:
            self._nodes = next_nodes
            self._crawl(current_depth + 1)

    def _get_absolute_url(self, base, url):
        '''Resolve url (possibly relative) against the split URL base and
        return the result as a split URL, or None if it is not http.'''
        url = urlparse.urlsplit(urlparse.urljoin(base.geturl(), url))
        if not url.scheme == 'http':
            return None
        return url

    # TODO Consider removing this.
    def _urlopen(self, url):
        '''Open url via the shared opener, sending the browser-like
        User-Agent header. Returns the response object.'''
        request = urllib2.Request(url)
        request.add_header('User-Agent', SuckPls._user_agent)
        response = self._opener.open(request)
        return response

    def _should_crawl_extension(self, path):
        '''Return True if the URL path's extension (lowercased; '' when
        there is no dot) is in the crawlable set.'''
        ext = ''
        i = path.rfind('.')
        if i >= 0:
            ext = path[i + 1:].lower()
        return ext in SuckPls._crawl_extensions

    # Apply "filters" to "text". "filters" is a list of 2-tuples (regexp, bool),
    # where True/False designate an inclusion/exclusion rule, respectively. By
    # default (i.e. if no filter matches), returns True.
    def _qualifies(self, filter, text):
        '''Return the verdict of the last matching rule, or True if no
        rule matches. NOTE(review): parameter name shadows builtin.'''
        self._log.debug('filtering "%s"' % text)
        verdict = True
        for pattern, b in filter:
            self._log.debug('applying pattern "%s" to "%s" '
                           % (pattern.pattern, text))
            if pattern.search(text):
                verdict = b
                self._log.debug('match')
            else:
                self._log.debug('no match')
        self._log.debug('verdict: %s' % verdict)
        return verdict

    def _retrieve_and_write(self):
        '''Download and parse every playlist recorded in self._found,
        collect the surviving streams in self.streams (keyed by stream
        URL, so duplicates collapse), sort them, and hand them to all
        configured writers. Produces no output if nothing was found.'''
        self.streams = {}
        for url, ext in self._found.items():
            self._log.info('reading %s playlist at %s' % (ext, url.geturl()))
            try:
                file = self._urlopen(url.geturl())
            except:
                self._log.error('failed to retrieve %s: %s'
                                % (url.geturl(), sys.exc_info()))
                continue
            self._parse(url, file, ext)

        if not self.streams:
            self._log.warning('no streams found; will not produce output')
            return

        # Sort streams by title, then by URL.
        streams_by_title = [ stream for stream in self.streams.values() ]
        streams_by_title.sort(cmp = _compare_streams)

        for writer in self._writers:
            writer.write(self._pls_name, streams_by_title, self._log)

    def _parse(self, url, file, ext):
        '''Parse the downloaded playlist file with the parser registered
        for ext, storing each stream it yields. Parse errors are logged
        and swallowed; url is used for logging only.'''
        parser = self._parsers.get(ext)
        if not parser:
            self._log.error('parsing unimplemented for %s' % ext)
            return

        try:
            streams = parser.parse_file(file)
        except:
            self._log.error('failed to parse %s' % url.geturl())
            self._log.error(sys.exc_info())
            return

        for stream in streams:
            self._store_stream(stream)

    def _store_stream(self, stream):
        '''Apply the title/URL filters to one stream dict and, if it
        qualifies, store a deep copy in self.streams keyed by its URL
        ('file'). Streams without a 'file' entry are logged and dropped.
        NOTE(review): the final clear() empties the caller's dict —
        presumably the parsers reuse one dict per stream; confirm.'''
        if stream.has_key('file'):
            if not stream.has_key('title'):
                stream['title'] = ''  # use an empty title for filtering
            if (self._qualifies(self._title_filter, stream['title'])
                and self._qualifies(self._url_filter, stream['file'])):
                if not stream['title']:  # set URL as title if missing
                    stream['title'] = stream['file']
                self._log.debug('stream: %s' % stream)
                self.streams[stream['file']] = copy.deepcopy(stream)
        else:
            self._log.warning('incomplete: %s' % stream)
        stream.clear()
