from HTMLParser import HTMLParser
from urlparse import urljoin, urlparse

class AnchorParser(HTMLParser):
    """HTML parser that collects the href target of every <a> tag fed to it."""

    def __init__(self):
        # HTMLParser is an old-style class in Python 2, so call __init__
        # directly rather than via super().
        HTMLParser.__init__(self)
        # href values, in document order
        self.anchors = []

    def handle_starttag(self, tag, attrs):
        # attrs is a list of (name, value) pairs; when an attribute is
        # repeated the last occurrence wins, matching dict(attrs) semantics.
        if tag != 'a':
            return
        href = None
        for attr_name, attr_value in attrs:
            if attr_name == 'href':
                href = attr_value
        if href:
            self.anchors.append(href)

def parse_http_archive(opts, args, response):
    """Parse a Mailman archive web page, returning links to mbox files.

    Args:
        opts: options object providing ``begindate``/``enddate`` (date-like,
            with .year and .month) and a ``logger``.
        args: sequence whose first element is the page URL, used to resolve
            relative links into absolute ones.
        response: file-like object whose read() returns the archive HTML.

    Returns:
        dict mapping 'YYYY-MM' strings to absolute .gz archive URLs.
    """
    mb_list = {}
    begin = (opts.begindate.year, opts.begindate.month)
    end = (opts.enddate.year, opts.enddate.month)
    ap = AnchorParser()
    ap.feed(response.read())
    for a in ap.anchors:
        # Last path component of the link, split on dots:
        # 'pgsql-jobs.2003-07.gz' -> ['pgsql-jobs', '2003-07', 'gz']
        split_url = urlparse(a)[2].split('/')[-1].split('.')
        # For now, only look at links ending in gz in the three-part
        # '<list>.<YYYY-MM>.gz' format most archive files follow.
        if split_url[-1] != 'gz' or len(split_url) != 3:
            opts.logger.debug("Link format not recognized: %s.", '.'.join(split_url))
            continue
        split_adate = split_url[1].split('-')
        try:
            adate = (int(split_adate[0]), int(split_adate[1]))
        except (ValueError, IndexError):
            # Middle component is not 'YYYY-MM'; skip and log rather than
            # crashing on int().
            opts.logger.debug("Link format not recognized: %s.", '.'.join(split_url))
            continue
        # Don't even bother adding the link if the month isn't in the range
        # provided.  Compare (year, month) tuples so ranges that span a year
        # boundary (e.g. 2003-11 .. 2004-02) are handled correctly; comparing
        # year and month independently wrongly rejected months outside the
        # begin/end month even when the year was in range.
        if begin <= adate <= end:
            opts.logger.debug('Added link: %s', a)
            # We store absolute URL's for ease of use later
            if a.startswith('http'):
                mb_list[split_url[1]] = a
            else:
                mb_list[split_url[1]] = urljoin(args[0], a)
        else:
            opts.logger.debug('Skipped link: %s', a)

    return mb_list

def parse_ftp_archive(opts, args, response):
    """Parse an FTP directory listing, returning links to mbox archives.

    Args:
        opts: options object providing ``begindate``/``enddate`` (date-like,
            with .year and .month) and a ``logger``.
        args: sequence whose first element is the base URL used to build
            absolute links.
        response: file-like object whose read() returns the raw CRLF-separated
            listing; each line is expected to end with a 'YYYY-MM' suffix.

    Returns:
        dict mapping 'YYYY-MM' strings to absolute archive URLs.
    """
    mb_list = {}
    begin = (opts.begindate.year, opts.begindate.month)
    end = (opts.enddate.year, opts.enddate.month)
    # Each listing line ends with a 7-character 'YYYY-MM' suffix; the empty
    # element after the trailing CRLF is dropped with [:-1].
    links = [line[-7:] for line in response.read().split('\r\n')[:-1]]
    for link in links:
        split_adate = link.split('-')
        adate = (int(split_adate[0]), int(split_adate[1]))
        # Don't even bother adding the link if the month isn't in the range
        # provided.  Compare (year, month) tuples so ranges that span a year
        # boundary (e.g. 2003-11 .. 2004-02) are handled correctly; comparing
        # year and month independently wrongly rejected months outside the
        # begin/end month even when the year was in range.
        if begin <= adate <= end:
            opts.logger.debug('Added link: %s', link)
            # We store absolute URL's for ease of use later
            mb_list[link] = urljoin(args[0], link)

    return mb_list
