#!/usr/bin/env python

"""
The filter_urls function takes as input the following parameters:
1. Name of the output file produced by wget
2. Starting URL for wget
3. Boolean indicating whether the wget span-hosts option was used

The function currently applies two filters:
1. Check that either the hostnames or the next level domain and hostname subset
    match to filter out redirects wget may have followed to restricted domains.
2. Filter out any URL that points to a robots.txt file.
"""

import re
import sys

from utils import StartingURL, match_hostnames

# Matches URLs that end in "robots.txt"; the dot is escaped so that only a
# literal '.' matches (the original unescaped '.' also matched e.g. "robots_txt").
robots_pattern = re.compile(r'robots\.txt$')

def apply_filters(starting_url, this_url, span):
    """Return True if this_url passes all filters, False otherwise.

    Filters applied:
    1. Hostname check via match_hostnames (rejects redirects to other domains).
    2. Rejection of any URL pointing at a robots.txt file.
    """
    # Guard: hostname mismatch disqualifies the URL outright.
    if not match_hostnames(starting_url, this_url, span):
        return False
    # Accept only if the URL does not end in robots.txt.
    return robots_pattern.search(this_url) is None

def filter_urls(outfile, starting_url, span):
    """Parse a wget output file and return filtered (url, size) pairs.

    Parameters:
        outfile      -- path to the output file produced by wget
        starting_url -- StartingURL object for the wget starting URL
        span         -- True if wget's span-hosts option was used

    Returns a sorted list of unique (url, size) tuples that pass
    apply_filters.  Raises IOError/OSError if outfile cannot be read.
    """
    # Raw strings so the regex escapes (\s, \[) survive intact; the
    # original non-raw literals trigger invalid-escape warnings on Python 3.
    timestamp = r'[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}'
    # Groups: (1) timestamp, (2) URL, (3) size like "[1234/1234]", (4) local file.
    pattern = re.compile(r'^(%s) URL:(.*)\s(\[[0-9/]+\])\s->\s"(.*)"' % timestamp)

    urlset = set()

    # 'with' guarantees the file is closed even on error; iterating the file
    # object streams line by line instead of loading everything via readlines().
    with open(outfile, 'r') as f:
        for line in f:
            m = pattern.search(line)
            if m:
                this_url = m.group(2)
                size = m.group(3)
                if apply_filters(starting_url, this_url, span):
                    # A set deduplicates repeated downloads of the same URL.
                    urlset.add((this_url, size))

    return sorted(urlset)

def print_urls(urls):
    """Print each (url, size) pair on its own line, then the total count.

    Parameters:
        urls -- iterable of (url, size) tuples as returned by filter_urls
    """
    # Python 3 print() function; the original Python-2-only 'print x, y'
    # statements are a syntax error under Python 3.  Output is unchanged.
    for url, size in urls:
        print(url, size)

    print('length: ', len(urls))

if __name__ == "__main__":
    if len(sys.argv) < 4:
        sys.exit()

    outfile = sys.argv[1]
    starting_url = StartingURL(sys.argv[2])
    span = sys.argv[3] != '0'

    urls = filter_urls(outfile, starting_url, span)

    print_urls(urls)
