'''simple site parser for ioks project
(c) 2006 Gregory Petrosyan'''

# TODO add html parsing
# TODO add frames support
# TODO add external sites!
# TODO add something like canonical(url)...
# TODO add something like is_parent(parent_url, child_url)

# optional speed-up: enable the psyco specializing compiler when it is
# installed; silently fall back to plain interpretation otherwise
try:
    import psyco
    psyco.full()
except ImportError:
    pass

import sys
import pickle
import urllib
import itertools as it

from urlparse import urlparse, urljoin
from htmllib import HTMLParser, HTMLParseError
from formatter import NullFormatter, AbstractFormatter, DumbWriter

from db_classes import *

def main():
    if len(sys.argv) != 2:
        print 'Usage: %s <url>' % sys.argv[0]
        return
    for o in dump(sys.argv[1]):
        print o
        #pickle.dump(sys.stdout, o)

class InvalidURLError(Exception):
    '''Raised by dump() when the root url is not a bare http:// url.'''
    pass

def dump(root):
    '''Generate db records (site, pages, links, keywords) for *root*.

    *root* must be a plain http:// url with a host and no params,
    query or fragment; anything else raises InvalidURLError.
    '''
    parsed = urlparse(root)
    root_is_bad = (parsed.scheme != 'http' or not parsed.netloc or
                   parsed.fragment or parsed.params or parsed.query)
    if root_is_bad:
        raise InvalidURLError(root)
    yield site(hash(root), root)
    # media / document / archive extensions we never try to parse as html
    skip_exts = ('jpg', 'jpeg', 'gif', 'png', 'bmp', 'svg', 'psd',
                 'mov', 'avi', 'wmv', 'mpeg', 'swf',
                 'mp3', 'aac', 'ogg',
                 'pdf', 'djvu', 'doc', 'ppt', 'xls', 'txt',
                 'zip', 'tar', 'gz', 'bz2', 'rar', 'arj', '7z', 'iso',
                 'exe', 'msi', 'cab', 'jar')
    for url, url_depth in urls(root, skip_exts):
        for record in dump_page(root, url, url_depth):
            yield record

class Buffer(object):
    '''File-like sink that accumulates everything written to it.

    DumbWriter issues many small write() calls; collecting the pieces
    in a list and joining on read avoids the quadratic cost of
    repeated str concatenation.  The accumulated text is still read
    through the .buf attribute, as before.
    '''
    def __init__(self):
        self._parts = []
    def write(self, s):
        self._parts.append(s)
    @property
    def buf(self):
        # joined on demand; preserves the old `.buf` read interface
        return ''.join(self._parts)
    def __nonzero__(self):
        # DumbWriter truth-tests its file object; stay truthy even when
        # empty, exactly like the original
        return True
    __bool__ = __nonzero__  # same truthiness under Python 3

# report_keywords is here because I don't want to see all
# the keywords in output
def dump_page(root, url, depth, report_keywords=False):
    '''Yield db records (page, link, keyword relations) for one *url*.

    External urls (not under *root*) produce only a stub page record.
    Keyword records are suppressed unless report_keywords is True, to
    keep the output readable.
    '''
    page_id = hash(url)
    external = not url.startswith(root)
    if external:
        purl = urlparse(url)
        # stub record keyed by the foreign site's (scheme, netloc) pair
        yield page(hash(purl[0:2]), page_id, url, depth, -1, '', True)
        return
    uo = urllib.FancyURLopener()
    f = uo.open(url)
    try:
        html = f.read()
    finally:
        f.close()  # don't leak the connection
    # `external` is known to be False on this path
    yield page(hash(root), page_id, url, depth, -1, html, False)
    buf = Buffer()
    parser = AdvLinksExtractor(AbstractFormatter(DumbWriter(buf)))
    parser.feed(html)
    for kwd, kwds in it.groupby(sorted(buf.buf.split())):
        keyword_id = hash(kwd)
        if report_keywords:
            yield page_to_keyword(page_id, keyword_id, len(list(kwds)))
            yield keyword(keyword_id, kwd, -1)
    for lnk, lgr in it.groupby(sorted(parser.links)):
        link_id = hash(lnk)
        yield link(link_id, hash(lnk.href), lnk.title, lnk.text)
        yield page_to_link(page_id, link_id, len(list(lgr)))
        # join with a space so the last title word and the first text
        # word don't fuse into one bogus keyword
        full_link_text = lnk.title + ' ' + lnk.text
        for kwd, kwds in it.groupby(sorted(full_link_text.split())):
            yield link_to_keyword(link_id, hash(kwd), len(list(kwds)))

class AdvLinksExtractor(HTMLParser):
    '''htmllib parser that records every anchor as a link value object.

    The formatter passed to __init__ still receives the page's rendered
    text, so the caller can harvest page keywords from its writer too.
    '''
    class link:
        # value object for one <a> tag; `title` holds the value htmllib
        # passes as `name`, `text` the rendered text between the tags
        def __init__(self, href, title, text=''):
            self.href = href
            self.title = title
            self.text = text
        def __hash__(self):
            return hash((self.href, self.title, self.text))
        def __lt__(self, other):
            # ordering by hash is arbitrary but stable; only needed so
            # sorted()/groupby() can collapse duplicate links
            return hash(self) < hash(other)
        def __eq__(self, other):
            return hash(self) == hash(other)

    def __init__(self, writer):
        HTMLParser.__init__(self, writer)
        self.links = []
    def anchor_bgn(self, href, name, type):
        # htmllib callback at each <a ...>; start buffering anchor text
        self.links.append(self.link(href, name))
        self.save_bgn()
    def anchor_end(self):
        # htmllib callback at each </a>; attach the buffered text to the
        # most recently seen link
        try:
            self.links[-1].text = self.save_end()
        except AttributeError:  # seems to occur because of an htmllib bug:
            pass                # save_end() without a matching save buffer

class LinksExtractor(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self, NullFormatter())
        self.links = []
    def anchor_bgn(self, href, name, type):
        self.links.append(href)

def valid_url(url, ignore_exts):
    purl = urlparse(url)
    if any(url.lower().endswith(e) for e in ignore_exts):
        return False
    if purl.fragment or purl.query or purl.params:
        return False
    if purl.scheme and purl.scheme != 'http':
        return False
    return True

def urls(root, ignore_exts):
    '''Breadth-first crawl from *root*, yielding (url, depth) pairs.

    Urls under *root* are yielded with their bfs depth and then fetched
    and scanned for further links; external urls are yielded once with
    depth -1 and never fetched.  Urls rejected by valid_url() are
    skipped entirely.
    '''
    from collections import deque
    used = set([root])
    depth = {root: 0}
    queue = deque([root])  # deque: O(1) popleft vs O(n) `del list[0]`
    uo = urllib.FancyURLopener()
    while queue:
        url = queue.popleft()
        print >>sys.stderr, '- processing', urllib.unquote(url)
        yield url, depth[url]
        parser = LinksExtractor()
        try:
            doc = uo.open(url)
            try:
                for line in doc:
                    parser.feed(line)
            finally:
                doc.close()  # don't leak the connection
            parser.close()
        except HTMLParseError:
            print >>sys.stderr, '\terror: not a valid html document'
            continue
        valid_links = (u for u in parser.links if valid_url(u, ignore_exts))
        for link in (urljoin(url, lnk) for lnk in valid_links):
            if link not in used:
                used.add(link)
                if link.startswith(root):
                    depth[link] = depth[url] + 1
                    queue.append(link)
                else:
                    yield link, -1  # external url


if __name__ == '__main__':
    main()  # script entry point
