"""
This application is used to concatenate and fix the HTML files that
are exported from a CHM. The idea is to generate a single HTML that
is then exported to a PDF.
"""
from BeautifulSoup import BeautifulSoup
from os.path import exists

def find_next(html, blacklist=("index.html",)):
    for alink in html.body.table.tr("a"):
        if alink.img['src'].lower() == 'images/next.gif':
            href = alink['href']
            if href in blacklist:
                href = None
            
            return href

def find_anchor(html):
    return "#" + html.body("table")[1].tr.td.a['name']

def fix_href(alink, links_db):
    try:
        href = alink['href']
        if "http://" not in href and href.startswith("www."):
            alink['href'] = href = "http://" + href

        if ":" not in href \
               and "#" not in href and ".gif" not in href and ".jpg" not in href:
            if not exists(href) and not href.endswith(".html"):
                href += ".html"

            if not exists(href):
                # maybe the filename has strange caps
                href = href.lower()

            if exists(href):
                try:
                    alink['href'] = links_db[href]
                except KeyError:
                    html = BeautifulSoup(open(href).read())
                    alink['href'] = find_anchor(html)
                    links_db[href] = alink['href']
            else:
                # delete bogus links
                del alink['href']
                
        elif "#" in href and ":" not in href:
            alink['href'] = "#" + href.split("#", 1)[1]
    except KeyError:
        pass

def find_content(html, fix_img=False, fix_links=True, links_db=None):
    if links_db is None:
        links_db = {}
        
    data = html.body("table")[1].tr.td
    if fix_img:
        # fix images like this:
        # <img src="foo;21332" />
        for img in data.findAll("img"):
            img['src'] = img['src'].replace(";", "%3B")
    if fix_links:
        for alink in data.findAll("a"):
            fix_href(alink, links_db)
    return data.contents
    

def scrap(filename="toc.html"):
    found = []
    links_db = {}
    while filename not in found and filename is not None:
        found.append(filename)
        html = BeautifulSoup(open(filename).read())
        filename = find_next(html)
        for line in find_content(html, links_db):
            yield line


if __name__ == '__main__':
    print "<html><head><title></title></head><body>"
    for html in scrap():
        print html
    print "</body></html>"
    
