#!/usr/bin/env python
#
# Copyright (C) 2007 Lemur Consulting Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
r"""build.py: Build the documentation for Xappy.

"""
__docformat__ = "restructuredtext en"

import copy
import os
import shutil
import sys
import lxml.html
from lxml.cssselect import CSSSelector

# Set the locale, if possible, so rst2html doesn't produce localised output.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_string, default_description
import epydoc.cli

# ReStructuredText source files to convert to HTML.
# Paths are relative to the "docs/src" directory; the "website/" prefix is
# stripped from output paths (see convert_rst), so website pages land at the
# top of the built tree.
rst_files = (
    'core/colour.rst',
    'core/image.rst',
    'core/index.rst',
    'core/tutorial.rst',
    'core/queries.rst',
    'core/weighting.rst',
    'server/api.rst',
    'server/future.rst',
    'server/tutorial/tutorial1.rst',
    'clients/perl.rst',
    'clients/php.rst',
    'website/docs.rst',
    'website/downloads.rst',
    'website/index.rst',
)

# Static media files copied verbatim into the built directory.
# Paths are relative to the "docs/src/website" directory.
media_files = (
    'xappy.css',
    'xappy.logo.300.png',
)

def call_rst2html(srcfile):
    """Convert a reStructuredText file to an HTML document string.

    `srcfile` is the path of the RST file to read (assumed UTF-8 encoded).
    Returns the full HTML output (UTF-8 encoded bytes) produced by
    docutils' HTML writer.

    """
    # Use a context manager so the file handle is closed promptly even if
    # reading raises (the original left the handle to be GC'd).
    with open(srcfile) as fd:
        srcdata = fd.read()
    return publish_string(srcdata, writer_name='html',
                          settings_overrides={
                            'output_encoding': 'utf8',
                            'input_encoding': 'utf8',
                          })

def call_epydoc(*args):
    """Run epydoc's command-line interface with the given arguments.

    epydoc.cli.cli() takes its options from sys.argv, so we fake a command
    line with 'epydoc' as the program name followed by `args`.

    """
    sys.argv = ['epydoc'] + list(args)
    epydoc.cli.cli()

def convert_rst(srcdir, outdir, template, files):
    """Convert RST files to HTML pages wrapped in the site template.

    `srcdir` is the directory containing the RST sources and the template.
    `outdir` is the directory to write the generated HTML files into
    (subdirectories are created as needed).
    `template` is the path (relative to `srcdir`) of the HTML template; it
    must contain a <div id="content">, a <div id="title">, a <title>
    element, and a <div id="menu"> holding <p> menu items.
    `files` is a sequence of RST file paths relative to `srcdir`; a leading
    "website/" component is stripped from the output path.

    """
    # Selector for the content in html generated from RST.
    rst_content_sel = CSSSelector('div.document')
    rst_title_sel = CSSSelector('h1.title')

    # Selector for the content in the html template.
    html_content_sel = CSSSelector('div#content')
    html_pagetitle_sel = CSSSelector('title')
    html_title_sel = CSSSelector('div#title')
    html_menu_sel = CSSSelector('div#menu p')

    # Read and parse the template once; each page works on a deep copy.
    tmpl_orig = lxml.html.parse(os.path.join(srcdir, template)).getroot()

    for file in files:
        print "Converting %r" % file

        # Work out the input and output paths.  Pages under "website/" are
        # moved to the top level of the output tree.
        srcfile = os.path.join(srcdir, file)
        if file.startswith('website/'):
            file = file[8:]
        file = os.path.splitext(file)[0] + '.html'
        destfile = os.path.join(outdir, file)

        # Select parts of the template (first match of each selector).
        tmpl = copy.deepcopy(tmpl_orig)
        tmplbody = html_content_sel(tmpl)[0]
        tmplpagetitle = html_pagetitle_sel(tmpl)[0]
        tmpltitle = html_title_sel(tmpl)[0]
        tmplmenuitems = html_menu_sel(tmpl)

        # Clear the template parts (remove placeholder children).
        for item in tmplbody:
            tmplbody.remove(item)
        for item in tmpltitle:
            tmpltitle.remove(item)

        # Convert the RST file, parse it, and get the content.
        rstout = call_rst2html(srcfile)
        doc = lxml.html.fromstring(rstout)

        # Pull the document body out, and detach its <h1 class="title"> so
        # the title can be placed in the template's own title slots instead.
        content = rst_content_sel(doc)[0]
        title_element = rst_title_sel(content)[0]
        title = title_element.text
        content.remove(title_element)

        # Insert the content into the template.
        # NOTE(review): lxml's insert() *moves* each child out of `content`
        # while we iterate it; iterating in reverse and inserting at index 0
        # appears intended to keep the order correct — confirm before
        # restructuring this loop.
        for child in content.iterchildren(reversed=True):
            tmplbody.insert(0, child)

        tmplpagetitle.text = "Xappy: " + title
        tmpltitle.text = title

        # Un-link the menu entry pointing at the current page: replace the
        # <a> (whose href starts with a 7-char prefix, presumably "local:/")
        # with its plain text so the current page isn't a link to itself.
        for item in tmplmenuitems:
            href = item[0].attrib['href'][7:]
            if href == file:
                itemtext = item[0].text
                item.remove(item[0])
                item.text = itemtext

        # Generate a relative path to topdir from the file.
        reltop = '../' * (len(file.split('/')) - 1)

        # Fix up links in the content:
        #  - fragment-only (#...) and absolute (/...) links are untouched;
        #  - "local:/..." links become relative to the output top directory;
        #  - "local:..." links are used as-is with the prefix stripped;
        #  - all other relative links are rebased to the top directory.
        # NOTE(review): oldlink[6] raises IndexError for a link that is
        # exactly "local:" — presumably never occurs in the docs; verify.
        def repl_func(oldlink):
            if len(oldlink) == 0: return oldlink
            if oldlink[0] in '#/': return oldlink
            if oldlink.startswith('local:'):
                if oldlink[6] == '/':
                    return reltop + oldlink[7:]
                return oldlink[6:]
            return reltop + oldlink
        tmpl.rewrite_links(repl_func)

        # Write to the output file, creating parent directories as needed.
        if not os.path.isdir(os.path.dirname(destfile)):
            os.makedirs(os.path.dirname(destfile))
        fd = open(destfile, 'wb')
        fd.write(lxml.html.tostring(tmpl))
        fd.close()

def copy_media(srcdir, outdir, media_files):
    """Copy each named media file from `srcdir` into `outdir`.

    `media_files` is a sequence of file names relative to both directories.

    """
    for name in media_files:
        source = os.path.join(srcdir, name)
        destination = os.path.join(outdir, name)
        shutil.copy(source, destination)

def build_website(srcdir, outdir):
    """Build the website into `outdir` from the sources in `srcdir`.

    Generates the HTML pages from the RST sources, then copies the static
    media files alongside them.

    """
    site_template = 'website/index.html'
    convert_rst(srcdir, outdir, site_template, rst_files)
    # API documentation generation is currently disabled:
    #call_epydoc('-o', 'docs/api', '--name', 'xappy', '--no-private', 'xappy')

    media_srcdir = os.path.join(srcdir, 'website')
    copy_media(media_srcdir, outdir, media_files)

if __name__ == '__main__':
    # The source tree is the directory containing this script.
    source_dir = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
    # Output goes into a sibling "built" directory, recreated from scratch
    # so stale files from a previous build never survive.
    built_dir = os.path.join(os.path.dirname(source_dir), 'built')
    if os.path.exists(built_dir):
        shutil.rmtree(built_dir)
    os.mkdir(built_dir)
    build_website(source_dir, built_dir)
