#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
    from collections import OrderedDict
except ImportError:
    # if on python 2.6
    from ordereddict import OrderedDict

from collections import defaultdict
from operator import attrgetter, itemgetter
from datetime import datetime
from pprint import pprint
from tempfile import mkstemp
from io import BytesIO
from itertools import islice
from wsgiref.handlers import format_date_time as format_http_date
import codecs
import logging
import logging.handlers
import os
import re
import sys
import time
import calendar
import filecmp

# 3rd party
import pkg_resources
from lxml import etree
from lxml.builder import ElementMaker
from lxml.etree import XSLT
import lxml.html


from rdflib import Graph
from rdflib import Literal, Namespace, URIRef
import bs4
import requests
import requests.exceptions

import six
from six import text_type as str

if six.PY3:
    from urllib.parse import quote, unquote 
else:
    from urllib import quote, unquote  # NOQA

# mine
from ferenda import util, errors, decorators
from ferenda import Describer, LayeredConfig, TripleStore, FulltextIndex, Document, DocumentEntry, NewsCriteria, TocCriteria, TocPageset, TocPage, DocumentStore
from ferenda.elements import AbstractElement, serialize, Body, Nav, Link, Section, Subsection, Subsubsection, Heading, UnorderedList, ListItem, Preformatted, Paragraph
from ferenda.elements.html import elements_from_soup
from ferenda.thirdparty import patch
__version__ = (1, 6)
__author__ = "Staffan Malmgren <staffan@tomtebo.org>"


class DocumentRepository(object):
    """Base class for downloading, parsing and generating HTML versions of a repository of documents.

    Start building your application by subclassing this class, and
    then override methods in order to customize the downloading,
    parsing and generation behaviour.

    :param \*\*kwargs: Any named argument overrides any
                   similarly-named configuration file parameter.
    
    Example:
    
    >>> class MyRepo(DocumentRepository):
    ...     alias="myrepo"
    ...
    >>> d = MyRepo(datadir="/tmp/ferenda")
    >>> d.store.downloaded_path("mybasefile").replace(os.sep,'/') == '/tmp/ferenda/myrepo/downloaded/mybasefile.html'
    True
    """

#    There are seven main entry points into the module, with the
#    following principal call chains:
#
#    download
#        do_download
#            download_single
#                downloaded_path
#                download_if_needed
#                remote_url
#                download_update_entry
#    parse
#        parsed_path
#        soup_from_basefile
#        parse_from_soup
#        render_xhtml
#
#    relate
#
#    generate
#        generated_file
#        prep_annotation_file
#            graph_to_annotation_file
#
#    toc
#        toc_query
#        toc_criteria
#            toc_predicates
#        toc_item
#
#    news
#        news_selections
#        news_selection
#            news_get_entry
#
#    frontpage_content

    ################
    # general class properties
    #
    # NOTE: the bare triple-quoted strings following each attribute
    # below are Sphinx "attribute docstrings" -- they document the
    # attribute in the generated API docs.

    # FIXME: Duplicated in documentstore -- how do we unify?
    # (__init__ copies this value onto self.store.downloaded_suffix)
    downloaded_suffix = ".html"
    """File suffix for the main document format. Determines the suffix
    of downloaded files."""

    # FIXME: Duplicated in documentstore -- how do we unify?
    # (__init__ copies this value onto self.store.storage_policy)
    storage_policy = "file"
    """Some repositories have documents in several formats, documents
    split amongst several files or embedded resources. If
    ``storage_policy`` is set to ``dir``, then each document gets its own
    directory (the default filename being ``index`` +suffix),
    otherwise each doc gets stored as a file in a directory with other
    files.  Affects
    :py:meth:`~ferenda.DocumentRepository.path` (and therefore
    all other ``*_path`` methods)"""

    alias = "base"
    """A short name for the class, used by the command line
    ``ferenda-build.py`` tool. Also determines where to store
    downloaded, parsed and generated files. When you subclass
    :py:class:`~ferenda.DocumentRepository` you *must* override
    this."""

    # __init__ expands this list into the self.ns dict (prefix ->
    # rdflib Namespace object)
    namespaces = ['rdf','rdfs','xsd','dct','skos','foaf','xhv','owl','prov','bibo']
    """The namespaces that are included in the XHTML and RDF files
    generated by :py:meth:`~ferenda.DocumentRepository.parse`. This
    can be a list of strings, in which case the strings are assumed to
    be well-known prefixes to established namespaces, or a list of
    (prefix, namespace) tuples. All well-known prefixes are available
    in :py:data:`ferenda.util.ns`."""
    
    ################
    # download() related class properties

    start_url = "http://example.org/"
    """The main entry page for the remote web store of documents. May
    be a list of documents, a search form or whatever. If it's
    something more complicated than a simple list of documents, you
    need to override :py:meth:`~ferenda.DocumentRepository.download`
    in order to tell which documents are to be downloaded."""

    document_url_template = "http://example.org/docs/%(basefile)s.html"
    """A string template for creating URLs for individual documents on
    the remote web server. Directly used by
    :py:meth:`~ferenda.DocumentRepository.remote_url` and indirectly
    by :py:meth:`~ferenda.DocumentRepository.download_single`."""

    # Use raw strings for the regex attributes below so that backslash
    # sequences like \w reach the re module verbatim (non-raw "\w" is
    # an invalid string escape, deprecated in modern Python).
    document_url_regex = r"http://example.org/docs/(?P<basefile>\w+).html"
    """A regex that matches URLs for individual documents -- the
    reverse of what
    :py:data:`~ferenda.DocumentRepository.document_url_template` is
    used for. Used by
    :py:meth:`~ferenda.DocumentRepository.download()` to find suitable
    links if :py:data:`~ferenda.DocumentRepository.basefile_regex`
    doesn't match. Must define the named group ``basefile`` using the
    ``(?P<basefile>...)`` syntax"""

    # matches "ID: foo/123" or "ID: Bar:Baz/Quux" but not "ID: Foo bar"
    basefile_regex = r"^ID: ?(?P<basefile>[\w\d\:\/]+)$"
    """A regex for matching document names in link text, as used by
    :py:meth:`~ferenda.DocumentRepository.download()`. Must define a
    named group ``basefile``, just like
    :py:data:`~ferenda.DocumentRepository.document_url_regex`."""

    ################
    # parse() specific class properties

    # default is foaf:Document (built by indexing the foaf namespace
    # from util.ns)
    rdf_type = Namespace(util.ns['foaf'])['Document']
    """The RDF type of the documents you are handling (expressed as a
    :py:class:`rdflib.term.URIRef` object)."""

    source_encoding = "utf-8"
    """The character set that the source HTML documents use (if
    applicable)."""

    lang = "en"
    """The language which the source documents are assumed to be
    written in (unless otherwise specified), and the language which
    output document should use."""

    # css selectors, handled by BeautifulSoup's select() method
    parse_content_selector = "body"
    """CSS selector used to select the main part of the document
    content by the default
    :py:meth:`~ferenda.DocumentRepository.parse` implementation."""

    parse_filter_selectors = ["script"]
    """CSS selectors used to filter/remove certain parts of the
    document content by the default
    :py:meth:`~ferenda.DocumentRepository.parse` implementation."""

    ################
    # generate() specific class properties
    xslt_template = "res/xsl/generic.xsl"
    """A template used by
    :py:meth:`~ferenda.DocumentRepository.generate` to transform the
    XML file into browser-ready HTML. If your document type is
    complex, you might want to override this (and write your own XSLT
    transform). You should include ``base.xslt`` in that template,
    though."""

    sparql_annotations = "res/sparql/annotations.rq"
    """A template for annotations."""

    # The class used to construct self.store in __init__ (and the
    # patch store in patch_if_needed). Override in a subclass to use
    # a customized DocumentStore implementation.
    documentstore_class = DocumentStore
    
    def __init__(self, **kwargs):
        """See class docstring for constructor doc.

        :param \*\*kwargs: Any named argument overrides any
                           similarly-named configuration file
                           parameter.
        """
        # Named arguments override the hard-coded defaults; a config
        # file layered on top of that is handled by LayeredConfig.
        codedefaults = self.get_default_options()
        defaults = util.merge_dict_recursive(codedefaults, kwargs)
        self.config = LayeredConfig(defaults=defaults)
        # The store implementation can be customized by overriding
        # the documentstore_class class attribute.
        self.store = self.documentstore_class(
            os.path.join(self.config.datadir, self.alias))
        # should documentstore have a connection to self, ie
        # self.store = DocumentStore(basedir, self) ?
        self.store.downloaded_suffix = self.downloaded_suffix
        self.store.storage_policy = self.storage_policy

        logname = self.alias
        # alternatively (nonambigious and helpful for debugging, but verbose)
        # logname = self.__class__.__module__+"."+self.__class__.__name__
        self.log = self._setup_logger(logname)

        # Build the prefix -> rdflib Namespace mapping declared by the
        # namespaces class attribute.
        self.ns = {}
        for ns in self.namespaces:
            if isinstance(ns, tuple):
                prefix, uri = ns
                self.ns[prefix] = Namespace(uri)
            else:
                # assume that any standalone prefix is well known
                self.ns[ns] = Namespace(util.ns[ns])

    def get_default_options(self):
        """Returns the class' configuration default configuration
        properties. These can be overridden by a configution file, or
        by named arguments to
        :py:meth:`~ferenda.DocumentRepository.__init__`. See
        :ref:`configuration` for a list of standard configuration
        properties (your subclass is free to define and use additional
        configuration properties).

        :returns: default configuration properties
        :rtype: dict
        """

        
        return {'loglevel': 'INFO',
                'datadir': 'data',
                'patchdir': 'patches',
                'force': False,
                'parseforce': False,
                'generateforce': False,
                'fsmdebug': False,
                'refresh': False,
                'lastdownload': None,
                'conditionalget': True,
                'url': 'http://localhost:8000/',
                'fulltextindex': False,
                'useragent': 'ferenda-bot',
                'storetype': 'SQLITE',
                'storelocation': 'data/ferenda.sqlite',
                'storerepository': 'ferenda',
                'indexlocation': 'data/whooshindex',
                'combineresources': False,
                'cssfiles': ['http://fonts.googleapis.com/css?family=Raleway:200,100',
                             'res/css/normalize.css',
                             'res/css/main.css',
                             'res/css/ferenda.css'],
                'jsfiles': ['res/js/jquery-1.9.0.js',
                            'res/js/modernizr-2.6.2-respond-1.1.0.min.js',
                            'res/js/ferenda.js']}

    def list_basefiles_for(self, action, basedir=None):
        """Get all available basefiles that can be used for the
        specified action.

        :param action: The action for which to get available
                       basefiles (``parse``, ``relate``, ``generate``
                       or ``news``)
        :type action: str
        :param basedir: The base directory in which to search for
                        available files. If not provided, defaults to
                        ``self.config.datadir``.
        :type basedir: str
        :returns: All available basefiles
        :rtype: generator
        """
        return self.store.list_basefiles_for(action,basedir)


    @classmethod
    def setup(cls, action, config):
        """Runs before any of the _all methods starts executing"""
        if hasattr(cls, action + "_all_setup"):
            cbl = getattr(cls, action + "_all_setup")
            return cbl(config)
 
    @classmethod
    def teardown(cls, action, config):
        """Runs after any of the _all methods has finished executing"""
        if hasattr(cls, action + "_all_teardown"):
            cbl = getattr(cls, action + "_all_teardown")
            return cbl(config)

    def get_archive_version(self,basefile):
        """Get a version identifier for the current version of the
        document identified by ``basefile``.

        The default implementation simply increments most recent
        archived version identifier, starting at "1". If versions in
        your docrepo are normally identified in some other way (such
        as SCM revision numbers, dates or similar) you should override
        this method to return those identifiers.

        :param basefile: The basefile of the document to archive
        :type basefile: str
        :returns: The version identifier for the current version of
                  the document.
        :rtype:   str
        """
        return str(len(list(self.store.list_versions(basefile)))+1)

    def context(self):
        """The context URI under which RDF statements should be
        stored.

        :returns: The context URI
        :rtype:   str
        """
        return "http://example.org/ctx/%s" % (self.alias)

    def qualified_class_name(self):
        """The qualified class name of this class

        :returns: class name (e.g. ``ferenda.DocumentRepository``)
        :rtype:   str
        """
        return self.__class__.__module__ + "." + self.__class__.__name__

    def canonical_uri(self, basefile):
        """The canonical URI for the document identified by ``basefile``.

        :returns: The canonical URI
        :rtype: str
        """
        # Note that there might not be a 1:1 mappning between
        # documents/basefiles and URIs -- don't know what we should do
        # in those cases.
        #
        # It might also be impossible to provide the canonical_uri
        # without actually parse()ing the document
        return "%sres/%s/%s" % (self.config.url, self.alias, basefile)

    def basefile_from_uri(self, uri):
        """The reverse of canonical_uri. Returns None if the uri doesn't map to a basefile in this repo."""
        if uri.startswith(self.config.url + "res/"):
            path = uri[len(self.config.url + "res/"):]
            if "/" in path:
                alias, basefile = path.split("/", 1)
                if alias == self.alias:
                    return basefile
        
        
    ################################################################
    #
    # STEP 1: Download documents from the web
    #
    ################################################################
    @decorators.action
    def download(self, basefile=None):
        """Downloads all documents from a remote web service.

        The default generic implementation assumes that all documents
        are linked from a single page (which has the url of
        :py:data:`~ferenda.DocumentRepository.start_url`), that they
        all have URLs matching the
        :py:data:`~ferenda.DocumentRepository.document_url_regex` or
        that the link text is always equal to basefile (as determined
        by :py:data:`~ferenda.DocumentRepository.basefile_regex`). If
        these assumptions don't hold, you need to override this
        method.

        If you do override it, your download method should read and set the
        ``lastdownload`` parameter to either the datetime of the last
        download or any other module-specific string (id number or
        similar).

        You should also read the ``refresh`` parameter. If it is
        ``True`` (the default), then you should call
        :py:meth:`~ferenda.DocumentRepository.download_single` for
        every basefile you encounter, even though they may already
        exist in some form on
        disk. :py:meth:`~ferenda.DocumentRepository.download_single`
        will normally be using conditional GET to see if there is a
        newer version available.

        See :ref:`implementing-download` for more details.

        :returns: True if any document was downloaded, False otherwise.
        :rtype: bool
        """

        # A single basefile was requested: delegate to
        # download_single (only possible when document_url_template
        # can derive its URL).
        if basefile:
            if self.document_url_template:
                return self.download_single(basefile)
            else:
                raise ValueError("Downloading single basefile '%s' not supported (no way to convert basefile to url)" % basefile)
        lastdownload = self.config.lastdownload
        if lastdownload:
            self.log.debug("download: Last download was at %s" % lastdownload)
        else:
            self.log.debug("download: Starting full download")
        # NOTE: This very generic implementation of download has no
        # use for lastdownload, as all the documents it can find are
        # the one linked from the start page. Therefore it's not used
        # for anything else than a diagnostic tool.

        refresh = self.config.refresh
        if refresh:
            self.log.debug("download: Refreshing all downloaded files")
        else:
            self.log.debug("download: Not re-downloading downloaded files")

        self.log.debug("Starting at %s" % self.start_url)
        # url_regex = self.document_url.replace("%s", "(.*)")
        updated = False

        # Fetch the start page and absolutize all links so that
        # download_get_basefiles sees fully-qualified URLs.
        # self.browser.open(self.start_url)
        resp = requests.get(self.start_url)
        tree = lxml.html.document_fromstring(resp.text)
        tree.make_links_absolute(self.start_url, resolve_base_href=True)
        # A cap on the number of documents to fetch can come from
        # config or the FERENDA_DOWNLOADMAX environment variable
        # (config takes precedence).
        if 'downloadmax' in self.config or 'FERENDA_DOWNLOADMAX' in os.environ:
            if 'downloadmax' in self.config:
                maxdoc = int(self.config.downloadmax)
            else:
                maxdoc = int(os.environ['FERENDA_DOWNLOADMAX'])
            self.log.info("Only downloading max %s documents" % maxdoc)
            links = islice(self.download_get_basefiles(tree.iterlinks()), maxdoc)
        else:
            links = self.download_get_basefiles(tree.iterlinks())
        # Download everything not already on disk (all of it when
        # refresh is set).
        for (basefile, link) in links:
            if (refresh or
                (not os.path.exists(self.store.downloaded_path(basefile)))):
                ret = self.download_single(basefile, link)
                updated = updated or ret

        self.config.lastdownload = datetime.now()
        return updated

    def download_get_basefiles(self, source):
        for (element, attribute, link, pos) in source:
            url = basefile = None

            # Two step process: First examine link text to see if
            # basefile_regex match. If not, examine link url to see
            # if document_url_regex
            if (self.basefile_regex and
                element.text and
                re.search(self.basefile_regex, element.text)):
                m = re.search(self.basefile_regex, element.text)
                yield(m.group("basefile"), link)
            elif self.document_url_regex and re.match(self.document_url_regex, link):
                m = re.match(self.document_url_regex, link)
                if m:
                    yield(m.group("basefile"), link)
                    

    def download_single(self, basefile, url=None):
        """Downloads the document from the web (unless explicitly
        specified, the URL to download is determined by
        :py:meth:`~ferenda.DocumentRepository.remote_url` combined
        with basefile, the location on disk is determined by
        ``self.store.downloaded_path()``).

        If the document exists on disk, but the version on the web is
        unchanged (determined using a conditional GET), the file on disk
        is left unchanged (i.e. the timestamp is not modified).

        :param basefile: The basefile of the document to download
        :param url: The URL to download (optional)
        :type basefile: string
        :type url: string or None
        :returns: True if the document was downloaded and stored on
                  disk, False if the file on disk was not updated.
        """
        if url is None:
            url = self.remote_url(basefile)

        filename = self.store.downloaded_path(basefile)
        # if the file doesn't exist yet, a successful download creates it
        created = not os.path.exists(filename)
        # util.print_open_fds()
        updated = False
        if self.download_if_needed(url, basefile):
            if created:
                self.log.info("%s: downloaded from %s" % (basefile, url))
            else:
                self.log.info(
                    "%s: downloaded new version from %s" % (basefile, url))
            updated = True
        else:
            self.log.debug("%s: exists and is unchanged" % basefile)

        # Record provenance data about this download attempt in the
        # per-document entry file.
        entry = DocumentEntry(self.store.documententry_path(basefile))
        now = datetime.now()
        entry.orig_url = url
        if created:
            entry.orig_created = now
        if updated:
            entry.orig_updated = now
        # a download attempt was always made, so the document was
        # always checked (previously guarded by a constant True flag)
        entry.orig_checked = now
        entry.save()

        return updated


    def _addheaders(self, filename=None):
        headers = {"User-agent": self.config.useragent}
        if filename:
            if os.path.exists(filename + ".etag"):
                headers["If-none-match"] = util.readfile(filename + ".etag")
            elif os.path.exists(filename):
                stamp = os.stat(filename).st_mtime
                headers["If-modified-since"] = format_http_date(stamp)
        return headers

    def download_if_needed(self, url, basefile, archive=True, filename=None):
        """Downloads a remote resource to a local file. If a different
        version is already in place, archive that old version.

        .. note::

           The ``archive`` parameter is currently not consulted by
           this implementation -- an existing, differing file is
           always archived.

        :param      url: The url to download
        :type       url: str
        :param basefile: The basefile of the document to download
        :type  basefile: str
        :param  archive: Whether to archive existing older versions of
                         the document, or just delete the previously
                         downloaded file.
        :type   archive: bool
        :param filename: The filename to download to. If not provided,
                         the filename is derived from the supplied
                         basefile
        :type  filename: str
        :returns:        True if the local file was updated (and archived),
                         False otherwise.
        :rtype:          bool
        """
        if not filename:
            filename = self.store.downloaded_path(basefile)
        if self.config.conditionalget:
            # sets if-none-match or if-modified-since (in that order) headers
            headers = self._addheaders(filename)
        else:
            headers = self._addheaders()

        # Reserve a temporary file name; close the fd immediately
        # since we only need the path (the content is written below).
        fileno, tmpfile = mkstemp()
        fp = os.fdopen(fileno)
        fp.close()

        # Since this part, containing the actual HTTP request call, is
        # called repeatedly, we take extra precautions in the event of
        # temporary network failures etc. Try 5 times with 1 second
        # pause inbetween before giving up.
        fetched = False
        remaining_attempts = 5
        try:
            while (not fetched) and (remaining_attempts > 0):
                try:
                    response = requests.get(url, headers=headers)
                    fetched = True
                except requests.exceptions.ConnectionError as e:
                    self.log.warning("Failed to fetch %s: error %s (%s remaining attempts)" % (url, e, remaining_attempts))
                    # close session in hope that this rectifies things
                    s = requests.Session()
                    s.close()
                    remaining_attempts -= 1
                    time.sleep(1)

            if not fetched:
                self.log.error("Failed to fetch %s, giving up" % url)
                return False
        # handles other errors except ConnectionError
        except requests.exceptions.RequestException as e:
            self.log.error("Failed to fetch %s: error %s" % (url, e))
            raise e

        if response.status_code == 304:
            self.log.debug("%s: 304 Not modified" % url)
            return False  # ie not updated
        elif response.status_code >= 400:
            # was "> 400", which let 400 Bad Request responses through
            # and stored the error page as document content
            response.raise_for_status()

        with open(tmpfile, "wb") as fp:
            fp.write(response.content)

        if not os.path.exists(filename):
            util.robust_rename(tmpfile, filename)
            updated = True
        elif not filecmp.cmp(tmpfile, filename, shallow=False):
            # the file on disk differs from what we fetched: archive
            # the old version before putting the new one in place
            version = self.get_archive_version(basefile)
            self.store.archive(basefile, version)
            util.robust_rename(tmpfile, filename)
            updated = True
        else:
            # identical content -- remove the now-unneeded tempfile
            # (it was previously leaked on this path)
            os.unlink(tmpfile)
            updated = False

        if updated:
            # OK we have a new file in place. Now examine the
            # headers to find if we should change file
            # modification time (last-modified) and/or create a
            # .etag file (etag). Use .get() since either header may
            # be absent -- plain indexing raised KeyError in that
            # case.
            if response.headers.get("last-modified"):
                mtime = calendar.timegm(util.parse_rfc822_date(
                    response.headers["last-modified"]).timetuple())
                # FIXME: set a orig_lastmodified on DocumentEntry
                os.utime(filename, (time.time(), mtime))
            # FIXME: set this on DocumentEntry (orig_etag) instead
            # of writing a separate file
            if response.headers.get("etag"):
                with open(filename + ".etag", "w") as fp:
                    fp.write(response.headers["etag"])
        return updated

    def remote_url(self, basefile):
        """Get the URL of the source document at it's remote location,
        unless the source document is fetched by other means or if it
        cannot be computed from basefile only. The default
        implementation uses
        :py:data:`~ferenda.DocumentRepository.document_url_template`
        to calculate the url.

        Example:

        >>> d = DocumentRepository()
        >>> d.remote_url("123/a") == 'http://example.org/docs/123/a.html'
        True
        >>> d.document_url_template = "http://mysite.org/archive/%(basefile)s/"
        >>> d.remote_url("123/a") == 'http://mysite.org/archive/123/a/'
        True
        
        :param basefile: The basefile of the source document
        :type basefile: str
        :returns: The remote url where the document can be fetched, or ``None``.
        :rtype: str
        """
        return self.document_url_template % {'basefile':quote(basefile)}

    def generic_url(self, basefile, maindir, suffix):
        """
        Analogous to
        :py:meth:`ferenda.DocumentStore.path`, calculate
        the full local url for the given basefile and stage of
        processing.

        :param basefile: The basefile for which to calculate the local url
        :type  basefile: str
        :param  maindir: The processing stage directory (normally
                         ``downloaded``, ``parsed``, or ``generated``)
        :type   maindir: str
        :param   suffix: The file extension including period (i.e. ``.txt``,
                         not ``txt``)
        :type    suffix: str
        :returns: The local url
        :rtype: str
        """
        path = "%s/%s/%s%s" % (self.alias, maindir, basefile, suffix)
        return self.config.url + path


    def downloaded_url(self, basefile):
        """Get the full local url for the downloaded file for the
        given basefile.
        
        :param basefile: The basefile for which to calculate the local url
        :type  basefile: str
        :returns: The local url
        :rtype: str
        """

        return self.generic_url(basefile, 'downloaded', self.downloaded_suffix)


    ################################################################
    #
    # STEP 2: Parse the downloaded data into a structured XML document
    # with RDFa metadata.
    #
    ################################################################

    @classmethod
    def parse_all_setup(cls, config):
        """
        Runs any action needed prior to parsing all documents in a
        docrepo. The default implementation does nothing.

        .. note::

           This is a classmethod for now (and that's why a config
           object is passsed as an argument), but might change to a
           instance method.
        """
        pass

    @classmethod
    def parse_all_teardown(cls, config):
        """ 
        Runs any cleanup action needed after parsing all documents in
        a docrepo. The default implementation does nothing.

        .. note::

           Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
           this might change to a instance method.
        """
        pass

    @decorators.action
    @decorators.managedparsing
    def parse(self, doc):
        """Parse downloaded documents into structured XML and RDF.

        It will also save the same RDF statements in a separate
        RDF/XML file.

        You will need to provide your own parsing logic, but often
        it's easier to just override parse_from_soup (assuming your
        indata is in a HTML format parseable by BeautifulSoup) and let
        the base class read and write the files.

        If your data is not in a HTML format, or BeautifulSoup is not
        an appropriate parser to use, override this method.

        :param doc: The document object to fill in.
        :type  doc: ferenda.Document
        """
        # Load the downloaded file into BeautifulSoup, then fill in
        # doc.meta (document-level metadata) and doc.body (document
        # content) in two separate passes.
        soup = self.soup_from_basefile(doc.basefile)
        self.parse_metadata_from_soup(soup, doc)
        self.parse_document_from_soup(soup, doc)

    def patch_if_needed(self, basefile, text):
        """Apply a local patch to the document text, if one exists.

        Looks for a patch file for ``basefile`` in the patch store
        (rooted at ``self.config.patchdir``). If found, applies it to
        ``text`` and returns the patched text together with a
        description of the patch; otherwise returns the text
        unchanged.

        :param basefile: The basefile of the document
        :type  basefile: str
        :param text: The document text to (possibly) patch
        :type  text: str
        :returns: ``(text, description)`` -- ``description`` is None
                  when no patch was applied
        :rtype: tuple
        :raises ferenda.errors.PatchError: if the patch couldn't be
                parsed or applied
        """
        # 1. do we have a patch?
        patchstore = self.documentstore_class(self.config.patchdir + os.sep + self.alias)
        patchpath = patchstore.path(basefile, "patches", ".patch")
        descpath =  patchstore.path(basefile, "patches", ".desc")
        if os.path.exists(patchpath):
            # 4. make sure error msgs from the patch modules are
            # available if we fail.
            # NOTE(review): a new StreamHandler is added to this
            # logger on every call, so handlers accumulate over
            # repeated invocations -- confirm whether that is intended.
            from io import StringIO
            pbuf = StringIO()
            plog = logging.getLogger('ferenda.thirdparty.patch')
            plog.setLevel(logging.WARNING)
            plog.addHandler(logging.StreamHandler(pbuf))

            # 2. read and parse it
            with open(patchpath) as fp:
                ps = patch.PatchSet()
                success = ps.parse(fp)
            if not success:
                raise errors.PatchError("Patch %s couldn't be parsed: %s" % (patchpath, pbuf.getvalue()))
            assert len(ps.items) == 1
            # 3. Create a temporary file with the file to be patched
            # open tmpfile
            # NOTE(review): tmpfile is never removed afterwards --
            # temp files accumulate on disk over repeated calls.
            fileno, tmpfile = mkstemp()
            fp = os.fdopen(fileno, "wb")
            # dump text to tmpfile
            fp.write(text.encode("utf-8")) # assume that patches are also in utf-8
            fp.close()
            ps.items[0].source = tmpfile
            # 5. now do the patching
            success = ps.apply()
            if not success:
                raise errors.PatchError("Patch %s failed: %s" % (patchpath, pbuf.getvalue()))
            else:
                # 6. Finally get a patch description
                if ps.items[0].hunks[0].desc:
                    desc = ps.items[0].hunks[0].desc
                elif os.path.exists(descpath):
                    desc = util.readfile(descpath)
                else:
                    desc = "(No patch description available)"
                return util.readfile(tmpfile), desc
        else:
            return (text, None)
                        
                        
    def parse_metadata_from_soup(self, soup, doc):
        """
        Given a BeautifulSoup document, retrieve all document-level
        metadata from it and put it into the given ``doc`` object's
        ``meta`` property.

        .. note::

           The default implementation sets ``rdf:type``,
           ``dct:title``, ``dct:identifier`` and
           ``prov:wasGeneratedBy`` properties in ``doc.meta``, as well
           as setting the language of the document in ``doc.lang``.

        :param soup: A parsed document
        :type  soup: bs4.BeautifulSoup
        :param  doc: Our document
        :type   doc: ferenda.Document
        :returns: None
        """
        # Determine the document language from html/@xml:lang or
        # html/@lang, falling back to the class default.
        lang = self.lang
        root = soup.find('html')
        if root:
            for langattr in ('xml:lang', 'lang'):
                try:
                    lang = root[langattr]
                    break
                except KeyError:
                    continue
        doc.lang = lang

        # The document title, if any
        try:
            title = soup.find('title').string
        except AttributeError:
            title = None

        # create document-level metadata
        d = Describer(doc.meta, doc.uri)
        d.rdftype(self.rdf_type)
        if title:
            d.value(self.ns['dct'].title, Literal(title, lang=doc.lang))
        d.value(self.ns['dct'].identifier, doc.basefile)
        d.value(self.ns['prov'].wasGeneratedBy, self.qualified_class_name())

    def parse_document_from_soup(self, soup, doc):
        """
        Given a BeautifulSoup document, convert it into the provided
        ``doc`` object's ``body`` property as suitable
        :py:mod:`ferenda.elements` objects.

        .. note::

           The default implementation respects
           :py:data:`~ferenda.DocumentRepository.parse_content_selector`
           and
           :py:data:`~ferenda.DocumentRepository.parse_filter_selectors`.

        :param soup: A parsed document
        :type  soup: bs4.BeautifulSoup
        :param  doc: Our document
        :type   doc: ferenda.Document
        :returns: None
        :raises ferenda.errors.ParseError: if parse_content_selector
                                           matches no tag at all
        """

        soups = soup.select(self.parse_content_selector)
        if len(soups) == 0:
            raise errors.ParseError("%s: parse_content_selector %r matches nothing" %
                             (doc.basefile,self.parse_content_selector))
        if len(soups) > 1:
            # logging.Logger.warn is a deprecated alias; use warning()
            self.log.warning("%s: parse_content_selector %r matches more than one tag" %
                          (doc.basefile,self.parse_content_selector))
        # only the first matching tag is used
        soup = soups[0]
        for filter_selector in self.parse_filter_selectors:
            for tag in soup.select(filter_selector):
                # tag.decompose()
                tag.extract() # decompose fails on some trees

        doc.body = elements_from_soup(soup)

    def soup_from_basefile(self, basefile, encoding='utf-8', parser='lxml'):
        """
        Load the downloaded document for basefile into a BeautifulSoup object

        :param basefile: The basefile for the downloaded document to parse
        :type  basefile: str
        :param encoding: The encoding of the downloaded document
        :type  encoding: str
        :returns: The parsed document
        :rtype: bs4.BeautifulSoup

        .. note::

           Helper function. You probably don't need to override it.
        """
        path = self.store.downloaded_path(basefile)
        # undecodable bytes are replaced rather than raising an error
        with codecs.open(path, encoding=encoding, errors='replace') as source:
            markup = source.read()
        return bs4.BeautifulSoup(markup, parser)

    def make_document(self, basefile=None):
        """
        Create a :py:class:`~ferenda.Document` object with basic
        initialized fields.

        .. note::

           Helper method used by the
           :py:func:`~ferenda.decorators.makedocument` decorator.

        :param basefile: The basefile for the document
        :type  basefile: str
        :rtype: ferenda.Document
        """
        doc = Document()
        # set basefile exactly once (the old code assigned it twice)
        doc.basefile = basefile
        doc.meta = self.make_graph()
        if basefile:
            # a URI can only be derived when a basefile is given
            doc.uri = self.canonical_uri(basefile)
        return doc

    def make_graph(self):
        """
        Initialize a rdflib Graph object with proper namespace prefix
        bindings (as determined by
        :py:data:`~ferenda.DocumentRepository.namespaces`)

        :rtype: rdflib.Graph
        """
        graph = Graph()
        # bind every configured prefix so serializations use them
        for prefix, uri in self.ns.items():
            graph.bind(prefix, uri)
        return graph
    
    def create_external_resources(self, doc):
        """Hook for creating any external files (stylesheets, images,
        etc) that accompany the parsed file.

        This default implementation is a no-op; override it in
        subclasses that need to emit extra resources.

        :param doc: The document
        :type  doc: ferenda.Document
        """
        return None

    def list_external_resources(self, basefile):
        """Return a list of external files that parse (through
        create_external_resources or otherwise) has created.

        This default implementation always returns an empty list.

        .. note::

           This is probably obsoleted by
           :py:meth:`~ferenda.DocumentRepository.list_attachments`

        :param doc: The document to list external files for
        :type  doc: ferenda.Document
        :returns: External files created by :py:meth:`~ferenda.DocumentRepository.parse`
        :rtype: list
        """
        return list()

    def render_xhtml(self, doc, outfile):
        """Renders the parsed object structure as a XHTML file with
        RDFa attributes (also returns the same XHTML as a string).

        The metadata graph in ``doc.meta`` is rendered into the
        ``<head>`` element, ``doc.body`` into the document body. The
        result is written via a temp file and only replaces outfile
        if the content differs.

        :param doc: The document to render
        :type  doc: ferenda.Document
        :param outfile: The file name for the XHTML document
        :type  outfile: str
        :returns: The XHTML document
        :rtype: str
        """
        # Clark-notation name for xml:lang, as lxml expects it
        XML_LANG = "{http://www.w3.org/XML/1998/namespace}lang"

        def render_head(g, uri):
            # Turn each triple in g into a <title>, <link> or <meta>
            # child of <head about="...">, expressed with RDFa
            # attributes (property/rel/rev/datatype/content).
            children = []
            # we sort to get a predictable order (by predicate, then by object)
            for (subj, pred, obj) in sorted(g, key=lambda t:(t[1],t[2])):
                # triples mentioning neither the document URI as
                # subject nor object cannot be expressed in the head
                if str(subj) != uri and str(obj) != uri:
                    self.log.warning("%s != %s" % (subj, uri))
                    continue
                if g.qname(pred) == "dct:title":
                    # dct:title becomes the <title> element
                    if obj.language:
                        children.append(
                            E.title({'property': 'dct:title', }, str(obj)))
                    else:
                        # explicitly empty xml:lang so the title does
                        # not inherit the document-level language
                        children.append(E.title({'property':
                                        'dct:title', XML_LANG: ""}, str(obj)))
                        
                elif isinstance(obj, URIRef) and str(subj) == uri:
                    # outgoing relation from the document: <link rel=...>
                    children.append(E.link({'rel': g.qname(pred),
                                            'href': str(obj)}))
                elif isinstance(obj, URIRef):
                    # incoming relation (document is the object): rev=...
                    children.append(E.link({'rev': g.qname(pred),
                                            'href': str(subj)}))
                elif obj.datatype:
                    # typed literal
                    children.append(E.meta({'property': g.qname(pred),
                                            'datatype': g.qname(obj.datatype),
                                            'content': str(obj)}))
                elif obj.language:
                    # language-tagged literal
                    children.append(E.meta({'property': g.qname(pred),
                                            XML_LANG: obj.language,
                                            'content': str(obj)}))
                else:
                    # plain literal; empty xml:lang prevents language
                    # inheritance from the root element
                    children.append(E.meta({'property': g.qname(pred),
                                            'content': str(obj),
                                            XML_LANG: ''}))

            return E.head({'about': uri}, *children)

        # XHTML as default namespace, plus every configured prefix
        nsmap = {None: "http://www.w3.org/1999/xhtml"}
        for prefix, namespace in self.ns.items():
            nsmap[prefix] = str(namespace)
        E = ElementMaker(namespace="http://www.w3.org/1999/xhtml",
                         nsmap=nsmap)
        headcontent = render_head(doc.meta, doc.uri)
        bodycontent = doc.body.as_xhtml(doc.uri)
        if doc.lang:
            htmlattrs = {XML_LANG: doc.lang}
        else:
            htmlattrs = {}
        xhtmldoc = E.html(
            htmlattrs,
            headcontent,
            bodycontent,
        )
        doctype = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" '
                   '"http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">')
        res = etree.tostring(xhtmldoc,
                             pretty_print=True,
                             xml_declaration=True,
                             encoding='utf-8',
                             doctype=doctype)
        # write to a temp file; only replace outfile when the content
        # actually changed (keeps mtime stable for dependency checks)
        fileno, tmpfile = mkstemp()
        fp = os.fdopen(fileno)
        fp.close()
        with open(tmpfile, "wb") as fp:
            fp.write(res)
        util.replace_if_different(tmpfile, outfile)
        return res


    def parsed_url(self, basefile):
        """Get the full local url for the parsed file for the
        given basefile.

        :param basefile: The basefile for which to calculate the local url
        :type  basefile: str
        :returns: The local url
        :rtype: str
        """
        # parsed documents are stored as .xhtml files
        url = self.generic_url(basefile, 'parsed', '.xhtml')
        return url


    def distilled_url(self, basefile):
        """Get the full local url for the distilled RDF/XML file for the
        given basefile.

        :param basefile: The basefile for which to calculate the local url
        :type  basefile: str
        :returns: The local url
        :rtype: str
        """
        # distilled metadata is stored as .rdf (RDF/XML) files
        url = self.generic_url(basefile, 'distilled', '.rdf')
        return url

    ################################################################
    #
    # STEP 3: Extract and store the RDF data
    #
    ################################################################
    @classmethod
    def relate_all_setup(cls, config):
        """Runs any cleanup action needed prior to relating all documents in
        a docrepo. The default implementation clears the corresponsing
        context (see :py:meth:`~ferenda.DocumentRepository.context`)
        in the triple store.

        .. note::

           Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
           this might change to a instance method.

        Returns False if no relation needs to be done (as determined
        by the timestamp on the dump nt file)

        """
        # FIXME: duplicate of code in context() (which is not a classmethod)
        ctx = "http://example.org/ctx/%s" % (cls.alias)

        docstore = DocumentStore(config.datadir + os.sep + cls.alias)
        dumpfile = docstore.path("dump", "distilled", ".nt")

        # Unless forced, skip the whole relate step when the dump is
        # already newer than every distilled file
        if not config.force:
            distilled_files = [docstore.distilled_path(b)
                               for b in docstore.list_basefiles_for("relate")]
            if util.outfile_is_newer(distilled_files, dumpfile):
                return False

        # FIXME: this blows away the entire triplestore content for a
        # particular context, making it impossible to update just some
        # data. One way would be to check the timestamp on dump.nt,
        # and if it's newer than all distilled files (how to get them,
        # given that this is a classmethod?), don't clear the
        # triplestore (and don't run any relate() method)
        store = TripleStore(config.storelocation, config.storerepository,
                            ctx, config.storetype)
        log = cls._setup_logger(cls.alias)
        log.info("Clearing context %s at repository %s" % (
            ctx, config.storerepository))
        store.clear()
        # we can't clear the whoosh index in the same way as one index
        # contains documents from all repos. But we need to be able to
        # clear it from time to time, maybe with a clear/setup method
        # in manager? Or fulltextindex maybe could have a clear method
        # that removes all documents for a particular repo?
        return True

        
    @classmethod
    def relate_all_teardown(cls, config):
        """
        Runs any cleanup action needed after relating all documents in
        a docrepo. This default implementation dumps all triples in the
        repo's context to a N-Triples file.

        .. note::

           Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
           this might change to a instance method.
        """
        # FIXME: should use context(), but that's a instancemethod
        ctx = "http://example.org/ctx/%s" % (cls.alias)
        triplestore = TripleStore(config.storelocation, config.storerepository,
                                  ctx, config.storetype)
        dumpfile = DocumentStore(config.datadir + os.sep + cls.alias).path(
            "dump", "distilled", ".nt")
        log = cls._setup_logger(cls.alias)
        log.info("Dumping triples from context %s at repository %s to %s" % (
            ctx, config.storerepository, dumpfile))
        util.ensure_dir(dumpfile)
        triplestore.get_serialized_file(dumpfile, format="nt")

    def relate(self, basefile, otherrepos=[]):
        """Run all relation tasks for basefile: load its triples into
        the triple store, register dependencies, and (if configured)
        index it into the fulltext index.

        :param basefile: The basefile for the document to relate
        :type  basefile: str
        :param otherrepos: Other repository instances that may claim
                           URIs referenced by this document; if empty,
                           this repository itself is used.
        :type  otherrepos: list
        """
        self.relate_triples(basefile)
        # FIXME: How should we pass in (or create) a list if
        # instantiated repositories?  When using API, the caller must
        # be able to create and pass the list, eg repos=[] as method
        # signature When using manager, we'll probably have to do some
        # specialcase in the run() method (like for
        # makeresources/runserver, but different -- in particular, one
        # instance of every registered repo should be created).

        # When otherrepos = [], should we still provide self as one
        # repo? Yes -- but rebind rather than append: the previous
        # code appended self to the shared mutable default-argument
        # list, leaking this instance into every later call.
        if not otherrepos:
            otherrepos = [self]

        self.relate_dependencies(basefile, otherrepos)
        if self.config.fulltextindex:
            self.relate_fulltext(basefile)

    def _get_triplestore(self):
        if not hasattr(self,'_triplestore'):
            self._triplestore = TripleStore(self.config.storelocation,
                                            self.config.storerepository,
                                            self.context(),
                                            self.config.storetype)
        return self._triplestore
        
    def relate_triples(self,basefile):
        """Insert the (previously distilled) RDF statements into the
        triple store.

        :param basefile: The basefile for the document containing the
                         RDF statements.
        :type  basefile: str
        :returns: None
        """
        with util.logtime(self.log.debug,
                          "%(basefile)s: Added %(rdffile)s to %(triplestore)s in %(elapsed).3f sec",
                          {'basefile': basefile,
                           'rdffile': self.store.distilled_path(basefile),
                           'triplestore': self.config.storelocation}):
            # use a context manager so the file handle is closed
            # deterministically (the old code left it open until GC)
            with open(self.store.distilled_path(basefile)) as fp:
                data = fp.read()
            self._get_triplestore().add_serialized(data, format="xml")

    def _get_fulltext_indexer(self, batchoptimize=False):
        if not hasattr(self, '_fulltextindexer'):
            self._fulltextindexer = FulltextIndex(self.config.indexlocation)
            if hasattr(self.config, 'all'):
                self._fulltextindexer._batchwriter = True
            
        return self._fulltextindexer

    def relate_dependencies(self, basefile, repos=[]):
        """For each URIRef found in the distilled RDF graph of basefile,
        register a dependency with any repo in repos that recognizes
        the URI.

        :returns: the number of dependencies registered
        :rtype: int
        """
        counters = {'basefile': basefile,
                    'deps': 0}
        with util.logtime(self.log.debug,
                          "%(basefile)s: Registered %(deps)s dependencies in %(elapsed).3f sec",
                          counters):
            with self.store.open_distilled(basefile) as fp:
                graph = Graph().parse(fp, format="xml")
            for (_, _, obj) in graph:
                # only URIRef objects can refer to other documents
                if not isinstance(obj, URIRef):
                    continue
                for repo in repos:
                    # ask each repo whether it recognizes this URI
                    dep_basefile = repo.basefile_from_uri(str(obj))
                    if dep_basefile:
                        # record our parsed file as depending on it
                        repo.add_dependency(dep_basefile,
                                            self.store.parsed_path(basefile))
                        counters['deps'] += 1
        return counters['deps']

    def add_dependency(self, basefile, dependencyfile):
        """Record that basefile depends on dependencyfile by appending
        it to the basefile's dependency list, unless already present.

        :param basefile: The basefile whose dependency list to update
        :type  basefile: str
        :param dependencyfile: Path of the file that basefile depends on
        :type  dependencyfile: str
        :returns: True if dependencyfile was added, False if it was
                  already listed
        :rtype: bool
        """
        present = False
        if os.path.exists(self.store.dependencies_path(basefile)):
            with self.store.open_dependencies(basefile) as fp:
                for line in fp:
                    if line.strip() == dependencyfile:
                        present = True
                        break  # no need to scan the rest of the file
        if not present:
            with self.store.open_dependencies(basefile, "a") as fp:
                fp.write(dependencyfile+"\n")

        return not present # return True if we added something, False otherwise

    def relate_fulltext(self,basefile):
        """Index the text of the document into fulltext index.
        
        :param basefile: The basefile for the document to be indexed.
        :type  basefile: str                        
        :returns: None
        """
        # counters are updated in place so util.logtime can report them
        values = {'basefile':basefile,
                  'resources':0,
                  'words':0}
        with util.logtime(self.log.debug,
                          "%(basefile)s: Added %(resources)s resources (%(words)s words) to fulltext index in %(elapsed).3f s", values):
            indexer  = self._get_fulltext_indexer()
            # the parsed XHTML file provides the text to index, the
            # distilled RDF file provides title/identifier metadata
            tree = etree.parse(self.store.parsed_path(basefile))
            g = Graph()
            desc = Describer(g.parse(self.store.distilled_path(basefile)))
            dct = self.ns['dct']

            # every element carrying an @about attribute is treated as
            # a searchable resource of its own, except the <head>
            for resource in tree.findall(".//*[@about]"):
                if resource.tag == "{http://www.w3.org/1999/xhtml}head":
                    continue
                about = resource.get('about')
                desc.about(about)
                plaintext = self._extract_plaintext(resource)
                l = desc.getvalues(dct.title)
                title = str(l[0]) if l else None
                l = desc.getvalues(dct.identifier)
                identifier = str(l[0]) if l else None
                indexer.update(uri=about,
                               repo=self.alias, 
                               basefile=basefile,
                               title=title,
                               identifier=identifier,
                               text=plaintext)
                values['resources'] += 1
                values['words'] += len(plaintext.split())

            indexer.commit() # NB: Destroys indexer._writer

    def _extract_plaintext(self,node):
        # helper to extract any text from a elementtree node,
        # excluding subnodes that are resources themselves (ie they
        # have an @about node)
        plaintext = node.text if node.text else ""
        for subnode in node:
            if not subnode.get('about'):
                plaintext += self._extract_plaintext(subnode)
        if node.tail:
            plaintext += node.tail
        # append trailing space for block-level elements (including
        # <br>, <img> and some others that formally are inline
        # elements)
        trailspace = "" if node.tag in ("a" "b","i","span") else " "
        return plaintext.strip()+trailspace
    ################################################################
    #
    # STEP 4: Generate browser-ready HTML with navigation panels,
    # information about related documents and so on.
    #
    ################################################################
    @classmethod
    def generate_all_setup(cls, config):
        """
        Hook that runs before generating all documents in a docrepo.
        This default implementation is a no-op; override in subclasses
        that need setup work.

        .. note::

           Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
           this might change to a instance method.
        """
        return None

    @classmethod
    def generate_all_teardown(cls, config):
        """
        Hook that runs after generating all documents in a docrepo.
        This default implementation is a no-op; override in subclasses
        that need cleanup work.

        .. note::

           Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
           this might change to a instance method.
        """
        return None

    @decorators.action
    def generate(self, basefile):
        """Generate a browser-ready HTML file from structured XML and RDF.

        Uses the XML and RDF files constructed by
        :py:meth:`ferenda.DocumentRepository.parse`.

        The generation is done by XSLT, and normally you won't need to
        override this, but you might want to provide your own xslt
        file and set
        :py:data:`ferenda.DocumentRepository.xslt_template` to the
        name of that file.

        If you want to generate your browser-ready HTML by any other
        means than XSLT, you should override this method.

        :param basefile: The basefile for which to generate HTML
        :type  basefile: str
        :returns: None
        """
        with util.logtime(self.log.info, "%(basefile)s OK (%(elapsed).3f sec)",
                          {'basefile': basefile}):
            infile = self.store.parsed_path(basefile)
            annotations = self.store.annotation_path(basefile)
            # the registered dependency files, plus our own input and
            # annotation files, decide whether regeneration is needed
            if os.path.exists(self.store.dependencies_path(basefile)):
                dependencies = util.readfile(self.store.dependencies_path(basefile)).split("\n")
            else:
                dependencies = []
            dependencies.extend((infile,annotations))

            outfile = self.store.generated_path(basefile)
            force = (self.config.force or
                     self.config.generateforce)
            if not force and util.outfile_is_newer(dependencies, outfile):
                # outfile already up to date -- nothing to do
                self.log.debug("%s: Skipped", basefile)
                return
            self.log.debug("%s: Starting", basefile)
            # make sure the XSLT files are available on the filesystem
            # (they may be packed inside an egg/zip)
            xsltdir = self.setup_transform_templates(os.path.dirname(self.xslt_template))
            xsltfile = xsltdir + os.sep + os.path.basename(self.xslt_template)
            with util.logtime(self.log.debug,
                              "%(basefile)s get_transform_configuration in %(elapsed).3f sec",
                              {'basefile': basefile}):
                params = self.get_transform_configuration(xsltdir,outfile)

            assert 'configurationfile' in params
            # The actual function code
            with util.logtime(self.log.debug,
                              "%(basefile)s prep_annotation_file in %(elapsed).3f sec",
                              {'basefile': basefile}):
                annotation_file = self.prep_annotation_file(basefile)

            if annotation_file:
                # pass the annotation file to the stylesheet, relative
                # to the stylesheet's own directory
                relpath = os.path.relpath(annotation_file,
                                          os.path.dirname(xsltfile))
                # NOTE: Even on Win32, lxml needs to have this path using
                # unix separators, i.e. / instead of the native \
                relpath = relpath.replace("\\","/")
                params['annotationfile'] = XSLT.strparam(relpath)
            with util.logtime(self.log.debug,
                              "%(basefile)s transform_html %(elapsed).3f",
                              {'basefile': basefile}):
                self.transform_html(xsltfile, infile, outfile, params)

            # At this point, outfile may appear untouched if it already
            # existed and wasn't actually changed. But this will cause the
            # above outfile_is_newer check to fail next time around. Also,
            # the docentry.updated parameter will be incosistent with the
            # timestamp on the file. What to do?
            os.utime(outfile, None) # update access/modified timestamp 
            now = datetime.now()
            # record publication/update timestamps in the document entry
            docentry = DocumentEntry(self.store.documententry_path(basefile))
            if not docentry.published:
                docentry.published = now
            docentry.updated = now
            docentry.save()

    def transform_html(self, stylesheet, infile, outfile,
                       parameters={},
                       format=True,
                       xinclude=False):
        """Creates browser-ready HTML5 from a basic XHTML+RDFa file
        using a XSLT transform.

        :param stylesheet: the filename of the XSLT stylesheet to use
        :type  stylesheet: string
        :param infile:     The filename of the basic XHTML+RDFa file to be
                           transformed
        :type  infile:     string
        :param outfile:    The filename of the created HTML5 file
        :type  outfile:    string
        :param parameters: Any parameters passed to the XSLT stylesheet (see
                           :py:meth:`~ferenda.DocumentRepository.get_transform_configuration`)
        :type  parameters: dict
        :param format:     Whether to format/indent the resulting outfile
        :type  format:     bool
        :param xinclude:   Whether to process xinlude directives in the infile
        :type  xinclude:   bool
        :returns:          True if the transform resulted in a new or updated
                           outfile, False if the result was identical
                           to the previously existing outfile.
        :rtype:            bool                   
        """
        assert not xinclude, "xinclude not supported yet"
        # Open the XSLT stylesheet, either as a normal file
        # (user-provided) or a package resource (ferenda built-in)
        # FIXME: load-path mechanism (cf manager.makeresources())?
        if os.path.exists(stylesheet):
            fp = open(stylesheet)
        elif pkg_resources.resource_exists('ferenda',stylesheet): # prefix stylesheet with 'res/xsl'?
            fp = pkg_resources.resource_stream('ferenda',stylesheet)
        else:
            raise ValueError("Stylesheet %s not found" % stylesheet)
        # remove_blank_text makes later pretty-printing effective
        parser = etree.XMLParser(remove_blank_text=format)
        xsltree = etree.parse(fp,parser)
        fp.close()
        transform = etree.XSLT(xsltree)
        with open(infile) as fp:
            intree = etree.parse(fp,parser)
        try:
            outtree = transform(intree,**parameters)
            
        except etree.XSLTApplyError as e:
            raise errors.TransformError(str(e.error_log))
        # the transform can "succeed" while still logging errors, so
        # check the error log explicitly
        if len(transform.error_log) > 0:
            raise errors.TransformError(str(transform.error_log))

        res = etree.tostring(outtree,pretty_print=format).strip()

        if format:
            # re-parse and re-serialize to normalize indentation
            bytefp = BytesIO(res)
            parser = etree.XMLParser(remove_blank_text=True)
            res = etree.tostring(etree.parse(bytefp,parser),pretty_print=True)
        
        # mkstemp gives a unique temp file name; the open fd itself is
        # not needed, so close it right away
        fileno, tmpfile = mkstemp()
        fp = os.fdopen(fileno)
        fp.close()

        # strip the stylesheet's <remove-this-tag> wrapper (if any)
        # and emit a HTML5 doctype in its place
        if res.startswith(b"<remove-this-tag>"):
            res = b"<!DOCTYPE html>\n"+res[17:-19].strip()

        with open(tmpfile,"wb") as fp:
            fp.write(res)

        util.ensure_dir(outfile)
        # only touch outfile if the content actually changed
        return util.replace_if_different(tmpfile,outfile)

    # xsltpath = os.path.join(os.curdir,'../ferenda',self.xslt_template)
    def get_transform_configuration(self, xsltdir, outfile=None):
        """
        Set up a dict of parameters pointing to the configuration XML
        file needed for XSLT transform.

        .. note::

           Maybe this should be an internal method.
        
        :param xsltdir: path to the directory where the root xslt file is stored
        :type  xsltdir: str
        :param outfile: path to the planned file resulting from the XSLT transfomrm
        :type  outfile: str
        :returns: The path to the resources.xml file, wrapped through lxml.etree.XSLT.strparam and put in a a dict
        :rtype: dict
        """
        assert os.path.isdir(xsltdir), "%s does not exist (or is not a directory)" % xsltdir
        params = {}
        conffile = os.sep.join([self.config.datadir,'rsrc','resources.xml'])
        if os.path.exists(conffile):
            if outfile:
                # We should maybe also detect if stylesheet[@href] and
                # script[@src] point correctly, and if not, create a
                # new version of configurationfile where os.relpath
                # has been applied to them.
                tree = etree.parse(conffile)
                assert outfile.startswith(self.config.datadir), "outfile %s not under datadir %s" % (outfile, self.config.datadir)
                # "datadir/foo/bar/baz.html" -> "foo/bar"
                # "/var/folders/sy/r4f/T/tmpcRojl/foo/bar/baz.html" -> "foo/bar"p
                relative_outfile = outfile[len(self.config.datadir)+1:]
                if os.sep in relative_outfile:
                    outdir = relative_outfile.rsplit(os.sep,1)[0]
                else:
                    outdir = ""
                # rewrite local (non-http) stylesheet/script references
                # so they are relative to the output file's directory
                for node in tree.findall("stylesheets/link"):
                    if not (re.match("https?://", node.get('href'))):
                        node.set('href', os.path.relpath(node.get('href'),outdir).replace(os.sep,"/"))
                for node in tree.findall("javascripts/script"):
                    if not (re.match("https?://", node.get('src'))):
                        node.set('src', os.path.relpath(node.get('src'),outdir).replace(os.sep,"/"))
                # when the rewritten refs climb out of the directory,
                # persist a depth-specific copy of the config file so
                # it can be reused for outputs at the same depth
                depth = tree.find("stylesheets/link").get('href').count('..')
                if depth > 0:
                    # create a new file
                    (base, ext) = os.path.splitext(conffile)
                    modfile = base + ("-depth-%s" % depth) + ext
                    if not util.outfile_is_newer([conffile], modfile):
                        tree.write(modfile)
                    conffile = modfile
            # lxml requires XSLT string parameters to be wrapped with
            # strparam for proper quoting
            relpath = os.path.relpath(conffile,xsltdir).replace(os.sep,"/")
            params['configurationfile'] = XSLT.strparam(relpath)
            # params['configurationfile_plain'] = relpath
            # params['configurationfile'] = relpath

        return params

    # class-level cache for the extracted resource directory
    _transform_resourcedir=None
    def setup_transform_templates(self, xsltdir):
        """Unpack/extract all XSLT files and other resources needed to
        for the XSLT transform, if needed (which is the case if
        ferenda is distributed as an egg, i.e. all files are contained
        in a zip file).

        :param xsltdir: path to the directory where the root xslt file is stored
        :type  xsltdir: str
        :returns: The path to extracted files
        :rtype: str
        :raises ValueError: if xsltdir contains no resources at all
        """
        # Unpack/extract all the files (NB: if not using zip/eggs just
        # return existing filesystem path). The result is cached so
        # the extraction happens at most once.
        if not self._transform_resourcedir:
            extracted = None
            for f in pkg_resources.resource_listdir('ferenda',xsltdir):
                # resource_filename extracts the file if necessary and
                # returns its filesystem path
                extracted = pkg_resources.resource_filename('ferenda', xsltdir+"/"+f)
            if extracted is None:
                # the original code raised a bare NameError here (the
                # loop variable was never bound); fail meaningfully
                raise ValueError("No resources found in %s" % xsltdir)
            self._transform_resourcedir = os.path.dirname(extracted)
        return self._transform_resourcedir

    def prep_annotation_file(self, basefile):
        """Helper function used by :py:meth:`generate` -- prepares a RDF/XML file
        containing statements that in some way annotates the
        information found in the document that generate handles, like
        URI/title of other documents that refers to this one.

        .. note::

           This does not yet have a generic implementation.

        :param basefile: The basefile for which to collect annotating
        statements.
        :type basefile: str
        :returns: The full path to the prepared RDF/XML file, or None
                  if no annotations were found
        :rtype: str
        """
        uri = self.canonical_uri(basefile)
        graph = self.construct_annotations(uri)
        if not graph:
            return None
        # serialize the annotation graph in Grit format next to the
        # parsed document
        with self.store.open_annotation(basefile,"w") as fp:
            fp.write(self.graph_to_annotation_file(graph))
        return self.store.annotation_path(basefile)

    def construct_annotations(self, uri):
        """Run the repo's annotation SPARQL CONSTRUCT query for uri and
        return the resulting graph (or None if no triple store is
        configured).

        :param uri: The URI to collect annotations for
        :type  uri: str
        :raises ValueError: if the query template cannot be found
        """
        query_template = self.sparql_annotations
        # if self.config.storetype in ("SLEEPYCAT", "SQLITE"):
        #     query_template = "%s.sparql10%s" % os.path.splitext(query_template)
        if os.path.exists(query_template):
            # open in binary mode: the .decode() below would fail on a
            # py3 text-mode file object (str has no decode method)
            fp = open(query_template, 'rb')
        elif pkg_resources.resource_exists('ferenda',query_template):
            fp = pkg_resources.resource_stream('ferenda',query_template)
        else:
            raise ValueError("query template %s not found" % query_template)
        params = {'uri': uri}
        try:
            # the template uses %(uri)s-style interpolation
            sq = fp.read().decode('utf-8') % params
        finally:
            fp.close()
        if self.config.storelocation:
            store = TripleStore(self.config.storelocation,
                                self.config.storerepository,
                                None, # self.context(),
                                self.config.storetype)
            res = store.construct(sq)
            if self.config.storetype in ("SLEEPYCAT", "SQLITE"):
                # these local store types hold an open graph that must
                # be closed explicitly
                store.graph.close()
            return res

    # helper for the prep_annotation_file helper -- it expects a
    # RDFLib graph, and returns a XML string in Grit format
    def graph_to_annotation_file(self, graph):
        """Converts a RDFLib graph into a XML file with the same
        statements, ordered using the Grit format
        (https://code.google.com/p/oort/wiki/Grit) for easier XSLT
        inclusion.

        :param graph: The graph to convert
        :type  graph: rdflib.Graph
        :returns: A serialized XML document with the RDF statements
        :rtype: str
        """
        infp = BytesIO(graph.serialize(format="xml"))
        intree = etree.parse(infp)
        stylesheet = "res/xsl/rdfxml-grit.xsl"
        if os.path.exists(stylesheet):
            # binary mode: let the XML parser handle encoding itself
            fp = open(stylesheet, 'rb')
        elif pkg_resources.resource_exists('ferenda', stylesheet):  # prefix stylesheet with 'res/xsl'?
            fp = pkg_resources.resource_stream('ferenda', stylesheet)
        else:
            raise ValueError("Stylesheet %s not found" % stylesheet)
        try:
            transform = etree.XSLT(etree.parse(fp))
        finally:
            fp.close()
        resulttree = transform(intree)
        # NB: the original passed pretty_print=format -- the builtin
        # format() function, which is always truthy -- clearly a typo
        # for True.
        res = etree.tostring(resulttree, pretty_print=True)
        return res.decode('utf-8')

    def generated_url(self, basefile):
        """Get the full local url for the generated file for the
        given basefile.
        
        :param basefile: The basefile for which to calculate the local url
        :type  basefile: str
        :returns: The local url
        :rtype: str
        """
        return self.generic_url(basefile, 'generated', '.html')

    ################################################################
    #
    # STEP 5: Generate HTML pages for a TOC of a all documents, news
    # pages of new/updated documents, and other odds'n ends.
    #
    ################################################################

    # toc
    #     toc_select
    #         toc_query
    #              (toc_predicates ?)
    #     toc_criteria
    #         toc_predicates
    #         toc_selector
    #     toc_pagesets
    #         (selectors)
    #     toc_select_for_pages   <-- where most of the magic happens
    #         (selectors)
    #         toc_item
    #     toc_generate_pages
    #         toc_generate_page
    #
    def toc(self):
        """Creates a set of pages that together acts as a table of
        contents for all documents in the repository. For smaller
        repositories a single page might be enough, but for
        repositoriees with a few hundred documents or more, there will
        usually be one page for all documents starting with A,
        starting with B, and so on. There might be different ways of
        browseing/drilling down, i.e. both by title, publication year,
        keyword and so on.

        The default implementation calls
        :py:meth:`~ferenda.DocumentRepository.toc_select` to get all
        data from the triple store,
        :py:meth:`~ferenda.DocumentRepository.toc_criteria` to find
        out the criteria for ordering,
        :py:meth:`~ferenda.DocumentRepository.toc_pagesets` to
        calculate the total set of TOC html files,
        :py:meth:`~ferenda.DocumentRepository.toc_select_for_pages` to
        create a list of documents for each TOC html file, and finally
        :py:meth:`~ferenda.DocumentRepository.toc_generate_pages` to
        create the HTML files. The default implemention assumes that
        documents have a title (in the form of a ``dct:title``
        property) and a publication date (in the form of a
        ``dct:issued`` property). 

        You can override any of these methods to customize any part of
        the toc generation process. Often overriding :py:meth:`~ferenda.DocumentRepository.toc_criteria` to
        specify other document properties will be sufficient."""
        
        data = self.toc_select(self.context())
        criteria = self.toc_criteria(self.toc_predicates())
        pagesets = self.toc_pagesets(data,criteria)
        pagecontent = self.toc_select_for_pages(data, pagesets, criteria)
        self.toc_generate_pages(pagecontent,pagesets)
        self.toc_generate_first_page(pagecontent,pagesets)

    def toc_select(self, context=None):
        """Select all data from the triple store needed to make up all
        TOC pages.

        :param context: The context (named graph) to restrict the query to.
                        If None, search the entire triplestore.
        :type  context: str
        :returns: The results of the query, as python objects
        :rtype: set of dicts"""
        store = TripleStore(self.config.storelocation,
                            self.config.storerepository,
                            None,  # self.context(),
                            self.config.storetype)

        # RDFLib-backed stores take the context on the store object
        # itself rather than as a FROM clause in the query
        if self.config.storetype in ('SQLITE', 'SLEEPYCAT'):
            store.context = context
            sq = self.toc_query(None)
        else:
            sq = self.toc_query(context)
        self.log.debug("toc: querying:\n%s" % sq)
        res = store.select(sq, "python")
        store.close()
        return res

    def toc_query(self, context=None):
        """Constructs a SPARQL SELECT query that fetches all
        information needed to construct the complete set of TOC pages
        in the form of a single list of result rows.

        Override this method if you need to customize the query.

        :param context: The context (named graph) to which to limit
                        the query. If None, query the entire
                        triplestore.
        :type  context: str
        :returns: The SPARQL query
        :rtype: str

        Example:

        >>> d = DocumentRepository()
        >>> expected = 'PREFIX dct:<http://purl.org/dc/terms/> SELECT DISTINCT ?uri ?title ?issued FROM <http://example.org/ctx/base> WHERE {?uri dct:title ?title . ?uri dct:issued ?issued . }'
        >>> d.toc_query("http://example.org/ctx/base") == expected
        True
        """
        # FIXME: create query from self.toc_criteria
        # FIXME: load from res/sparql/toc.sq instead
        from_graph = "FROM <%s>" % context if context else ""
        return ("PREFIX dct:<http://purl.org/dc/terms/> "
                "SELECT DISTINCT ?uri ?title ?issued %s "
                "WHERE {?uri dct:title ?title . ?uri dct:issued ?issued . }"
                % from_graph)

    def toc_criteria(self, predicates=None):
        """Create the criteria used to organize the documents in the
        repository into different pagesets.

        :param predicates: The :py:class:`~rdflib.term.URIRef` terms to use as base for criteria
        :type  predicates: list
        :returns: :py:class:`~ferenda.sources.documentsource.TocCriteria`
                  objects, each representing a particular way of organizing the
                  documents, and each corresponding to a TocPageset object (constructed
                  by :py:meth:`~ferenda.sources.DocumentRepository.toc_pagesets`)
        :rtype: list
        """
        criteria = []
        for predicate in predicates:
            # make an appropriate selector etc. a proper implementation
            # would look at the ontology of the predicate, take a look
            # at the range for that DataProperty and select an
            # appropriate selector.
            if predicate == self.ns['dct']['issued']:  # date property
                # group (and sort) by publication year, i.e. the first
                # four chars of the date string
                selector = lambda x: x['issued'][:4]
                key = selector
                label = 'Sorted by publication year'
                pagetitle = 'Documents published in %s'
            else:
                # selector and key for proper title sort
                # (eg. disregarding leading "the", not counting
                # spaces) -- really stretching the limit on what can
                # be comfortably done with lambdas...
                selector = lambda x: x['title'][4].lower() if x['title'].lower().startswith("the ") else x['title'][0].lower()
                key = lambda x: "".join((x['title'][4:] if x['title'].lower().startswith("the ") else x['title']).lower().split())
                # (fixed: was "label = label='Sorted by '..." -- a
                # redundant double assignment)
                label = 'Sorted by ' + util.uri_leaf(predicate)
                pagetitle = 'Documents starting with "%s"'

            criteria.append(TocCriteria(binding=util.uri_leaf(predicate).lower(),
                                        label=label,
                                        pagetitle=pagetitle,
                                        selector=selector,
                                        key=key))
        return criteria
        

    def toc_predicates(self):
        """Return a list of predicates (as
        :py:class:`~rdflib.term.URIRef` objects that each should be
        used to organize a table of contents of documents in this
        docrepo).

        Is used by toc_criteria, must match results from sparql query
        in toc_query."""
        
        return [self.ns['dct']['title'], self.ns['dct']['issued']]

    def toc_pagesets(self, data, criteria):
        """Calculate the set of needed TOC pages based on the result rows

        :param data: list of dicts, each dict containing metadata about a single document
        :param criteria: list of TocCriteria objects
        :returns: The link text, page title and base file for each needed
                  TOC page, structured by selection criteria.
        :rtype: 3-dimensional named tuple

        Example:

        >>> d = DocumentRepository()
        >>> rows = [{'uri':'http://ex.org/1','title':'Abc','issued':'2009-04-02'},
        ...         {'uri':'http://ex.org/2','title':'Abcd','issued':'2010-06-30'},
        ...         {'uri':'http://ex.org/3','title':'Dfg','issued':'2010-08-01'}]
        >>> from operator import itemgetter
        >>> criteria = (TocCriteria(binding='title',
        ...                         label='By title',
        ...                         pagetitle='Documents starting with "%s"',
        ...                         selector=lambda x: x['title'][0].lower(),
        ...                         key=itemgetter('title')),
        ...             TocCriteria(binding='issued',
        ...                         label='By publication year',
        ...                         pagetitle='Documents published in %s',
        ...                         selector=lambda x: x['issued'][:4],
        ...                         key=itemgetter('issued')))
        >>> # Note: you can get a suitable tuple of TocCriteria
        >>> # objects by calling toc_criteria() as well
        >>> pagesets=d.toc_pagesets(rows,criteria)
        >>> pagesets[0].label == 'By title'
        True
        >>> pagesets[0].pages[0] == TocPage(linktext='a', title='Documents starting with "a"', basefile='title/a')
        True
        >>> pagesets[1].label == 'By publication year'
        True
        >>> pagesets[1].pages[0] == TocPage(linktext='2009', title='Documents published in 2009', basefile='issued/2009')
        True
        """
        res = []
        for criterion in criteria:
            pageset = TocPageset(label=criterion.label, pages=[])
            # collect the distinct selector values occurring in data;
            # one TOC page is needed per distinct value
            values = set()
            for row in data:
                values.add(criterion.selector(row))
            for value in sorted(values):
                pageset.pages.append(
                    TocPage(linktext=value,
                            title=criterion.pagetitle % value,
                            basefile=criterion.binding + "/" + value))
            res.append(pageset)
        return res

    def toc_select_for_pages(self, data, pagesets, criteria):
        """Go through all data rows (each row representing a document)
        and, for each toc page, select those documents that are to
        appear in a particular page.
        
        Example:
        
        >>> d = DocumentRepository()
        >>> rows = [{'uri':'http://ex.org/1','title':'Abc','issued':'2009-04-02'},
        ...         {'uri':'http://ex.org/2','title':'Abcd','issued':'2010-06-30'},
        ...         {'uri':'http://ex.org/3','title':'Dfg','issued':'2010-08-01'}]
        >>> from rdflib import Namespace
        >>> dct = Namespace("http://purl.org/dc/terms/")
        >>> criteria = d.toc_criteria([dct.title,dct.issued])
        >>> pagesets=d.toc_pagesets(rows,criteria)
        >>> expected={'title/a':[[Link('Abc',uri='http://ex.org/1')],
        ...                      [Link('Abcd',uri='http://ex.org/2')]],
        ...           'title/d':[[Link('Dfg',uri='http://ex.org/3')]],
        ...           'issued/2009':[[Link('Abc',uri='http://ex.org/1')]],
        ...           'issued/2010':[[Link('Abcd',uri='http://ex.org/2')],
        ...                          [Link('Dfg',uri='http://ex.org/3')]]}
        >>> d.toc_select_for_pages(rows, pagesets, criteria) == expected
        True
         
        :param data: x
        :param pagesets: y
        :param criteria: z
        :returns: mapping between toc basefile and documentlist for that basefile
        :rtype: dict
        """

        # to 1-dimensional dict (odict?): {basefile: [list-of-Elements]}
        res = {}
        for pageset, criterion in zip(pagesets,criteria):
            documents = defaultdict(list)
            for row in data:
                key = criterion.selector(row)
                # documents[key].append(self.toc_item(criterion.binding,row))
                documents[key].append(row)
            for key in documents.keys():
                # find appropriate page in pageset and read it's basefile
                for page in pageset.pages:
                    if page.linktext == key:
                        s = sorted(documents[key],
                                   key=criterion.key)
                        res[page.basefile] = [self.toc_item(criterion.binding, row) for row in s]
        return res
                
    def toc_item(self, binding, row):
        """Return a formatted version of row, using Element objects.

        The default implementation ignores *binding* and always
        renders a simple link with the row title as link text."""
        title = row['title']
        uri = row['uri']
        return [Link(title, uri=uri)]

    # pagecontent -> documentlists?
    def toc_generate_pages(self, pagecontent, pagesets):
        paths = []
        for basefile,documents in pagecontent.items():
            paths.append(self.toc_generate_page(basefile, documents, pagesets))
        return paths

    def toc_generate_first_page(self, pagecontent, pagesets):
        (basefile, documents) = sorted(pagecontent.items(), key=itemgetter(0))[0]
        return self.toc_generate_page(basefile, documents, pagesets, "index")
        
    def toc_generate_page(self, basefile, documentlist, pagesets,
                          effective_basefile=None):
        """Render a single TOC page for *basefile*: first an XHTML
        intermediate, then (if changed) a browser-ready HTML file via
        the toc.xsl transform.

        :param basefile: basefile of the TOC page to generate
        :param documentlist: the list of document items to put on the page
        :param pagesets: all pagesets, used to build the navigation list
        :param effective_basefile: if given, write output under this
            basefile instead (used by toc_generate_first_page to write
            the index page)
        :returns: path to the (possibly pre-existing) HTML outfile
        """
        if effective_basefile is None:
            effective_basefile = basefile
        outfile = self.store.path(effective_basefile, 'toc', '.html')
        tmpfile = self.store.path(effective_basefile, 'toc', '.xhtml')

        doc = self.make_document(basefile)
        doc.uri = self.context() + "/" + basefile
        d = Describer(doc.meta, doc.uri)
        nav = UnorderedList(role='navigation')
        # NOTE(review): title is only bound when some page in pagesets
        # matches basefile; otherwise the d.value() call below raises
        # NameError -- presumably every generated basefile has a
        # matching page. Confirm against callers.
        for pageset in pagesets:
            sublist = UnorderedList()
            for page in pageset.pages:
                if page.basefile == basefile:
                    # the current page: plain text, no self-link
                    title = page.title
                    sublist.append(ListItem([page.linktext]))
                else:
                    # FIXME: less hardcoded strategy plz
                    path = self.store.path(page.basefile, 'toc', '.html')
                    href = os.path.relpath(
                        path, os.path.dirname(outfile)).replace(os.sep, "/")
                    sublist.append(ListItem([Link(str(page.linktext),
                                                  href=href)]))
            nav.append(ListItem([Paragraph(pageset.label), sublist]))

        d.value(self.ns['dct'].title, title)

        # Consider other strategies; definition lists with
        # subheadings, orderedlists, tables...
        ul = UnorderedList([ListItem(x) for x in documentlist], role='main')
        doc.body = Body([nav,
                         Heading([title]),
                         ul])

        self.log.debug("Rendering XHTML to %s" % tmpfile)
        self.render_xhtml(doc, tmpfile)
        if not util.outfile_is_newer([tmpfile], outfile):
            # Prepare a browser-ready HTML page using toc.xsl
            self.log.debug("Transforming HTML to %s" % outfile)
            xsltfile = "res/xsl/toc.xsl"
            xsltdir = self.setup_transform_templates(os.path.dirname(xsltfile))
            params = self.get_transform_configuration(xsltdir, outfile)
            self.transform_html(xsltfile, tmpfile, outfile, params)
            self.log.info("Created %s" % outfile)
        # returned whether or not we actually (re)created the outfile
        return outfile


    def news(self):
        criteria = self.news_criteria()
        data = self.news_entries() # Generator of DocumentEntry objects
        for entry in data:
            for criterion in criteria:
                if criterion.selector(entry):
                    criterion.entries.append(entry)
        for criterion in criteria:
            # should reverse=True be configurable? For datetime
            # properties it makes sense to use most recent first, but
            # maybe other cases?
            entries = sorted(criterion.entries, key=criterion.key, reverse=True)
            self.log.info("feed %s: %s entries" % (criterion.basefile, len(entries)))
            self.news_write_atom(entries,
                                 criterion.feedtitle,
                                 criterion.basefile)

            outfile = self.store.path(criterion.basefile, 'feed', '.html')
            xsltdir = self.setup_transform_templates(os.path.dirname("res/xsl/atom.xsl"))
            params = self.get_transform_configuration(xsltdir,outfile)
            self.transform_html("res/xsl/atom.xsl",
                                self.store.atom_path(criterion.basefile),
                                outfile,
                                params)

    def news_criteria(self):
        """Returns a list of NewsCriteria objects."""
        # default: a single feed containing all published documents
        main = NewsCriteria('main', 'New and updated documents')
        return [main]

    def news_entries(self):
        """Yield a :py:class:`~ferenda.DocumentEntry` object for every
        published document in this repository, with id, url, title,
        summary, link and content filled in from the document entry
        file and the distilled RDF metadata."""
        # If we just republish eg. the original PDF file and don't
        # attempt to parse/enrich the document
        republish_original = False

        for basefile in self.list_basefiles_for("news"):
            path = self.store.documententry_path(basefile)
            entry = DocumentEntry(path)
            if not entry.published:
                # not published -> shouldn't be in feed
                continue
            if not entry.id:
                entry.id = self.canonical_uri(basefile)
            if not entry.url:
                entry.url = self.generated_url(basefile)

            if not os.path.exists(self.store.distilled_path(basefile)):
                self.log.warn("%s: No distilled file at %s, skipping" %
                              (basefile, self.store.distilled_path(basefile)))
                continue

            g = Graph()
            g.parse(self.store.distilled_path(basefile))
            desc = Describer(g, entry.id)

            dct = self.ns['dct']
            if not entry.title:
                try:
                    entry.title = desc.getvalue(dct.title)
                except KeyError:  # no dct:title -- not so good
                    self.log.warn("%s: No title available" % basefile)
                    entry.title = entry.id

            try:
                entry.summary = desc.getvalue(dct.abstract)
            except KeyError:  # no dct:abstract -- that's OK
                pass

            # 4: Set links to RDF metadata and document content.
            # (bugfix: these set_link lines were previously indented
            # into the "except KeyError" suite above, so the link was
            # only set for documents that LACKED a dct:abstract)
            entry.set_link(self.store.distilled_path(basefile),
                           self.distilled_url(basefile))

            if republish_original:
                entry.set_content(self.store.downloaded_path(basefile),
                                  self.downloaded_url(basefile))
            else:
                # the parsed (machine reprocessable) version. The
                # browser-ready version is referenced with the <link>
                # element, separate from the set_link <link>
                entry.set_content(self.store.parsed_path(basefile),
                                  self.parsed_url(basefile))
            yield entry
    
    def news_write_atom(self, entries, title, basefile, archivesize=1000):
        """Given a list of Atom entry-like objects, including links to RDF
        and PDF files (if applicable), create a rinfo-compatible Atom
        feed, optionally splitting into archives.

        :param entries: DocumentEntry-like objects, assumed to be
                        sorted newest first (the list is consumed
                        destructively when archives are split off)
        :param title: title of the feed
        :param basefile: basefile used to derive the feed path and url
        :param archivesize: max number of entries per archive file
        :returns: paths to the written feed files, main feed first
        :rtype: list
        """

        # This nested func does most of heavy lifting, the main
        # function code only sets up basic constants and splits the
        # entries list into appropriate chunks
        def write_file(entries, suffix="", prevarchive=None, nextarchive=None):
            feedfile = self.store.path(basefile + suffix, 'feed', '.atom')
            nsmap = {None: 'http://www.w3.org/2005/Atom',
                     'le': 'http://purl.org/atompub/link-extensions/1.0'}
            E = ElementMaker(nsmap=nsmap)
            updated = max(entries, key=attrgetter('updated')).updated
            contents = [E.id(feedid),
                        E.title(title),
                        E.updated(util.rfc_3339_timestamp(updated)),
                        E.author(
                            E.name("Ferenda"),
                            E.email("info@example.org"),
                            E.uri(self.config.url)
                        ),
                        E.link({'rel': 'self', 'href': feedurl})]
            if prevarchive:
                contents.append(E.link({'rel': 'prev-archive',
                                        'href': prevarchive}))
            if nextarchive:
                contents.append(E.link({'rel': 'next-archive',
                                        'href': nextarchive}))

            for entry in entries:
                entrynodes = [E.title(entry.title),
                              E.summary(str(entry.summary)),
                              E.id(entry.id),
                              E.published(util.rfc_3339_timestamp(entry.published)),
                              E.updated(util.rfc_3339_timestamp(entry.updated)),
                              E.link({'href': util.relurl(entry.url, feedurl)})]
                if entry.link:
                    node = E.link({'rel': 'alternate',
                                   'href': util.relurl(entry.link['href'],
                                                       feedurl),
                                   'type': entry.link['type'],
                                   'length': str(entry.link['length']),
                                   'hash': entry.link['hash']})
                    entrynodes.append(node)
                if entry.content and entry.content['markup']:
                    # NB: the original dict literal had a duplicate
                    # 'type' key ('xhtml' followed by
                    # entry.content['type']); only the latter ever
                    # took effect, so the dead key is dropped here.
                    node = E.content({'href': util.relurl(entry.content['href'],
                                                          feedurl),
                                      'type': entry.content['type'],
                                      'length': entry.content['length'],
                                      'hash': entry.content['hash']},
                                     etree.XML(entry.content['markup']))
                    entrynodes.append(node)
                if entry.content and entry.content['src']:
                    node = E.content({'src': util.relurl(entry.content['src'],
                                                         feedurl),
                                      'type': entry.content['type'],
                                      'hash': entry.content['hash']})
                    entrynodes.append(node)
                contents.append(E.entry(*list(entrynodes)))
            feed = E.feed(*contents)
            res = etree.tostring(feed,
                                 pretty_print=True,
                                 xml_declaration=True,
                                 encoding='utf-8')
            # mkstemp returns an already-open fd which we don't need
            # (we only want a unique name); close it directly instead
            # of wrapping it in a file object first
            fileno, tmpfile = mkstemp()
            os.close(fileno)
            with open(tmpfile, "wb") as fp:
                fp.write(res)
            util.replace_if_different(tmpfile, feedfile)
            return feedfile

        assert isinstance(entries, list), \
            'entries should be a list, not %s' % type(entries)
        feedurl = self.generic_url(basefile, 'feed', '.atom')
        # not sure abt this - should be uri of dataset?
        feedid = feedurl

        # assume entries are sorted newest first
        # could be simplified with more_itertools.chunked?
        cnt = 0
        res = []
        while len(entries) >= archivesize * 2:
            cnt += 1
            archiveentries = entries[-archivesize:]
            entries[:] = entries[:-archivesize]

            if cnt > 1:
                prev = "%s-archive-%s.atom" % (basefile, cnt - 1)
            else:
                prev = None
            if len(entries) < archivesize * 2:
                next = "%s.atom" % basefile
            else:
                next = "%s-archive-%s.atom" % (basefile, cnt + 1)
            # (fixed: was "suffix = suffix=..." -- a redundant double
            # assignment)
            suffix = '-archive-%s' % cnt
            res.append(write_file(archiveentries, suffix=suffix,
                                  prevarchive=prev,
                                  nextarchive=next))

        # NOTE(review): when no archives were created (cnt == 0) this
        # still emits prevarchive="<basefile>-archive-0.atom", which
        # points at a nonexistent file -- behavior kept as-is, confirm
        # whether the link should be omitted in that case.
        res.insert(0, write_file(entries,
                                 prevarchive="%s-archive-%s.atom" % (basefile,
                                                                     cnt)))
        return res
        

    def frontpage_content(self, primary=False):
        """If the module wants to provide any particular content on
        the frontpage, it can do so by returning a XHTML fragment (in
        text form) here. If primary is true, the caller wants the
        module to take primary responsibility for the frontpage
        content. If primary is false, the caller only expects a
        smaller amount of content (like a smaller presentation of the
        repository and the document it contains)."""
        g = self.make_graph()
        qname = g.qname(self.rdf_type)
        return ("<h2>Module %s</h2><p>Handles %s documents. "
                "Contains %s published documents.</p>"
                % (self.alias, qname,
                   len(list(self.list_basefiles_for("_postgenerate")))))

    # @manager.action
    def status(self, basefile=None, samplesize=3):
        print("Status for document repository '%s' (%s)" % (self.alias, getattr(self.config,'class')))
        s = self.get_status()
        for step in s.keys():  # odict
            exists = s[step]['exists']
            todo = s[step]['todo']
            exists_sample = ", ".join(exists[:samplesize])
            exists_more = len(exists) - samplesize
            todo_sample = ", ".join(todo[:samplesize])
            todo_more = len(todo) - samplesize

            if not exists_sample:
                exists_sample = "None"
            if exists_more > 0:
                exists_more_label = ".. (%s more)" % exists_more
            else:
                exists_more_label = ""
                
            if todo_more > 0:
                todo_more_label = ".. (%s more)" % todo_more
            else:
                todo_more_label = ""

            if step == 'download':
                print(" download: %s.%s" % (exists_sample, exists_more_label))
            else:
                if todo_sample:
                    print(" %s: %s.%s Todo: %s.%s" % (step, exists_sample, exists_more_label,
                                                     todo_sample, todo_more_label))
                else:
                    print(" %s: %s.%s" % (step, exists_sample, exists_more_label))
                    

        # alias and classname
        # $ ./ferenda-build.py w3c status
        # Status for document repository 'w3c' (w3cstandards.W3Cstandards)
        # downloaded: rdb-direct-mapping r2rml ... (141 more)
        # parsed: None (143 needs parsing)
        # generated: None (143 needs generating)

    def get_status(self):
        # for step in ('download', 'parse', 'generate')
        #     basefiles[step] = list_basefiles_for(step)
        #     pathfunc = downloaded_path|parsed_path|generated_path
        #     physicals[step] = [pathfunc(x) for x in basefiles[step]]
        #     compare physical['parse'][idx] with physical['downloaded'][idx]
        #     if older or nonexistent:
        #         todo[step].append()

        status = OrderedDict()
        # download
        exists = []
        todo = []
        for basefile in self.list_basefiles_for("parse"):
            exists.append(basefile)
            # no point in trying to append
        status['download'] = {'exists':exists,
                              'todo':todo}

        # parse
        exists = []
        todo = []
        for basefile in self.list_basefiles_for("parse"):
            dependency = self.store.downloaded_path(basefile)
            target = self.store.parsed_path(basefile)
            if os.path.exists(target):
                exists.append(basefile)
            # Note: duplication of (part of) parseifneeded logic
            if not util.outfile_is_newer([dependency],target):
                todo.append(basefile)
        status['parse'] = {'exists':exists,
                           'todo':todo}
            
        # generated
        exists = []
        todo = []
        for basefile in self.list_basefiles_for("generate"):
            dependency = self.store.parsed_path(basefile)
            target = self.store.generated_path(basefile)
            if os.path.exists(target):
                exists.append(basefile)
            # Note: duplication (see above)
            if not util.outfile_is_newer([dependency],target):
                todo.append(basefile)
        status['generated'] = {'exists':exists,
                               'todo':todo}
        return status

    def tabs(self):
        """Get the navigation menu segment(s) provided by this docrepo

        Returns a list of tuples, where each tuple will be rendered
        as a tab in the main UI. First element of the tuple is the
        link text, and the second is the link destination. Normally, a
        module will only return a single tab.

        :returns: List of tuples
        """
        if self.rdf_type == self.ns['foaf'].Document:
            return [(self.alias, "/" + self.alias + "/")]
        else:
            return [(util.uri_leaf(str(self.rdf_type)), "/" + self.alias + "/")]


    # FIXME: This is conceptually similar to basefile_from_uri (given
    # either a URI or a PATH_INFO, find out if self handles the
    # document/resource pointed out by same), we should perhaps unify
    # them somehow?
    def http_handle(self, environ):
        if environ['PATH_INFO'].count("/") > 2:
            null, res, alias, basefile = environ['PATH_INFO'].split("/", 3)
            if (alias == self.alias):
                # we SHOULD be able to handle this -- maybe provide
                # apologetic message about this if we can't?
                genpath = self.store.generated_path(basefile)
                if os.path.exists(genpath):
                    return (open(genpath, 'rb'),
                            os.path.getsize(genpath),
                            "text/html")
                    
        return (None, None, None)

    @staticmethod
    def _setup_logger(logname):
        log  = logging.getLogger(logname)
        if log.handlers == []:
            if hasattr(logging,'NullHandler'):
                log.addHandler(logging.NullHandler())
            else:
                # py26 compatibility
                class NullHandler(logging.Handler):
                    def emit(self, record):
                        pass
                log.addHandler(NullHandler())
        return log
        
