import os, re
import cStringIO
import glob
import threading
import datetime
import posixpath #really for URL path part manipulation
#import urlparse
from dateutil.tz import *
from dateutil import *
from pkg_resources import resource_string

import amara
#import mergeatom
from amara import binderytools
from brightcontent.core import SLUGCHARS
from brightcontent.util import fixup_namespaces, quick_xml_multiscan
from Ft.Xml.Lib import Uri
from Ft.Xml.Xslt import Transform

# XML namespace URIs used when parsing and generating Atom documents
ATOM10_NS = u'http://www.w3.org/2005/Atom'
XHTML1_NS = u'http://www.w3.org/1999/xhtml'
ATOMTHR_EXT_NS = u'http://purl.org/syndication/thread/1.0'
# Character encoding for serialized output
ENCODING = 'UTF-8'
# Placeholder URI for documents that do not yet have a real identifier
DUMMY_URI = u'urn:x-brightcontent:dummy'
BCBLOG_NS = u'http://brightcontent.net/ns/'
# Prefix map handed to amara.parse/pushbind so XPattern scans can use
# atom:, xh: and thr: qualified names
COMMON_PREFIXES = { u'atom': ATOM10_NS,
                    u'xh': XHTML1_NS,
                    u'thr': ATOMTHR_EXT_NS }
DEFAULT_LANG = u'en'
# Per-store file holding the feed-level envelope (title, author, etc.)
ENVELOPE_XML_FILE = 'envelope.xml'


class repository:
    """
    Bright Content Atom Store repository
    Primary storage for Weblog entries.  Defines an API specifically
    for BC needs, but often subclasses will also expose themselves
    as more general Atom Stores, and might even provide AtomPP capability

    Every store instance must also provide a 'name' property, which for
    example would be used by AtomPP plugins as the name of the collection
    """
    def get_entries(self, limit=-1, lower_date=None, upper_date=None, slug=None):
        #Primary query interface; subclasses must implement it
        raise NotImplementedError

    def assemble_feed(self, entries, stream=None):
        """
        Wrap the given entry XML strings in the store's feed envelope.
        If stream is given, serialize into it and return it; otherwise
        return the serialized feed as a string.
        """
        # removed lang b/c it is not valid atom...
        # according to the feed validator
        # u'feed', ATOM10_NS, attributes={u'xml:lang': u'en'})
        atom = amara.create_document(u'feed', ATOM10_NS)
        #Copy the feed-level metadata (title, author, ...) from the envelope
        storedoc = amara.parse(self.envelope_xml_file,
            prefixes=COMMON_PREFIXES)
        for element in storedoc.feed.xml_children:
            atom.feed.xml_append(element)
        for entry in entries:
            atom.feed.xml_append_fragment(entry)
        fixup_namespaces(atom)
        if stream:
            atom.xml(indent='yes', stream=stream)
            return stream
        else:
            buffer = cStringIO.StringIO()
            atom.xml(indent='yes', stream=buffer)
            return buffer.getvalue()

    def generate_url(self, slug, date):
        """
        Build the store-relative Atom document URL, YYYY/MM/DD/slug.atom.
        A trailing '.atom' extension on the slug is removed first so it
        is not doubled.
        """
        #BUGFIX: str.rstrip('.atom') strips any trailing run of the
        #characters '.', 'a', 't', 'o', 'm' (e.g. 'data.atom' -> 'dat'),
        #not the literal suffix -- slice the suffix off instead
        if slug.endswith('.atom'):
            slug = slug[:-len('.atom')]
        return '%s/%s.atom' % (date.strftime('%Y/%m/%d'),
                                self._format_slug(slug))

    def generate_permalink(self, slug, date):
        """
        Build the store-relative permalink URL, YYYY/MM/DD/slug/.
        """
        return '%s/%s/' % (date.strftime('%Y/%m/%d'),
                            self._format_slug(slug))

    def _format_slug(self, slug):
        #Replace any character outside SLUGCHARS with '_'
        #NOTE(review): .decode('utf-8') assumes slug is a byte string --
        #a unicode slug would trigger an implicit encode first; confirm
        #callers always pass str here
        return str(OMIT_FROM_SLUG_PAT.sub('_', slug.strip()).decode('utf-8'))


def get_entry_filter(fn):
    """
    Decorator for store entry getters.  When the wrapped call receives a
    base_url keyword argument, every returned entry has its self, edit
    and alternate links resolved against that base URL.
    """
    REWRITTEN_RELS = ('self', 'edit', 'alternate')
    def filter_entry(obj, *args, **kw):
        entries = fn(obj, *args, **kw)
        base_url = kw.get('base_url')
        #Nothing to rewrite without a base URL or without results
        if not base_url or not entries:
            return entries
        resolved = []
        for entry_xml in entries:
            doc = amara.parse(entry_xml, prefixes=COMMON_PREFIXES)
            for idx in range(len(doc.entry.link)):
                link = doc.entry.link[idx]
                if str(link.rel) not in REWRITTEN_RELS:
                    continue
                href = str(link.href)
                #Strip a leading slash so the URI join is relative
                if href.startswith('/'):
                    href = href[1:]
                link.href = unicode(Uri.BASIC_RESOLVER.normalize(href, base_url))
            resolved.append(doc.xml())
        return resolved
    return filter_entry

def create_entry_filter(fn):
    """
    The entry filter will add consistent links to an entry before
    adding it to a store. This allows consistent template design
    and BC specific features without having to rely on atom
    specific elements. This also allows importing from other
    platforms in addition to custom url schemes.
    """
    def filter_entry(obj, entry_xml, slug, *args, **kw):
        #Normalize the incoming entry through the create stylesheet
        stylesheet = resource_string(__name__, 'store/create.xslt')
        clean_entry = Transform(entry_xml, stylesheet)
        doc = amara.parse(clean_entry, prefixes=COMMON_PREFIXES)
        if not slug:
            slug = str(doc.entry.title)
        updated = amara.binderytools.parse_isodate(str(doc.entry.updated))
        try:
            published = amara.binderytools.parse_isodate(str(doc.entry.published))
        except AttributeError:
            #No atom:published element: fall back to the updated date
            published = updated
        self_url = obj.generate_url(slug, published)
        perma_url = obj.generate_permalink(slug, published)
        def append_link(rel, href, title):
            #Create one atom:link element and attach it to the entry
            link = doc.xml_create_element(
                u'link', ATOM10_NS,
                attributes={u'rel' : rel,
                            u'href': unicode(href),
                            u'title' : title})
            doc.entry.xml_append(link)
        append_link(u'self', self_url, u'Atom')
        append_link(u'edit', self_url, u'Edit')
        append_link(u'alternate', perma_url, u'Permalink')
        return fn(obj, doc.xml(), slug, *args, **kw)
    return filter_entry
            

class flatfile_repository(repository):
    """
    Flat file implementation of the repository.
    Entries are stored one per file under storedir, laid out according
    to entry_file_pattern (default: YYYY/M/D/slug.atom), with the feed
    envelope in storedir/envelope.xml.
    """
    def __init__(self, storedir=None, **kw):
        #Accept the store directory positionally for convenience (the
        #command-line driver calls it that way), falling back to the
        #flatfile_storedir keyword, then to 'atomstore'
        self.storedir = storedir or kw.get('flatfile_storedir', 'atomstore')
        self.entry_file_pattern = kw.get('entry_file_pattern', '%(year)s/%(month)s/%(day)s/%(slug)s.atom')
        self.name = os.path.split(self.storedir)[1]
        self.envelope_xml_file = Uri.OsPathToUri(
            os.path.join(self.storedir, ENVELOPE_XML_FILE), attemptAbsolute=1)
        self._archive_dfmt = '%Y%b'
        self._file_dfmt = '%Y-%m-%dT%H-%M-%S'
        self._date_format = '%Y-%m-%dT%H:%M:%SZ'
        self._index = None #used to minimize the need for multiple scans

    @get_entry_filter
    def get_entries(self, **kw):
        """
        Primary interface to the store. Returns a list of
        entries as strings sorted by dates. Query criteria come as
        keyword arguments (eid, limit, offset, lower_date, upper_date,
        base_url).
        """
        filenames = self._do_query(**kw)
        entries = []
        for fn in filenames or ():
            f = open(fn, 'r')
            try:
                entries.append(f.read())
            finally:
                f.close()
        return entries

    def _do_query(self, eid=None, limit=5, offset=0, **kw):
        """
        Core functionality for get_entries.
        Dispatches on the keyword arguments: an eid takes precedence,
        otherwise a date-range query is run.  Returns entry filenames.
        Note the default limit here is 5, not unlimited.
        """
        start = kw.get('lower_date', None)
        end = kw.get('upper_date', None)
        if eid:
            return self.get_entry_by_id(eid)
        return self.get_entries_by_dates(start, end, offset, limit)

    @property
    def index(self):
        """
        Lazily built, cached list of (updated, published, id, filename)
        tuples for every .atom file under storedir, sorted newest-first
        by updated date.
        """
        if self._index is not None:
            return self._index
        entries = []
        for root, dirs, files in os.walk(self.storedir):
            for fn in files:
                if fn.endswith(ENVELOPE_XML_FILE): continue
                if not fn.endswith('.atom'): continue
                fn = os.path.join(root, fn)
                scan_for = "atom:updated|atom:published"
                details = quick_xml_multiscan(
                    fn, scan_for, count=3, prefixes=COMMON_PREFIXES)
                #Collect scanned values keyed by local element name
                #(replaces the old exec-based variable injection hack)
                scanned = {}
                for ((ns, local), value) in details:
                    scanned[local.decode('utf-8')] = value.decode('utf-8')
                # scan for the self atom:link to recover the entry id
                fnuri = Uri.OsPathToUri(fn, attemptAbsolute=1)
                links = amara.pushbind(fnuri, u'atom:link', prefixes=COMMON_PREFIXES)
                #BUGFIX: reset per file; previously a file without a self
                #link silently inherited the previous file's atom_id
                atom_id = None
                for l in links:
                    if str(l.rel) == 'self':
                        atom_id = str(l.href)
                atom_updated = amara.binderytools.parse_isodate(scanned[u'updated'])
                if u'published' in scanned:
                    atom_published = amara.binderytools.parse_isodate(str(scanned[u'published']))
                else:
                    atom_published = None
                entries.append((atom_updated, atom_published, atom_id, fn))
        entries.sort()
        entries.reverse()
        self._index = entries
        return entries

    def get_entry_by_id(self, url_id):
        """
        Return a single-element list with the filename of the entry whose
        self-link IRI matches url_id, or an empty list if not found.
        """
        for atom_updated, atom_published, atom_id, fn in self.index:
            if atom_id == url_id:
                return [fn]
        return []

    def get_entries_by_dates(self, lower_date=None, upper_date=None, offset=0, limit=-1):
        """
        Retrieves sorted entry filenames between two given dates.
        Note, entries are filtered by published date, if present,
        but always sorted by updated date.  limit == -1 means no limit.
        """
        filtered = []
        for atom_updated, atom_published, atom_id, fn in self.index:
            index_date = atom_published or atom_updated
            if lower_date and index_date < lower_date:
                continue
            if upper_date and index_date > upper_date:
                continue
            filtered.append(fn)
        if limit == -1:
            #BUGFIX: the old slice [offset:-1] dropped the last entry
            return filtered[offset:]
        return filtered[offset:offset + limit]

    @create_entry_filter
    def create_entry(self, entry_text, slug, folder='/'):
        """
        Write a new entry file under storedir/folder, named according to
        entry_file_pattern, and return the serialized entry.
        """
        doc = amara.parse(entry_text, prefixes=COMMON_PREFIXES)
        updated = amara.binderytools.parse_isodate(str(doc.entry.updated))
        try:
            published = amara.binderytools.parse_isodate(str(doc.entry.published))
        except AttributeError:
            #No atom:published element: fall back to the updated date
            published = updated
        params = {
            'year': published.year,
            'month': published.month,
            'day': published.day,
            'isodate': published.strftime('%Y-%m-%d'),
            'utc_isodatetime': published.strftime('%Y-%m-%dT%H-%M-%SZ'),
            'slug': self.title_to_slug(slug),
        }
        tail = self.entry_file_pattern % params
        filename = os.path.join(self.storedir, *(folder.split('/') + [tail]))
        parent = os.path.split(filename)[0]
        if not os.path.isdir(parent):
            os.makedirs(parent)
        f = open(filename, 'w+')
        try:
            doc.xml(indent=True, stream=f)
        finally:
            f.close()
        self._index = None #Might be necessary if a plug-in is using the same store instance
        return doc.xml(indent=True)

    def update_entry(self, entry_text, iri_id):
        """
        Overwrite the stored entry whose self-link IRI matches iri_id
        with entry_text, and return entry_text.
        """
        #If not found, let the IndexError propagate to caller
        filename = [ f for u, p, i, f in self.index if i == iri_id ][0]
        f = open(os.path.join(self.storedir, filename), 'w+')
        try:
            f.write(entry_text)
        finally:
            f.close()
        #Stored content changed; force a rescan on the next index access
        self._index = None
        return entry_text

    def title_to_slug(self, raw_title):
        """
        Lower-case raw_title and replace every character outside
        SLUGCHARS with '_'.
        """
        #More a job for translate(), but regex is even closer to what we want
        return OMIT_FROM_SLUG_PAT.sub('_', raw_title).lower().decode('utf-8')


OMIT_FROM_SLUG_PAT = re.compile('[^%s]'%SLUGCHARS)

#psycopg imports are below this class
class psycopg_repository(repository):
    """
    PostgreSQL implementation of the repository via psycopg
    Usage:
      Create a devel_config.ini with, e.g.:
        repository_type = psycopg
        psycopg_dsn = dbname=brightcontent
        envelope_xml_file = /etc/bc/envelope.xml
      Then set up DB, populate with contents, then launch the server:
        psql -f etc/postgres8.sql template1
        python etc/import-feed-into-postgres.py http://copia.ogbuji.net/blog/index.atom dbname=brightcontent
        paster serve devel_config.ini
    See also:
      http://initd.org/tracker/psycopg/wiki/PsycopgTwo
      http://initd.org/tracker/psycopg/browser/psycopg2/trunk/examples/lobject.py
      http://www.postgresql.org/docs/8.1/static/largeobjects.html
      http://www.initd.org/tracker/psycopg/wiki/PsycopgTwoFaq
    """
    def __init__(self, psycopg_dsn=None, envelope_xml_file=None, **kw):
        self._dsn = psycopg_dsn
        self._conn = psycopg2.connect(self._dsn)
        #FIXME: Should get the repo name by parsing the DSN
        #But this is hard in the general case, see e.g. http://home.arcor.de/bauhaus/Tools/dsn.html
        #So for now punt by assuming the simplest form of DSN
        #Will break with e.g. dsn='dbname=test user=test password=xxx'
        self.name = self._dsn
        self.envelope_xml_file = envelope_xml_file

    #FIXME: it would be better to add a shutdown() or close() function
    #to the repo API to avoid all the trickiness of __del__
    def __del__(self):
        self._conn.close()
    
    def get_entries(self, offset=0, limit=-1, lower_date=None,
                    upper_date=None, slug=None):
        #See http://www.postgresql.org/docs/8.1/static/functions-datetime.html
        DATE_CHECK = "WHERE (DATE %(lower)s, DATE %(upper)s) OVERLAPS (updated, updated)"
        SLUG_CHECK = "nickname=%(slug)s"
        LIMIT_CHECK = "limit %(limit)s"
        curs = self._conn.cursor()
        query = "SELECT body, updated, nickname FROM entry "
        where_added = False;
        params = {}
        if lower_date and lower_date:
            query += DATE_CHECK
            params.update({'upper': upper_date, 'lower': lower_date})
            where_added = True;
        if slug:
            if not where_added:
                query += " WHERE "
                where_added = True;
            query += SLUG_CHECK
            params.update({'nick': slug})
        if limit != -1:
            query += LIMIT_CHECK
            params.update({'limit': limit})

        #FIXME: handle offset
        curs.execute(query, params)
        result = curs.fetchall()
        entries = [ r[0] for r in result ]
        return entries

#psycopg2 is an optional dependency: if it is missing, remove the
#PostgreSQL-backed repository class so it cannot be selected/instantiated
try:
    import psycopg2
except ImportError:
    del psycopg_repository

#INCOMPLETE!
class amplee_repository(repository):
    """
    Stub for an amplee-backed implementation of the repository.
    NOTE(review): not functional -- see the undefined names flagged below.
    """
    def __init__(self, amplee_db_connection=None, **kw):
        from atomixlib.mapper import Category
        from amplee.storage import storepyscopg
        from amplee.atompub import store
        #NOTE(review): 'psycopg' and 'DB' are undefined here; presumably
        #this should use storepyscopg and amplee_db_connection -- confirm
        self._backend = psycopg.Psycopg2Storage(DB)

        #NOTE(review): 'backend' is undefined (self._backend intended?);
        #app_store is also discarded -- this constructor cannot run as written
        app_store = store.AtomPubStore(backend)

        encoding = kw['encoding']

    def get_entries(self, offset=0, limit=-1, lower_date=None, upper_date=None, slug=None):
        #NOTE(review): 'entries' is never defined -- calling this raises
        #NameError; stub pending a real implementation
        return entries



if __name__ == '__main__':
    import sys
    from dateutil.relativedelta import *
    from dateutil.tz import tzlocal
    store = flatfile_repository('/tmp/atomstore')
    #entries = get_entries(config, lower_date=datetime.datetime.today(),
    #    upper_date=datetime.datetime.today())
    #print assemble_feed(entries, config)
    #entries = get_entries(config)
    #print assemble_feed(entries, config)
    now = datetime.datetime.now(tzlocal())
    try:
        limit = int(sys.argv[1])
    except IndexError:
        limit = -1
    try:
        start = int(sys.argv[2])
        upper = now + relativedelta(days=-start)
    except IndexError:
        upper = None
    try:
        end = int(sys.argv[3])
        lower = now + relativedelta(days=-end)
    except IndexError:
        lower = None
    try:
        slug = sys.argv[4]
    except IndexError:
        slug = None
    entries = store.get_entries(limit, lower_date=lower, upper_date=upper, slug=slug)
    print store.assemble_feed(entries)

