import os, re
import cStringIO
import glob
import threading
import amara
#import mergeatom
import datetime
from dateutil.tz import *
from dateutil import *
from amara import binderytools
from brightcontent.util import fixup_namespaces, quick_xml_multiscan

ATOM10_NS = u'http://www.w3.org/2005/Atom'
XHTML1_NS = u'http://www.w3.org/1999/xhtml'
ENCODING = 'UTF-8'
DUMMY_URI = u'urn:x-brightcontent:dummy'
COMMON_PREFIXES = { u'atom': ATOM10_NS, u'xh': XHTML1_NS }
DEFAULT_LANG = u'en'
ENVELOPE_XML_FILE = 'envelope.xml'

from amplee.storage.dummyfs import DummyStorageFS
from amplee.utils import parse_isodate

class repository:
    """
    Bright Content Atom Store repository.
    Primary storage for Weblog entries.  Defines an API specifically
    for BC needs, but often subclasses will also expose themselves
    as more general Atom Stores, and might even provide APP capability
    (There are plans for WSGI middleware, such as brightcontent.core.store.app,
    which is vaporware at present, for plugging APP capability into any BC
    repository)
    """
    def get_entries(self, limit=-1, lower_date=None, upper_date=None, slug=None):
        """
        Return a list of serialized atom:entry documents matching the
        query criteria.  Subclasses must override.
        """
        raise NotImplementedError

    def assemble_feed(self, entries, stream=None):
        """
        Build a complete Atom feed from the envelope document
        (self.envelope_xml_file, which subclasses must set) plus the
        given entry fragments.

        entries -- iterable of serialized atom:entry strings
        stream -- optional file-like object; when given the feed is
            written to it and the stream is returned, otherwise the
            serialized feed is returned as a string
        """
        # Use the module-wide DEFAULT_LANG rather than a second hard-coded
        # u'en' literal, so the language is configured in one place
        atom = amara.create_document(
            u'feed', ATOM10_NS, attributes={u'xml:lang': DEFAULT_LANG})
        storedoc = amara.parse(self.envelope_xml_file,
            prefixes=COMMON_PREFIXES)
        # Copy the envelope's feed-level metadata into the new feed
        for element in storedoc.feed.xml_children:
            atom.feed.xml_append(element)
        for entry in entries:
            atom.feed.xml_append_fragment(entry)
        fixup_namespaces(atom)
        # Identity check: a valid but "falsy" stream object must still be
        # honored (plain `if stream:` would silently fall through)
        if stream is not None:
            atom.xml(indent='yes', stream=stream)
            return stream
        else:
            buffer = cStringIO.StringIO()
            atom.xml(indent='yes', stream=buffer)
            return buffer.getvalue()


class flatfile_repository(repository, DummyStorageFS):
    """
    Flat file implementation of the repository: each entry is one .atom
    file under self.storedir, alongside an envelope.xml holding the
    feed-level metadata.
    """
    def __init__(self, **kw):
        # Directory with one file per entry plus the envelope document
        self.storedir = kw.get('flatfile_storedir', 'atomstore')
        self.envelope_xml_file = os.path.join(self.storedir, ENVELOPE_XML_FILE)
        self._archive_dfmt = '%Y%b'
        self._file_dfmt = '%Y-%m-%dT%H-%M-%S'
        self._date_format = '%Y-%m-%dT%H:%M:%SZ'
        self._index = None #used to minimize the need for multiple scans

        DummyStorageFS.__init__(self, storage_path=self.storedir)

    def get_entries(self, **kw):
        """
        Primary interface to the store.  Returns a list of entries as
        strings sorted by date, or None when nothing matches.  Query
        criteria come as keyword arguments (see _do_query).
        """
        filenames = self._do_query(**kw)
        if not filenames:
            return None
        entries = []
        for fn in filenames:
            f = open(fn, 'r')
            # try/finally so a read error cannot leak the file handle
            try:
                entries.append(f.read())
            finally:
                f.close()
        return entries

    def _do_query(self, **kw):
        """
        Core functionality for get_entries: dispatch on the keyword
        arguments and return a list of matching entry file names.

        Recognized keywords: lower_date, upper_date, slug,
        limit (default 5), offset (default 0).
        """
        # (removed a leftover debug `print kw`)
        start = kw.get('lower_date', None)
        end = kw.get('upper_date', None)
        slug = kw.get('slug', None)
        limit = kw.get('limit', 5)
        offset = kw.get('offset', 0)
        if slug:
            return self.get_entry_by_slug(start, slug)
        return self.get_entries_by_dates(start, end, offset, limit)

    @property
    def index(self):
        """
        Lazily-built index of every entry file in the store: a list of
        (updated, id, published, filename) tuples, newest first.
        """
        if self._index is not None:
            return self._index
        entries = []
        for root, dirs, files in os.walk(self.storedir):
            for fn in files:
                if fn.endswith(ENVELOPE_XML_FILE): continue
                fn = os.path.join(root, fn)
                details = quick_xml_multiscan(fn, 'atom:id|atom:updated|atom:published', count=3,
                                               prefixes=COMMON_PREFIXES)
                # Collect the scanned elements into a per-file dict rather
                # than exec'ing generated assignments: exec on file-derived
                # strings is unsafe, and the old code silently reused
                # atom_id/atom_updated from the PREVIOUS file when an
                # element was missing
                found = {}
                for ((ns, local), value) in details:
                    found[local.decode('utf-8')] = value.decode('utf-8')
                atom_id = found.get(u'id')
                atom_updated = amara.binderytools.parse_isodate(found[u'updated'])
                if u'published' in found:
                    atom_published = amara.binderytools.parse_isodate(found[u'published'])
                else:
                    # atom:published is optional
                    atom_published = None
                entries.append((atom_updated, atom_id, atom_published, fn))
        # Sort on atom:updated, newest entries first
        entries.sort()
        entries.reverse()
        self._index = entries
        return entries

    def get_entry_by_id(self, iri_id):
        """
        Return a one-element list with the file whose atom:id equals
        iri_id, or [] when not found.
        """
        for atom_updated, atom_id, atom_published, fn in self.index:
            if atom_id == iri_id:
                return [fn]
        return []

    def get_entry_by_slug(self, entry_date, slug):
        """
        Return a one-element list with the file for the entry published
        on entry_date matching slug, or [] when not found.
        """
        ymd = '%Y-%m-%d'
        # Loop-invariant: format the requested date once
        reqdate = entry_date.strftime(ymd)
        for atom_updated, atom_id, atom_published, fn in self.index:
            # Entries without atom:published can never match a date query
            # (the old code crashed on None here)
            if atom_published is None:
                continue
            pubdate = atom_published.strftime(ymd)
            # NOTE(review): fn is a full path while slug looks like a bare
            # slug -- this comparison can only match when callers pass the
            # full file name; confirm against callers
            if pubdate == reqdate and fn == slug:
                return [fn]
        return []

    def get_entries_by_dates(self, lower_date=None, upper_date=None, offset=0, limit=-1):
        """
        Retrieve sorted entry file names whose atom:updated lies between
        the two given dates (either bound may be None for open-ended),
        honoring offset and limit (limit=-1 means unlimited).
        """
        filtered = []
        for atom_updated, atom_id, atom_published, fn in self.index:
            if lower_date and atom_updated < lower_date:
                continue
            if upper_date and atom_updated > upper_date:
                continue
            filtered.append(fn)
        # BUG FIX: the old slice used end index -1 when limit == -1, which
        # silently dropped the last matching entry
        if limit == -1:
            return filtered[offset:]
        return filtered[offset:offset + limit]

    def create_entry(self, entry_text, slug):
        """
        Store a new entry document, assigning it an atom:id derived from
        its published date and slug.  Returns the new IRI id.
        """
        doc = amara.parse(entry_text, prefixes=COMMON_PREFIXES)
        published = amara.binderytools.parse_isodate(str(doc.entry.published))
        if not slug:
            slug = doc.entry.title
        slug = self.title_to_slug(slug)
        # NOTE(review): self.app_conf is never set in this class; callers
        # apparently must inject it -- confirm
        iri_id = '%s/%s/%s-%s.atom' % (self.app_conf['atom_base_url'].rstrip('/'),
            self.storedir, published.strftime('%Y-%m-%d-%H-%M-%S'), slug)
        doc.entry.id = unicode(iri_id)
        filename = "%s-%s.atom" % (published.strftime('%Y-%m-%dT%H-%M-%SZ'), slug)
        f = open(os.path.join(self.storedir, filename), 'w+')
        try:
            # BUG FIX: was index='yes', a typo for the indent flag used
            # everywhere else in this module
            doc.xml(indent='yes', stream=f)
        finally:
            f.close()
        # The cached index no longer reflects the store contents
        self._index = None
        return iri_id

    def make_iri_id(self, collection, seed, slug):
        """
        Called by amplee to get the id value of the atom entry member
        Do whatever you want from here but return a unicode value

        Keyword argument:
        collection -- amplee.atompub.AtomPubCollection instance
        seed -- bridge.Element instance of the atom entry
        slug -- decoded slug value
        """
        if not slug:
            slug = seed.title.xml_text
        slug = self.title_to_slug(slug)
        published = seed.get_child('published', seed.xml_ns)
        published = parse_isodate(published.xml_text)
        iri_id = '%s%s-%s.atom' % (collection.base_edit_uri, published.strftime('%Y-%m-%d-%H-%M-%S'), slug)

        return unicode(iri_id)

    def make_filename(self, base_uri, slug, title):
        """
        Used by amplee to get the name of the file when saving the
        atom entry resource.  Falls back to the title when no slug is
        given; always returns a unicode slugified name.
        """
        if not slug:
            slug = title
        return unicode(self.title_to_slug(slug))

    def update_entry(self, entry_text, iri_id):
        """
        Overwrite the stored entry document identified by iri_id with
        entry_text (re-deriving the file name from the published date
        and title).  Returns iri_id.
        """
        doc = amara.parse(entry_text, prefixes=COMMON_PREFIXES)
        published = amara.binderytools.parse_isodate(str(doc.entry.published))
        title = self.title_to_slug(doc.entry.title)
        doc.entry.id = unicode(iri_id)
        filename = "%s-%s.atom" % (published.strftime('%Y-%m-%dT%H-%M-%SZ'), title)
        f = open(os.path.join(self.storedir, filename), 'w+')
        try:
            # BUG FIX: was index='yes', a typo for the indent flag
            doc.xml(indent='yes', stream=f)
        finally:
            f.close()
        # The cached index no longer reflects the store contents
        self._index = None
        return iri_id

    def title_to_slug(self, raw_title):
        """
        Normalize a title into a file-name-safe slug: strip every
        character that is not a word character or hyphen (spaces are
        removed outright, not replaced) and lowercase the result.
        """
        return OMIT_FROM_SLUG_PAT.sub('', raw_title).lower().decode('utf-8')


# Characters stripped when turning a title into a slug: everything that is
# not a word character ([A-Za-z0-9_]) or a hyphen.  Raw string so `\w`
# reaches the regex engine without relying on unrecognized-escape
# passthrough (a DeprecationWarning in modern Python).
OMIT_FROM_SLUG_PAT = re.compile(r'[^-\w_]')

#psycopg imports are below this class
class psycopg_repository(repository):
    """
    PostgreSQL implementation of the repository via psycopg
    Usage:
      Create a devel_config.ini with, e.g.:
        repository_type = psycopg
        psycopg_dsn = dbname=brightcontent
        envelope_xml_file = /etc/bc/envelope.xml
      Then set up DB, populate with contents, then launch the server:
        psql -f etc/postgres8.sql template1
        python etc/import-feed-into-postgres.py http://copia.ogbuji.net/blog/index.atom dbname=brightcontent
        paster serve devel_config.ini
    See also:
      http://initd.org/tracker/psycopg/wiki/PsycopgTwo
      http://initd.org/tracker/psycopg/browser/psycopg2/trunk/examples/lobject.py
      http://www.postgresql.org/docs/8.1/static/largeobjects.html
      http://www.initd.org/tracker/psycopg/wiki/PsycopgTwoFaq
    """
    def __init__(self, psycopg_dsn=None, envelope_xml_file=None, **kw):
        self._dsn = psycopg_dsn
        self._conn = psycopg2.connect(self._dsn)
        self.envelope_xml_file = envelope_xml_file

    #FIXME: it would be better to add a shutdown() or close() function
    #to the repo API to avoid all the trickiness of __del__
    def __del__(self):
        self._conn.close()

    def get_entries(self, offset=0, limit=-1, lower_date=None,
                    upper_date=None, slug=None):
        """
        Return entry bodies matching the given criteria, querying the
        `entry` table with parameterized SQL.  limit=-1 means unlimited.
        """
        #See http://www.postgresql.org/docs/8.1/static/functions-datetime.html
        DATE_CHECK = "(DATE %(lower)s, DATE %(upper)s) OVERLAPS (updated, updated)"
        SLUG_CHECK = "nickname=%(slug)s"
        curs = self._conn.cursor()
        query = "SELECT body, updated, nickname FROM entry"
        # Accumulate WHERE conditions and join with AND; the old
        # string-append logic omitted the AND (and a separating space)
        # when both the date and slug filters applied
        conditions = []
        params = {}
        # BUG FIX: was `lower_date and lower_date`, which never consulted
        # upper_date even though DATE_CHECK interpolates both bounds
        if lower_date and upper_date:
            conditions.append(DATE_CHECK)
            params.update({'upper': upper_date, 'lower': lower_date})
        if slug:
            conditions.append(SLUG_CHECK)
            # BUG FIX: was stored under 'nick', but the SQL placeholder
            # is %(slug)s, so the query could never bind it
            params['slug'] = slug
        if conditions:
            query += " WHERE " + " AND ".join(conditions)
        if limit != -1:
            query += " LIMIT %(limit)s"
            params['limit'] = limit
        # Offset support (was a FIXME): PostgreSQL accepts OFFSET after LIMIT
        if offset:
            query += " OFFSET %(offset)s"
            params['offset'] = offset
        curs.execute(query, params)
        result = curs.fetchall()
        return [ r[0] for r in result ]

# Deferred import: psycopg_repository only touches psycopg2 at call time,
# so importing here lets the class definition above succeed regardless.
# If the driver is missing, withdraw the class entirely so configuration
# code sees its absence instead of getting runtime NameErrors.
try:
    import psycopg2
except ImportError:
    del psycopg_repository
    

#INCOMPLETE!
class amplee_repository(repository):
    """
    amplee (Atom Publishing Protocol) backed implementation of the
    repository.  INCOMPLETE: get_entries is not yet implemented.
    """
    def __init__(self, amplee_db_connection=None, **kw):
        from atomixlib.mapper import Category
        from amplee.storage import storepyscopg
        from amplee.atompub import store
        # BUG FIX: the backend was built from undefined names (psycopg, DB)
        # and then referenced as the bare name `backend`; use the imported
        # storage module and the injected connection instead.
        # NOTE(review): assumes storepyscopg exposes Psycopg2Storage --
        # confirm against the amplee version in use
        self._backend = storepyscopg.Psycopg2Storage(amplee_db_connection)
        self.app_store = store.AtomPubStore(self._backend)
        # BUG FIX: the encoding was read into a discarded local and the
        # key was mandatory; keep it on the instance with a sane default
        self.encoding = kw.get('encoding', ENCODING)

    def get_entries(self, offset=0, limit=-1, lower_date=None, upper_date=None, slug=None):
        # BUG FIX: previously returned the undefined name `entries`
        # (guaranteed NameError); honor the base-class contract instead
        raise NotImplementedError



if __name__ == '__main__':
    import sys
    from dateutil.relativedelta import *
    from dateutil.tz import tzlocal
    store = flatfile_repository('/tmp/atomstore')
    #entries = get_entries(config, lower_date=datetime.datetime.today(),
    #    upper_date=datetime.datetime.today())
    #print assemble_feed(entries, config)
    #entries = get_entries(config)
    #print assemble_feed(entries, config)
    now = datetime.datetime.now(tzlocal())
    try:
        limit = int(sys.argv[1])
    except IndexError:
        limit = -1
    try:
        start = int(sys.argv[2])
        upper = now + relativedelta(days=-start)
    except IndexError:
        upper = None
    try:
        end = int(sys.argv[3])
        lower = now + relativedelta(days=-end)
    except IndexError:
        lower = None
    try:
        slug = sys.argv[4]
    except IndexError:
        slug = None
    entries = store.get_entries(limit, lower_date=lower, upper_date=upper, slug=slug)
    print store.assemble_feed(entries)

