import amara
from pprint import pprint
from brightcontent.core.store import COMMON_PREFIXES
from brightcontent.core.store import create_entry_filter
from amara.binderytools import parse_isodate as mkdate
from datetime import datetime
from brightcontent.core.store import repository
from wsgiappclient.appclient import AppClient

try:
    import sqlite3 as sqlite
except ImportError:
    # Python < 2.5 fallback: the external pysqlite2 package exposes the
    # same DB-API under dbapi2.
    from pysqlite2 import dbapi2 as sqlite

# Both flags together: parse declared column types AND 'col [type]' aliases.
detect_types = sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES

import sqlalchemy.pool as pool
# Rebind the name: 'sqlite' is now a SQLAlchemy proxy that pools
# connections while still forwarding module attributes (e.g. exceptions).
sqlite = pool.manage(sqlite)
def getconn():
    # Connection factory for the pool below.
    # NOTE(review): hard-codes the db file name, so the sqlite_dburi
    # setting read in sqlite_repository.__init__ is effectively ignored —
    # confirm whether that is intended.
    return sqlite.connect('entries.db')

p = pool.SingletonThreadPool(getconn)

class sqlite_repository(repository):
    """
    A simple sqlite3 db store. The table looks like:
      entries = key(id, url, updated, published, xml_content)
    The url should be reminiscent of the actual atom_id.
    """
    entries_table_schema = """
    CREATE TABLE IF NOT EXISTS entries (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        url TEXT UNIQUE,
        published TIMESTAMP NOT NULL,
        updated TIMESTAMP NOT NULL,
        content TEXT
    );"""

    def __init__(self, **app_conf):
        """
        Configure the store and make sure the entries table exists.

        Recognized app_conf keys:
          sqlite_dburi             -- db file (NOTE: currently unused, the
                                      pooled getconn() hard-codes 'entries.db')
          atom_default_collection  -- collection name (default 'entries')
          sqlite_feed_envelope     -- feed envelope file (default 'envelope.xml')
        """
        dbfile = app_conf.get('sqlite_dburi', 'entries.db')
        self.name = app_conf.get('atom_default_collection', 'entries')
        self.envelope_xml_file = app_conf.get('sqlite_feed_envelope',
                                              'envelope.xml')
        self._index = None
        conn = p.connect()
        try:
            # fix: sqlite_master rows are 1-tuples, so the original
            # membership test against the raw rows was always true
            tables = [ t[0] for t in
                       conn.execute('SELECT name FROM sqlite_master').fetchall() ]
            if u'entries' not in tables:
                conn.execute(self.entries_table_schema)
                try:
                    conn.commit()
                except sqlite.IntegrityError as e:
                    # fix: bare 'IntegrityError' was an undefined name
                    conn.rollback()
                    raise e
        finally:
            # fix: the connection leaked when table creation raised
            conn.close()

    @property
    def conn(self):
        """A pooled connection; callers are responsible for closing it."""
        return p.connect()

    @property
    def index(self):
        """
        Cached list of (updated, published, url, content) tuples for every
        entry, newest first. The date columns are parsed with mkdate.
        """
        if self._index:
            return self._index
        sql = """SELECT updated as "updated [timestamp]",
                        published as "published [timestamp]",
                        url, content
                 FROM entries
                 ORDER BY updated DESC"""
        conn = self.conn
        try:
            rows = conn.execute(sql).fetchall()
        finally:
            conn.close()
        # create a tuple of updated, published, id, xml
        entries = [ (mkdate(e[0]), mkdate(e[1]), str(e[2]), e[3])
                    for e in rows ]
        self._index = entries
        return entries

    def get_entries(self, **kw):
        """
        Primary interface to the store. Returns a list of
        entries as strings sorted by dates. Query criteria come as
        keyword arguments.
        """
        entries = self._do_query(**kw)
        return entries

    def _do_query(self, eid=None, limit=5, offset=0, **kw):
        """
        Core functionality for get_entries.
        Parses the keyword arguments to see what kind of entries to grab.
        Returns a list of entries.
        """
        start = kw.get('lower_date', None)
        end = kw.get('upper_date', None)
        if eid:
            entries = self.get_entry_by_id(eid)
        else:
            entries = self.get_entries_by_dates(start, end, offset, limit)
        return entries

    def get_entry_by_id(self, url):
        """Content of the entries whose url matches (normally 0 or 1)."""
        sql = 'SELECT content FROM entries WHERE url = ?'
        data = (url,)
        conn = self.conn
        try:
            entries = conn.execute(sql, data).fetchall()
        finally:
            conn.close()
        entries = [ str(e[0]) for e in entries ]
        return entries

    def get_entries_by_dates(self, start=None, end=None, offset=None, limit=None):
        """
        Entry content in a date window, newest first. Without a start date
        the cached index is consulted instead of the database.
        """
        if not start:
            return [ str(e[3]) for e in self.index[:limit] ]
        sql = 'SELECT content FROM entries WHERE updated >= ?'
        data = (start,)
        if end:
            sql += ' AND updated <= ?'
            data = (start, end)
        sql += ' ORDER BY updated DESC'
        # fix: limit/offset were silently ignored on this path (old TODO)
        if limit is not None:
            sql += ' LIMIT ?'
            data += (limit,)
            if offset:
                sql += ' OFFSET ?'
                data += (offset,)
        conn = self.conn
        try:
            entries = conn.execute(sql, data).fetchall()
        finally:
            conn.close()
        return [ x[0] for x in entries ]

    @create_entry_filter
    def create_entry(self, entry_text, slug, **kw):
        """
        Insert a new entry. entry_text is the atom entry xml; slug seeds
        the generated url. Returns the unmodified entry text.
        """
        doc = amara.parse(entry_text, prefixes=COMMON_PREFIXES)
        updated = amara.binderytools.parse_isodate(str(doc.entry.updated))
        try:
            published_text = str(doc.entry.published)
            published = amara.binderytools.parse_isodate(published_text)
        except AttributeError:
            # entries without atom:published fall back to atom:updated
            published = updated
            published_text = str(doc.entry.updated)
        url = self.generate_url(slug, published)
        sql = """INSERT INTO entries (updated, published, url, content)
        VALUES (?, ?, ?, ?)"""
        # fix: the original re-read doc.entry.published here, raising
        # AttributeError for entries lacking atom:published despite the
        # fallback above
        data = ( str(doc.entry.updated),
                 published_text,
                 str(url), str(entry_text) )
        # we want to get the connection and potentially roll it back
        conn = self.conn
        try:
            conn.execute(sql, data)
            try:
                conn.commit()
            except sqlite.IntegrityError as e:
                # fix: bare 'IntegrityError' was an undefined name
                conn.rollback()
                raise e
        finally:
            conn.close()
        # the cached index is stale now
        self._index = None
        return entry_text

    def update_entry(self, entry_text, url_id):
        """
        Replace the content (and updated stamp) of the entry whose url
        matches url_id. Returns the unmodified entry text.
        """
        doc = amara.parse(entry_text, prefixes=COMMON_PREFIXES)
        updated = amara.binderytools.parse_isodate(str(doc.entry.updated))
        sql = """UPDATE entries
        SET content = ?, updated = ?
        WHERE url = ?"""
        data = (entry_text, updated, url_id)
        conn = self.conn
        try:
            conn.execute(sql, data)
            try:
                conn.commit()
            except Exception as e:
                conn.rollback()
                raise e
        finally:
            conn.close()
        # the cached index is stale now
        self._index = None
        return entry_text
        
from sqlobject import *

class AtomEntry(SQLObject):
    # One stored atom entry; url_id mirrors the entry's atom:id.
    url_id = StringCol(unique=True, notNone=True)
    # fix: 'DateTimecol' (NameError) and 'nonNone' were typos for
    # SQLObject's DateTimeCol / notNone
    updated = DateTimeCol(notNone=True)
    published = DateTimeCol(notNone=True)
    content = StringCol()

class sqlobject_repository(repository):
    """Store backed by the SQLObject-mapped AtomEntry table."""

    def __init__(self, **app_conf):
        """
        Open the connection named by the sqlobject_dburi setting and make
        sure the AtomEntry table exists.
        """
        dburi = app_conf.get('sqlobject_dburi', None)
        conn = connectionForURI(dburi)
        sqlhub.processConnection = conn
        # ifNotExists: a second start-up must not fail on CREATE TABLE
        AtomEntry.createTable(ifNotExists=True)
        # fix: _index was never initialized, so the index property raised
        # AttributeError on first access
        self._index = None

    @property
    def index(self):
        """
        Cached (updated, published, url_id, content) tuples for every
        entry, newest first.
        """
        if self._index:
            return self._index
        # fix: the original selected from an unrelated 'Person' class
        entries = AtomEntry.select(orderBy=AtomEntry.q.updated).reversed()
        entries = [ (e.updated, e.published, e.url_id, e.content) for e in entries ]
        self._index = entries
        return entries

    def get_entries(self, **kw):
        """
        Primary interface to the store. Returns a list of
        entries as strings sorted by dates. Query criteria come as
        keyword arguments.
        """
        entries = self._do_query(**kw)
        return entries

    def _do_query(self, eid=None, limit=5, offset=0, **kw):
        """
        Core functionality for get_entries.
        Parses the keyword arguments to see what kind of entries to grab.
        Returns a list of entries.
        """
        start = kw.get('lower_date', None)
        end = kw.get('upper_date', None)
        if eid:
            entries = self.get_entry_by_id(eid)
        else:
            entries = self.get_entries_by_dates(start, end, offset, limit)
        return entries

    def get_entry_by_id(self, url):
        """Content of the entries whose url_id matches (normally 0 or 1)."""
        entries = [ e.content for e in AtomEntry.selectBy(url_id=url) ]
        return entries

    def get_entries_by_dates(self, start=None, end=None, offset=None, limit=None):
        """
        Entry content in a date window, newest first. Without a start date
        the cached index is consulted instead of the database.
        """
        if not start:
            # fix: index holds tuples, not AtomEntry objects, so e.content
            # raised AttributeError; content is the tuple's 4th field
            return [ e[3] for e in self.index[:limit] ]
        # fix: the clauses referenced a bare, undefined 'updated' name
        clause = AtomEntry.q.updated >= start
        if end:
            clause = AND(clause, AtomEntry.q.updated <= end)
        entries = AtomEntry.select(clause, orderBy='updated').reversed()
        #TODO: handle offset and limit
        return [ x.content for x in entries ]


class appcollection_repository(repository):
    """
    Store that proxies a remote Atom Publishing Protocol collection
    through an AppClient.
    """

    def __init__(self, collection, service, username=None, password=None):
        self.client = AppClient(service, username, password)
        # client.collection yields (name, feed_document) pairs; keep the
        # one matching the requested collection name
        self.collection = [c for c in self.client.collection if c[0] ==  collection][0]
        self._index = self.retrieve_all_entries()

    def retrieve_all_entries(self):
        """
        Keeps going through the atom:link[@rel='next']/@href until it gets
        a 404 or matches whatever the potential
        atom:link[@rel='last']/@href is.

        NOTE(review): paging is unfinished -- the 'next' links are
        collected (kept on self._page_links) but the entries on those
        pages are never fetched, so this returns None and `index` builds
        from the first page only.
        """
        feed = self.collection[1]  # fix: 'c' was an undefined name here
        try:
            start = feed.xml_xpath(u"/a:feed/a:link[@rel = 'next']")[0].href
        except (IndexError, AttributeError):
            # single-page collection: nothing to walk
            return None
        self._page_links = [ link for link in self.get_collection_links(start) ]
        return None

    def get_collection_links(self, url):
        """Yield each successive rel='next' page href starting from url."""
        headers, content = self.client.h.request(url, 'GET')
        page = amara.parse(content, prefixes=COMMON_PREFIXES)
        links = page.xml_xpath(u"/a:feed/a:link[@rel = 'next']")
        del page
        if links:
            yield links[0].href
            # fix: the recursive call built a generator that was never
            # iterated, so only the first link was ever produced
            for href in self.get_collection_links(links[0].href):
                yield href

    @property
    def index(self):
        """
        Cached (updated, published, id, xml) tuples for every entry on the
        collection's first feed page, in feed order.
        """
        if self._index is not None:
            return self._index
        entries = []
        for e in self.collection[1].feed.entry:
            # str() the node values for consistency with the other stores
            eid = str(e.id)
            updated = amara.binderytools.parse_isodate(str(e.updated))
            try:
                published = amara.binderytools.parse_isodate(str(e.published))
            except AttributeError:
                # fix: a missing atom:published raises AttributeError, not
                # the UnboundLocalError the original caught
                published = None
            entries.append( (updated, published, eid, e.xml()) )
        self._index = entries
        return entries

    def get_entries(self, **kw):
        """
        Primary interface to the store. Returns a list of
        entries as strings sorted by dates. Query criteria come as
        keyword arguments.
        """
        entries = self._do_query(**kw)
        return entries

    def _do_query(self, eid=None, limit=5, offset=0, **kw):
        """
        Core functionality for get_entries.
        Parses the keyword arguments to see what kind of entries to grab.
        Returns a list of entries.
        """
        start = kw.get('lower_date', None)
        end = kw.get('upper_date', None)
        if eid:
            entries = self.get_entry_by_id(eid)
        else:
            entries = self.get_entries_by_dates(start, end, offset, limit)
        return entries

    def get_entry_by_id(self, id):
        """
        The xml of the indexed entries whose atom:id matches (fix: was an
        unimplemented stub returning None).
        """
        return [ e[3] for e in self.index if e[2] == str(id) ]

    def get_entries_by_dates(self, start, end, offset, limit):
        """
        Entry xml in a date window, newest first (fix: was an
        unimplemented stub returning None).
        """
        entries = self.index
        if start:
            entries = [ e for e in entries if e[0] >= start ]
        if end:
            entries = [ e for e in entries if e[0] <= end ]
        #TODO: honor offset
        return [ e[3] for e in entries[:limit] ]

    

