from __future__ import with_statement

import sys
import os
import threading
try:
    import cPickle as pickle
except ImportError: #pragma: no cover
    import pickle
import struct
import datetime
import uuid
from contextlib import closing
try:
    from bsddb3.db import *
except ImportError: #pragma: no cover
    # it's better to use bsddb3 since it will be updated more often than
    # the built-in version, but since it can be hard to build the module
    # we'll make things easy and allow using this in 2.6 for now
    if sys.version_info[0] == 2 and sys.version_info[1] == 6:
        from bsddb.db import *
from kineta.utils import *
from kineta.error import KinetaError


GBYTE = 1024*1024*1024


class BDB(object):
    """Top-level wrapper around a Berkeley DB environment.

    Owns the DBEnv, the persisted pickle-protocol version, the table and
    index manager (``Tables``) and the name<->id dictionary (``Dict``).
    A pid file prevents two processes from opening the same environment.
    """

    def __init__(self, path, idxkeyfactory, limits, cache):
        """Open (creating if necessary) the environment under *path*.

        path          -- directory that holds the environment files
        idxkeyfactory -- callable producing secondary-index key callbacks
        limits        -- provides max_locks (and other limit attributes)
        cache         -- memory-pool cache size in bytes

        Raises KinetaError when the pidfile is held by another process.
        """
        envflags = (DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_RECOVER|
                    DB_INIT_LOG|DB_INIT_TXN)

        if not os.path.isdir(path):
            os.mkdir(path)

        self._proclock = check_pid_file(os.path.join(path, "pidfile"))
        if self._proclock is None:
            raise KinetaError("Cannot open database at '%s' because it is "
                              "already in use" % path)
        self._path = path
        try:
            self._dbenv = DBEnv()
            self._dbenv.set_lk_max_locks(limits.max_locks)
            self._dbenv.set_lk_max_objects(limits.max_locks)
            self._dbenv.set_lk_max_lockers(limits.max_locks)
            # set_cachesize() takes (gbytes, bytes, ncache); use the GBYTE
            # constant consistently instead of respelling 1024*1024*1024
            self._dbenv.set_cachesize(cache/GBYTE, cache%GBYTE,
                                      int(cache/GBYTE))
            self._dbenv.open(path, envflags)
            self.pickleproto = self._load_pickle_protocol()
            self.tables = Tables(self._dbenv, idxkeyfactory,
                                 self.pickleproto, limits)
            self.dict = Dict(self.tables._open_table("dict.idx", True, None),
                             self.pickleproto)
        except Exception:
            # don't leak the pidfile lock when opening the environment
            # fails, so a later attempt in this or another process works
            self._proclock.close()
            self._proclock = None
            raise

    def _load_pickle_protocol(self):
        """Return the pickle protocol version used by this database.

        The protocol chosen when the database was first created is
        persisted in ``pprotocol.ver`` so later runs keep reading and
        writing compatible pickles.
        """
        filepath = os.path.join(self._path, "pprotocol.ver")
        if os.path.isfile(filepath):
            with open(filepath, "rb") as file:
                return pickle.load(file)
        else:
            default = pickle.HIGHEST_PROTOCOL
            with open(filepath, "wb") as file:
                pickle.dump(default, file, default)
            return default

    def txn_begin(self, isolated, parent):
        """Start a transaction; non-isolated ones use DB_READ_COMMITTED."""
        flags = 0 if isolated else DB_READ_COMMITTED
        return self._dbenv.txn_begin(parent, flags)

    def abort(self, txn):
        """Abort *txn*, always releasing its cached table handles."""
        try:
            txn.abort()
        finally:
            self.tables._close_txn(txn)

    def commit(self, txn):
        """Commit *txn*, always releasing its cached table handles."""
        try:
            txn.commit()
        finally:
            self.tables._close_txn(txn)

    def lock_detect(self):
        """Run the default deadlock detector on the environment."""
        return self._dbenv.lock_detect(DB_LOCK_DEFAULT)

    def close(self):
        """Close tables, dictionary, environment and pidfile; idempotent."""
        if self._dbenv is not None:
            self.tables._close()
            self.dict._close()
            self._dbenv.close()
            self._dbenv = None
        if self._proclock is not None:
            self._proclock.close()
            self._proclock = None


class Dict(object):
    """Thread-safe bidirectional mapping between names and integer ids.

    Names are persisted in a BDB table under keys prefixed ``d:`` with a
    packed unsigned-int id as the value; new ids come from a DBSequence
    stored in the same table.  Both directions are cached in memory and
    every access is serialized by a single lock.
    """

    def __init__(self, tbl, pickleproto):
        self._tbl = tbl
        self._pickleproto = pickleproto
        self._lock = threading.Lock()
        self._dictseq = DBSequence(tbl)
        self._dictseq.open("seq", None, DB_CREATE)
        self._load_cache()

    def _load_cache(self):
        """Populate the in-memory name<->id caches from the table."""
        self._idcache = {}
        self._namecache = {}

        cursor = self._tbl.cursor()
        try:
            entry = cursor.first()
            while entry:
                key, value = entry
                if key.startswith("d:"):
                    nm = key[2:].decode('utf-8')
                    ident = struct.unpack("I", value)[0]
                    self._idcache[nm] = ident
                    self._namecache[ident] = nm
                entry = cursor.next()
        finally:
            cursor.close()

    def get_id(self, name, ro=False):
        """Return the integer id for *name*, allocating one if needed.

        Returns None when *name* is None, or when *ro* is true and the
        name has no id yet (read-only lookups never allocate).
        """
        if name is None:
            return None

        name = unicode(name)

        with self._lock:
            cached = self._idcache.get(name, None)
            if cached is not None:
                return cached
            if ro:
                return None
            newid = self._dictseq.get(1)
            self._tbl.put("d:" + name.encode("utf-8"),
                          struct.pack("I", newid))
            self._tbl.sync()
            self._idcache[name] = newid
            self._namecache[newid] = name
            return newid

    def get_name(self, id):
        """Return the name previously registered for *id*."""
        with self._lock:
            try:
                return self._namecache[id]
            except KeyError: #pragma: no cover
                raise KinetaError("ID %s not found in name cache" % id)

    def _close(self):
        """Release the sequence and the underlying table."""
        self._dictseq.close()
        self._tbl.close()


class Tables(object):
    """Manager for data tables, their secondary indexes and the cache of
    open BDB handles.

    Persistent metadata lives in two btree tables:

    * ``tables.idx``  -- one (empty-valued) record per table id
    * ``indexes.idx`` -- a pickled ``IdxData`` per secondary index,
      keyed by the index's filename

    ``self._tables`` mirrors this metadata in memory (tableid ->
    ``TblData``), guarded by the reader/writer lock ``self._lock``;
    ``self._tblobjs`` caches open handles per transaction (txn ->
    {tableid: ``TblObj``}), guarded by ``self._tblobjlock``.
    """

    def __init__(self, dbenv, idxkeyfactory, pickleproto, limits):
        self._dbenv = dbenv
        self._idxkeyfactory = idxkeyfactory
        self._pickleproto = pickleproto
        self._limits = limits
        self._tables = {}       # tableid -> TblData (index metadata)
        self._tblobjs = {}      # txn -> {tableid: TblObj}
        self._tblobjlock = threading.Lock()
        self._lock = RWLock()
        self._idxtbl = self._open_table("indexes.idx", True, None)
        self._tbltbl = self._open_table("tables.idx", True, None)
        # rebuild in-memory metadata for every table recorded on disk
        tblcurs = self._tbltbl.cursor()
        try:
            rec = tblcurs.first()
            while rec:
                (name, t) = rec
                self._create_table_entry(None, int(name), None)
                rec = tblcurs.next()
        finally:
            tblcurs.close()

    class TblData(object):
        """Per-table metadata: ``idx`` maps index component tuples to
        ``IdxData`` instances."""

        def __init__(self, idx):
            self.idx = idx

    class IdxData(object):
        """Metadata stored (pickled) for one secondary index."""

        def __init__(self, ts, perm=False, score=0):
            self.timestamp = ts    # set to utcnow on create/update
            self.permanent = perm  # permanent indexes are never auto-dropped
            self.score = score     # usage score; exceeding the limit drops it

        def pickle(self, protocol):
            """Serialize to the bytes stored in indexes.idx."""
            return pickle.dumps((self.timestamp, self.permanent, self.score),
                                 protocol)

        @staticmethod
        def unpickle(data):
            """Inverse of pickle(): rebuild an IdxData from stored bytes."""
            return Tables.IdxData(*pickle.loads(data))

    class IdxObj(object):
        """Proxy for a shared index handle whose close() is a no-op.

        Only destroy() really closes the wrapped handle — presumably so
        that callers closing a shared, cached index handle don't tear it
        down for everyone else.  All other attribute access is forwarded
        to the wrapped object.
        """

        def __init__(self, obj):
            self._obj = obj

        def close(self):
            # deliberately a no-op; see class docstring
            pass

        def __getattr__(self, name):
            # NOTE(review): __getattr__ only fires when normal lookup
            # fails; 'close'/'destroy' are class attributes and '_obj'
            # is set in __init__, so the first branch appears to be
            # unreachable in practice.
            if name == 'close' or name == 'destroy' or name == '_obj':
                return self.__dict__[name]
            else:
                return getattr(self.__dict__['_obj'], name)

        def destroy(self):
            # really close the wrapped handle
            self._obj.close()

    class TblObj(object):
        """An open table handle plus its associated secondary indexes."""

        def __init__(self, lock, obj, tables, indexes, tableid, txn):
            self.obj = obj
            self.tableid = tableid
            self.idxobjs = {}  # index component tuple -> IdxObj
            for idx in indexes:
                self.load_index(lock, tables, txn, idx)

        def load_index(self, lock, tables, txn, idx):
            """Open and associate the secondary index *idx*."""
            idx = tuple(idx)
            self.idxobjs[idx] = Tables.IdxObj(
                                        tables._open_index_table(lock, self,
                                                                 self.tableid,
                                                                 idx, txn))

        def close_index(self, lock, idx):
            """Close and forget the secondary index *idx*, if open."""
            idx = tuple(idx)
            if idx in self.idxobjs:
                self.idxobjs[idx].destroy()
                del self.idxobjs[idx]

        def append(self, data, txn):
            """Append *data* as a new record (DB_APPEND), returning the
            value produced by put()."""
            return self.obj.put(0, data, txn, DB_APPEND)

        def update(self, rowid, data, txn):
            """Overwrite the record at *rowid* with *data*."""
            self.obj.put(rowid, data, txn)

        def cursor(self, txn):
            return self.obj.cursor(txn)

        def num_records(self, txn):
            """Record count taken from the DB_FAST_STAT statistics."""
            return self.obj.stat(DB_FAST_STAT, txn)['ndata']

        def destroy(self):
            """Close every index handle and then the table handle."""
            if self.idxobjs is not None:
                for obj in self.idxobjs.itervalues():
                    obj.destroy()
                self.idxobjs = None
            self.obj.close()

    class SeqObj(object):
        """Pairs a DBSequence with its backing table so both close together."""

        def __init__(self, seqtbl, seqobj):
            self._seqtbl = seqtbl
            self._seqobj = seqobj

        def get(self, num, txn):
            """Fetch from the sequence, advancing it by *num*."""
            return self._seqobj.get(num, txn)

        def close(self):
            self._seqobj.close()
            self._seqtbl.close()

    def lock(self):
        """Acquire a read lock over the table metadata (context manager)."""
        return ReadLock(self._lock)

    def table_list(self, lock):
        """Return the known table ids; caller must hold *lock*."""
        return self._tables.keys()

    def index_list(self, lock, tableid):
        """Return a shallow copy of tableid's index metadata
        ({component tuple: IdxData}); empty when the table is unknown."""
        ret = {}
        if tableid in self._tables:
            for k, v in self._tables[tableid].idx.iteritems():
                ret[k] = v
        return ret

    def index_create(self, tableid, idx, permanent):
        """Create the index *idx* on *tableid*, or refresh its metadata
        if it already exists."""
        with self.lock() as lock:
            tbl = self._tables.get(tableid, None)
            if tbl is not None:
                name = Tables._index_filename(tableid, idx)
                ts = self._idxtbl.get(name, None)
                if ts is None:
                    comps = tuple(idx)
                    # open/associate once (create=True) to build the index
                    # file, then register it with every cached handle and
                    # persist its metadata under the writer lock
                    tblobj = self.table_open(lock, tableid, None, True)
                    self._open_index_table(lock, tblobj, tableid,
                                                    comps, None,
                                                    create=True).close()
                    with lock.upgrade_to_writer():
                        self._load_index(lock, tableid, idx)
                        data = Tables.IdxData(datetime.datetime.utcnow(),
                                              permanent)
                        self._idxtbl.put(name,
                                         data.pickle(self._pickleproto))
                        self._idxtbl.sync()
                        tbl.idx[comps] = data
                else:
                    # already exists: just refresh timestamp/score/flag
                    self._update_index(lock, tableid, idx, permanent)

    def index_update_time(self, tableid, idx):
        """Refresh *idx*'s timestamp and reset its score."""
        with self.lock() as lock:
            self._update_index(lock, tableid, idx, False)

    def update_scores(self, num, tableid):
        """Add *num* to every non-permanent index score on *tableid*,
        dropping indexes whose score exceeds the configured maximum."""
        with self.lock() as lock:
            self._update_scores(lock, num, tableid)

    def index_drop(self, tableid, idx):
        """Drop the index *idx* from *tableid*, if both exist."""
        with self.lock() as lock:
            with lock.upgrade_to_writer():
                tbl = self._tables.get(tableid, None)
                if tbl is not None:
                    self._index_drop(lock, tableid, tbl, tuple(idx))

    def table_drop(self, tableid):
        """Drop the table *tableid* together with all of its indexes."""
        with self.lock() as lock:
            with lock.upgrade_to_writer():
                self._table_drop(lock, tableid)

    def _update_scores(self, lock, num, tableid):
        # bump scores under the read lock, collecting over-limit indexes;
        # only upgrade to the writer lock if something must be dropped
        drop = []
        tbl = self._tables.get(tableid, None)
        if tbl is not None:
            for idx, data in tbl.idx.iteritems():
                if not data.permanent:
                    data.score += num
                    if data.score > self._limits.max_index_score:
                        drop.append(idx)
        if len(drop) > 0:
            with lock.upgrade_to_writer():
                # re-fetch: metadata may have changed while upgrading
                tbl = self._tables.get(tableid, None)
                if tbl is not None:
                    for idx in drop:
                        self._index_drop(lock, tableid, tbl, idx)

    def _table_drop(self, lock, tableid):
        """Remove files, metadata rows and cached state for *tableid*;
        caller holds the writer lock."""
        tbl = self._tables.get(tableid, None)
        if tbl is not None:
            self._close_table(tableid)
            for idx in tbl.idx.keys():
                self._index_drop(lock, tableid, tbl, idx)
            self._dbenv.dbremove(Tables._table_filename(tableid))
            try:
                self._dbenv.dbremove(Tables._sequence_filename(tableid))
            except DBNoSuchFileError:
                pass # sequence tables arent created until theyre used
            self._tbltbl.delete(str(tableid))
            self._tbltbl.sync()
            del self._tables[tableid]

    def _index_drop(self, lock, tableid, tbl, idx):
        """Close, delete and deregister one index; caller holds the
        writer lock."""
        if idx in tbl.idx:
            self._close_index(lock, tableid, idx)
            name = Tables._index_filename(tableid, idx)
            self._dbenv.dbremove(name)
            self._idxtbl.delete(name)
            self._idxtbl.sync()
            del tbl.idx[idx]

    def _update_index(self, lock, tableid, idx, permanent):
        """Reset an existing index's score and timestamp; *permanent*
        can promote it to permanent (never demote)."""
        tblobj = self.table_open(lock, tableid, None, True)
        if tblobj is not None:
            comps = tuple(idx)
            if comps in self._tables[tableid].idx:
                with closing(self.index_open(lock, tblobj,
                                    tableid, comps, None)) as idxobj:
                    name = Tables._index_filename(tableid, comps)
                    data = self._tables[tableid].idx[comps]
                    data.timestamp = datetime.datetime.utcnow()
                    data.score = 0
                    if permanent:
                        data.permanent = True
                    self._idxtbl.put(name, data.pickle(self._pickleproto))

    @staticmethod
    def _table_filename(tableid):
        # e.g. "7.tbl"
        return "%s.tbl" % tableid

    @staticmethod
    def _sequence_filename(tableid):
        # e.g. "7.seq"
        return "%s.seq" % tableid

    @staticmethod
    def _index_filename(tableid, columns):
        # e.g. "7.1.name.2.age.idx" for columns ((1, "name"), (2, "age"))
        return "%s.%s.idx" % (tableid, ".".join("%s.%s" % (c[0], c[1]) \
                                                for c in columns))

    @staticmethod
    def _temp_filename():
        """Return a fresh unique filename for temporary tables."""
        return "%s.tmp" % uuid.uuid4().hex

    def _close_txn(self, txn):
        """Destroy every table handle cached for *txn*."""
        with self._tblobjlock:
            if txn in self._tblobjs:
                for tblobj in self._tblobjs[txn].itervalues():
                    tblobj.destroy()
                del self._tblobjs[txn]

    def _load_index(self, lock, tableid, idx):
        """Open the new index *idx* in every cached handle of *tableid*."""
        with self._tblobjlock:
            for txn, d in self._tblobjs.items():
                if tableid in d:
                    d[tableid].load_index(lock, self, txn, idx)

    def _close_index(self, lock, tableid, idx):
        """Close *idx* in every cached handle of *tableid*."""
        with self._tblobjlock:
            for txn, d in self._tblobjs.items():
                if tableid in d:
                    d[tableid].close_index(lock, idx)

    def _close_table(self, tableid):
        """Destroy the cached transaction-less handle for *tableid*.

        Raises KinetaError if any open transaction still holds a handle
        on the table.
        """
        with self._tblobjlock:
            for txn, d in self._tblobjs.items():
                if tableid in d:
                    if txn is None:
                        d[tableid].destroy()
                        del d[tableid]
                        if len(d) == 0:
                            del self._tblobjs[None]
                    else:
                        raise KinetaError("Cannot drop table because there are"
                                          " open transactions which reference "
                                          "it")

    def _cache_table(self, lock, filename, txn, indexes, tableid):
        """Return the TblObj cached for (txn, tableid), opening and
        caching a new one when absent."""
        with self._tblobjlock:
            if txn in self._tblobjs:
                d = self._tblobjs[txn]
                if tableid in d:
                    tblobj = d[tableid]
                else:
                    tblobj = Tables.TblObj(lock,
                                        self._open_table(filename, False, txn),
                                           self, indexes, tableid, txn)
                    d[tableid] = tblobj
            else:
                tblobj = Tables.TblObj(lock,
                                       self._open_table(filename, False, txn),
                                       self, indexes, tableid, txn)
                self._tblobjs[txn] = {tableid: tblobj}
            return tblobj

    def table_open(self, lock, tableid, txn, ro):
        """Return a (cached) handle for *tableid*.

        Unknown tables are created on the fly unless *ro* is true, in
        which case None is returned (implicitly, by falling through).
        """
        filename = Tables._table_filename(tableid)
        if tableid in self._tables:
            indexes = self._tables[tableid].idx
            return self._cache_table(lock, filename, txn, indexes, tableid)
        elif not ro:
            with lock.upgrade_to_writer():
                # read data could be stale
                if tableid in self._tables: #pragma: no cover
                    indexes = self._tables[tableid].idx
                    return self._cache_table(lock, filename, txn, indexes,
                                            tableid)
                self._tbltbl.put(str(tableid), "")
                self._tbltbl.sync()
                self._tables[tableid] = Tables.TblData({})
                indexes = self._tables[tableid].idx
                return self._cache_table(lock, filename, txn, indexes, tableid)

    def sequence_open(self, lock, tableid, txn):
        """Open (creating if needed) the sequence for *tableid*, closing
        the backing table again if the sequence itself fails to open."""
        seqtbl = self._open_table(Tables._sequence_filename(tableid),
                                  True, txn, threaded=False)
        seqobj = None
        try:
            seqobj = DBSequence(seqtbl)
            seqobj.init_value(1)
            seqobj.open("seq", txn, DB_CREATE)
        except: #pragma: no cover
            seqtbl.close()
            raise
        return Tables.SeqObj(seqtbl, seqobj)

    def index_open(self, lock, tblobj, tableid, idx, txn, detached=False):
        """Return an index handle: the shared one held by *tblobj*, or a
        fresh non-associated handle when *detached* is true."""
        if detached:
            return self._open_index_table(lock, tblobj, tableid,
                                          idx, txn, detached)
        else:
            return tblobj.idxobjs[idx]

    def _open_index_table(self, lock, tblobj, tableid, idx, txn,
                          detached=False, create=False):
        """Open the index DB file (btree, sorted duplicates allowed).

        Unless *detached*, associate it as a secondary of the primary
        table using the configured key factory; *create* passes
        DB_CREATE to associate().
        """
        idxobj = self._open_table(Tables._index_filename(tableid, idx), True,
                                  txn, True, False)
        if not detached:
            flag = (DB_CREATE if create else 0)
            tblobj.obj.associate(idxobj, self._idxkeyfactory(idx), flag, txn)
        return idxobj

    def _create_table_entry(self, lock, tableid, txn):
        """Load all of *tableid*'s index metadata from indexes.idx into
        self._tables.

        Index record keys have the form produced by _index_filename()
        ("<tableid>.<num>.<name>....idx"); the cursor scans from the
        "<tableid>." prefix and stops at the first non-matching key,
        and the components between the table id and the "idx" suffix
        are parsed back into (num, name) pairs.
        """
        idxdict = {}
        idxcurs = self._idxtbl.cursor(txn)
        try:
            prefix = str(tableid) + "."
            rec = idxcurs.set_range(prefix)
            while rec:
                (name, d) = rec
                if not name.startswith(prefix):
                    break
                data = Tables.IdxData.unpickle(d)
                comps = []
                num = None
                for c in name.split('.')[1:-1]:
                    if num is None:
                        num = int(c)
                    else:
                        comps.append((num, c))
                        num = None
                idxdict[tuple(comps)] = data
                rec = idxcurs.next()
        finally:
            idxcurs.close()
        self._tables[tableid] = Tables.TblData(idxdict)

    def _open_table(self, filename, btree, txn, dupe=False, threaded=True):
        """Open (creating if needed) a DB file in the environment.

        btree    -- use DB_BTREE when true, DB_RECNO otherwise
        dupe     -- allow sorted duplicate keys (DB_DUPSORT)
        threaded -- open the handle with DB_THREAD
        """
        dbflags = DB_CREATE
        if threaded:
            dbflags |= DB_THREAD
        type = DB_RECNO
        if btree:
            type = DB_BTREE

        db = DB(self._dbenv)
        if dupe:
            db.set_flags(DB_DUPSORT)
        db.open(filename, None, type, dbflags, 0660, txn)
        return db

    def _close(self):
        """Release the txn-less handle cache and the metadata tables."""
        self._close_txn(None)
        self._idxtbl.close()
        self._tbltbl.close()
