#==============================================================================
#
#  $Id$
#
"""
   Full implementation: does everything that mem does without having to keep
   the entire environment in memory or write all of it during a checkpoint:
   loads the databases on demand and keeps a configurable subset in memory.
   During checkpointing, writes only the dirty parts.

   If you use the full implementation directly, switching to a BDB back-end 
   may require some very heavy code changes.  If you strive for portability, 
   use the generic interface.  See "pydoc odb".
   
   Usage:
   
      from odb.full import FullEnv
      
      # create a database environment in the db_env directory.
      env = FullEnv('db_env')
      
      # get a b-tree (b-tree functions follow the abstract interface, see 
      # "pydoc odb")
      my_btree = env.getBTreeDB('my_btree')
      
      # get a BList (balanced-tree list implementation - this type is NOT 
      # SUPPORTED BY BDB, if you use it you are locking yourself into the full 
      # and mem implementations).
      my_blist = env.getBListDB('my_blist')
      
      # look at some values (NOTE: slicing is not supported)
      print my_blist[1]
      
      
      # make some changes (we can add any pickleable object)
      my_blist.insert(4, 'test')
      my_blist[3] = 100
      del my_blist[3]
      first = my_blist.pop(0)
      
      # do it transactionally
      txn = my_blist.getTxn() # env.getTxn() works, too.
      my_blist.insert(4, 'test', txn)
      my_blist.set(txn, 3, 100)
      my_blist.delete(txn, 3)
      first = my_blist.pop(0, txn)
      txn.commit()
      
      # iterate over it
      for item in my_blist.cursor():
         print item
   
   Also see "pydoc api" under "API Patterns" for a useful description of the 
   general rules for the access methods in the API.
      
"""
#
#   Copyright (C) 2007 Michael A. Muller
#   Portions Copyright 2008 Google Inc.
#
#   This file is part of ODB.
#
#   ODB is free software: you can redistribute it and/or modify it under the 
#   terms of the GNU Lesser General Public License as published by the Free 
#   Software Foundation, either version 3 of the License, or (at your option) 
#   any later version.
#
#   ODB is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU Lesser General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License 
#   along with ODB.  If not, see <http://www.gnu.org/licenses/>.
#
#==============================================================================

import os, struct, array
from dbfile import HeapFile
from mem import Filer, MemEnv, MemKeyDB, MemListDB, KeyVal
import btree

# module-level trace flag: set to True to get diagnostic trace output on stdout
_trace = False
def trace(msg):
   # emit a trace message, but only when module tracing is enabled
   if _trace:
      print msg

class Stateful:
   """
      Mix-in holding the life-cycle state constants shared by storage nodes.
   """

   # Node states
   STUB = 0         # the node has not been loaded yet - just a position
   CLEAN = 1        # the node is the same as what is persisted
   DIRTY = 2        # the node needs to be persisted

class FileWrapper:
   """
      This is a protection class that wraps the file interface so that we 
      can be sure that we are only writing in the context of a checkpoint 
      operation: write() and truncate() raise unless 'writable' is set.
   """
   
   debugEnabled = False

   def __init__(self, fp, writable):
      self.__fp = fp
      self.writable = writable
   
   def debug(self, msg):
      # diagnostic output, controlled through the class-level flag
      if self.debugEnabled:
         print('FileWrapper: %s' % msg)
   
   def write(self, data):
      # refuse writes outside of a checkpoint
      if not self.writable:
         raise Exception('Not in write mode')
      self.debug('at %d writing %s' % (self.__fp.tell(), repr(data)))
      self.__fp.write(data)
   
   def read(self, size):
      start = self.__fp.tell()
      result = self.__fp.read(size)
      self.debug('from %d read %d:%s' % (start, size, repr(result)))
      return result
   
   def seek(self, pos, whence = 0):
      self.__fp.seek(pos, whence)
      self.debug('seeking %d, at %d' % (pos, self.__fp.tell()))
   
   def tell(self):
      return self.__fp.tell()
   
   def truncate(self, size):
      # truncation modifies the file, so it also requires write mode
      if not self.writable:
         raise Exception('Not in write mode')
      self.__fp.truncate(size)
   
   def flush(self):
      self.__fp.flush()
   
class _Storage(btree.Storage, Stateful):
   """
      Storage implementation for full btree databases.
      
      Every instance represents a single block in the underlying heap file
      and tracks its own Stateful life-cycle: STUB (position known, contents
      not yet loaded), CLEAN (in sync with the file) or DIRTY (must be
      written during the next checkpoint).
   """
   
   class Common:
      """
         State shared by all storage objects bound to the same heap file:
         the key caches, the pending-deletion list and the pending-write
         map.
      """
      def __init__(self, file):
         self.file = file
         # _KeyStorage instances indexed by file position
         self.keyStorageByPos = {}
         # _KeyStorage instances indexed by key value
         self.keyStorageByKey = {}
         # storage objects whose blocks should be freed at the next flush
         self.deleted = []
         # storage -> flattened data, written out at the next flush
         self.dirty = {}

   def _handlePosAssigned(self, pos):
      """
         Lets derived classes do their own thing when the position is 
         assigned.
         
         Base class implementation does nothing.
      """
      pass
   
   @property
   def pos(self):
      """
         The storage's position in the heap file.  Lazily allocates a block
         on first access if the storage has not been allocated yet.
      """
      if not self.__pos:
         self.__pos = self.common.file.alloc(self.size)
         self._handlePosAssigned(self.__pos)
      return self.__pos

   # guards against creating two Common objects for the same file
   commonsForFile = {}

   def __init__(self, file = None, pos = None, common = None, size = None):
      """
         parms:
            file: heap file object (only used when 'common' is not given)
            pos: [int or None] position of the block in the heap file
            common: [Common or None] shared state; created from 'file' when
               omitted
            size: [int or None] size of the block, if known
      """
      self.__pos = pos
      self.size = size
      if common:
         self.common = common
      else:
         self.common = self.Common(file)
         if file in self.commonsForFile:
            raise Exception('duplicate commons')
         self.commonsForFile[file] = self.common
      if pos is not None:
         self._handlePosAssigned(pos)
      
      # for BTreeElem's, this contains the associated key storage object if it 
      # has been allocated.
      self.keyStorage = None

      # if we're creating a node to correspond to mapped storage, this is a 
      # stub, otherwise its a dirty new node.
      if pos:
         self.__state = self.STUB
         self.__keyDirty = False
      else:
         self.__state = self.DIRTY
         self.__keyDirty = True

   def __str__(self):
      rep = 'pos = %s' % self.__pos
      if self.keyStorage:
         rep += ', keyPos = %s' % self.keyStorage.__pos
      return rep

   @property
   def allocated(self):
      """
         returns true if the storage is allocated (has a position in the 
         underlying file storage mechanism).
      """
      return self.__pos and True or False

   def write(self, data):
      """
         Writes 'data' to the storage's block (allocating one if necessary)
         and flushes the file.
      """
      trace('writing to pos %d' % self.pos)
      self.common.file.seek(self.pos)
      self.common.file.write(data)
      self.common.file.flush()

   def read(self):
      """
         Reads and returns the raw block contents, updating the recorded
         size as a side effect.
      """
      data = self.common.file.get(self.pos)
      self.size = len(data)
      return data

   def getKey(self, keyPos):
      """
         Returns the key (@_KeyStorage instance) at the given position.
      """
      # try to resolve in the cache, if not found go to the file system
      if keyPos in self.common.keyStorageByPos:
         return self.common.keyStorageByPos[keyPos]
      else:

         # create a new key (reads it back from the file and caches it)
         keyStorage = _KeyStorage(self.common.file, keyPos, self.common)
         return keyStorage
   
   def getKeyStorage(self, key):
      """
         Returns a @_KeyStorage instance for the key, incrementing the
         reference count if the key is already cached.
      """
      storage = self.common.keyStorageByKey.get(key)
      if storage:
         storage.refCount += 1
         return storage
      else:
         storage = _KeyStorage(self.common.file, common = self.common,
                               key = key,
                               refCount = 1
                               )
         return storage

   def addKey(self, key):
      """
         Adds a new key to the cache and stores it in the storage device.
         Returns the key storage (@_KeyStorage).
         
         Unlike getKeyStorage(), this does not increment the reference count
         of an already-cached key.
      """
      # see if we've already got this key
      storage = self.common.keyStorageByKey.get(key)
      if storage:
         return storage
      else:
         # we don't: store it
         trace('creating storage for key %s' % key)
         storage = _KeyStorage(self.common.file, common = self.common,
                               key = key,
                               refCount = 1
                               )
         return storage

   def alloc(self, size):
      """
         Returns a new storage instance capable of storing a block of the
         given size.  The actual allocation of the block may be deferred until
         the write.
      """
      return _Storage(self.common.file, common = self.common, size = size)

   def create(self):
      """
         Returns a new storage instance derived from this one.
      """
      return _Storage(common = self.common)

   def markModified(self, node):
      """
         Implements @btree.Storage.markModified().

         Mark the node as modified, causing it to be stored  during the next 
         checkpoint.
      """
      self.__state = self.DIRTY
   
   def markKeyModified(self, node):
      """
         Implements @btree.Storage.markKeyModified().
         
         Flags the node's key as dirty so that the parent meat node (which
         stores its children's keys) gets rewritten during the next
         checkpoint.
         
         Bug fix: this used to assign to a '__keyModified' attribute that is
         never read anywhere, so key modifications were silently lost.  The
         flag consulted by checkpoint() is '__keyDirty'.
      """
      self.__keyDirty = True
   
   def markDeleted(self, node):
      """
         Adds the storage node to the deleted list of the commons.
      """
      # we only need to worry about deleting this if it is actually allocated
      if self.__pos:
         trace('marking %s at %s for deletion' % (node, self.__pos))
         assert self not in self.common.deleted
         self.common.deleted.append(self)
      elif self in self.common.dirty:
         # the storage is dirty (scheduled to be written).  Remove it from 
         # the dirty set.
         trace('removing dirty %s' % self)
         del self.common.dirty[self]
      
      # need to delete the key of element nodes
      if isinstance(node, btree.BTreeElem):
         self._releaseKey(node)
   
   def _releaseKey(self, node):
      # decrement the refcount on the node's key storage, if one is cached
      keyStorage = self.common.keyStorageByKey.get(node._keyMaster.key)
      if keyStorage: keyStorage.releaseReference()

   def __restoreMeat(self, node, data):
      """
         Reconstructs the child slots of a meat (interior) node from its
         flattened form: pairs of 32 bit (child position, key position)
         values, where the high bit of the child position marks a leaf.
      """
      raw = array.array('I', data)
      slots = []
      for i in range(0, node.MAX_SLOTS * 2, 2):
         if raw[i]:

            # see if we need to create a leaf node
            if raw[i] & 0x80000000:
               childPos = raw[i] & 0x7FFFFFFF
               isLeaf = True
            else:
               childPos = raw[i]
               isLeaf = False

            keyStorage = self.getKey(raw[i + 1])

            if isLeaf:
               childStorage = self.create()
               self._setKeyStorage(childStorage, keyStorage)
               child = btree.BTreeElem(None, node._keyMaster.create(keyStorage), 
                                       childStorage
                                       )
            else:
               child = btree.BTreeMeat(None, node._keyMaster.create(keyStorage),
                                       self.create()
                                       )
            
            # fix the storage: the position is known but the contents have 
            # not been loaded yet, so the child starts life as a stub
            child._storage.__pos = childPos
            child._storage.__state = self.STUB
            
            # add the child to the slots
            slots.append(child)
      node._slots = slots

   def _setKeyStorage(self, storage, keyStorage):
      # attach the key storage to an element's storage (list storage
      # overrides this to do nothing)
      storage.keyStorage = keyStorage

   def insureReadable(self, node):
      """
         Loads the node's contents from the file if it is still a stub.
      """
      if self.__state == self.STUB:
         data = self.read()
         if isinstance(node, btree.BTreeMeat):
            self.__restoreMeat(node, data)
         else:
            node.rawVal = data

         self.__state = self.CLEAN         
   
   def __flattenMeat(self, node):
      """
         Flattens a meat node to pairs of 32 bit (child position, key token)
         values, zero-padded out to MAX_SLOTS pairs.  Element children are
         flagged with the high bit of the position.
      """
      raw = array.array('I')
      pad = node.MAX_SLOTS
      for slot in node._slots:

         # flag child nodes with a high-bit
         childPos = slot._storage.pos
         if isinstance(slot, btree.BTreeElem):
            childPos |= 0x80000000

         # get the storage position of the slot and the key
         raw.append(childPos)
         maxKeyToken = self._getMaxKeyToken(slot._keyMaster)
         raw.append(maxKeyToken)

         pad -= 1

      # pad the rest of the slots with zeroes
      while pad:
         raw.append(0)
         raw.append(0)
         pad -= 1
      
      # note: tostring() is the python 2 spelling of tobytes()
      return raw.tostring()
   
   def _getMaxKeyToken(self, keyMaster):
      # the token stored for a key is its file position (list storage
      # overrides this to store the child count instead)
      return self.addKey(keyMaster.key).pos

   def _flatten(self, node):
      """
         Returns the flattened (on-disk) form of the node, updating the
         recorded size.
      """
      if isinstance(node, btree.BTreeMeat):
         flat = self.__flattenMeat(node)
      else:
         flat = node.rawVal
      
      self.size = len(flat)
      return flat

   def _checkpointKey(self, node):
      # make sure the element's key has a storage object, then checkpoint it
      if not self.keyStorage:
         self.keyStorage = self.getKeyStorage(node._keyMaster.key)
      self.keyStorage.checkpoint(node._keyMaster)

   def isCheckpointable(self):
      """
         Implements @btree.Storage.isCheckpointable(). Returns true if the 
         node is not a stub.
      """
      return self.__state != self.STUB

   def checkpoint(self, node):
      """
         Checkpoints the individual storage node.  Returns true if the parent 
         node needs to be rewritten as a result of a position change in the 
         child.
      """
      rewriteParent = False

      # elements get special treatment
      if isinstance(node, btree.BTreeElem):
         
         # make sure that the key gets checkpointed
         self._checkpointKey(node)
         
         # if we're dirty and we've already been allocated, Delete 
         # and reallocate.
         if self.__state == self.DIRTY and self.allocated:
            trace('freeing %d' % self.pos)
            self.common.file.free(self.pos)

            # this will force a reallocate during "write()" below
            self.__pos = None
            rewriteParent = True
      elif isinstance(node, btree.BTreeMeat):
         # since we store the child's keys, if a child key is dirty, we are 
         # dirty
         for child in node._slots:
            if child._storage.__keyDirty:
               self.__state = self.DIRTY
               # NOTE(review): this assignment is a no-op (the flag is
               # already True).  It looks as if it was meant to clear the
               # flag (assign False) once the parent has absorbed the
               # change - confirm intent before changing it.
               child._storage.__keyDirty = True

      if self.__state == self.DIRTY:
         self.write(self._flatten(node))
         self.__state = self.CLEAN

      return rewriteParent         

   def flush(self):
      """
         Flush all changes recorded in the common object.
      """
      # remove all deleted items
      trace('flushing')
      for storage in self.common.deleted:
         trace('freeing %d' % storage.pos)
         self.common.file.free(storage.pos)
      self.common.deleted = []
      
      # write all dirty items
      for storage, data in self.common.dirty.items():
         storage.write(data)
      self.common.dirty = {}
      trace('done with flush')

class _BListStorage(_Storage):
   """
      Storage object for a BList.
      
      This class (and the storage mechanism in general) is the biggest "wart" 
      on the system at this point.  When I get a chance I'll rip it up and do 
      it right.
   """
   
   def create(self):
      """
         Returns a new storage instance (sharing this one's commons) derived 
         from this one.
      """
      return _BListStorage(common = self.common)
   
   def getKey(self, val):
      """
         Overrides the standard key lookup: for lists, the "key" is simply 
         the integer value itself.
      """
      return int(val)
   
   def _getMaxKeyToken(self, keyMaster):
      # lists store the child count in the slot instead of a key position
      return keyMaster.childCount
   
   def _setKeyStorage(self, storage, keyStorage):
      # lists have no key storage objects
      pass
   
   def _releaseKey(self, node):
      # no keys, so nothing to release
      pass

   def _checkpointKey(self, node):
      # no keys, so nothing to checkpoint
      pass
      
class _KeyStorage(_Storage):
   """
      Specialization of storage for storing b-tree keys.
      
      On-disk format: a 4 byte unsigned reference count followed by the raw 
      key string.  Instances are cached in the commons by key and, once 
      allocated, by file position.
   """
   
   def __init__(self, file = None, pos = None, common = None, key = None,
                refCount = None
                ):
      # Two construction modes:
      #  - with 'key' (and 'refCount'): a brand new key, immediately
      #    registered in the commons' dirty map so it is written at the next
      #    flush.
      #  - with 'pos' only: an existing key that is read back from the file.
      if key is not None:
         assert refCount is not None
         self.key = key
         self.refCount = refCount
         # 4 bytes of refcount plus the key bytes
         size = 4 + len(key)
         # registered before the base constructor runs; _flatten() only
         # needs 'key' and 'refCount', which are already set at this point
         common.dirty[self] = self._flatten(None)
      else:
         assert pos, 'you must have either a key or a pos'
         size = None
      
      _Storage.__init__(self, file, pos, common, size)
      
      # read the key if appropriate
      if pos and not key:
         raw = self.read()
         self.key = raw[4:]
         self.refCount = struct.unpack('I', raw[:4])[0]
      
      # add it to the cache
      trace('adding key %s to key cache' % self.key)
      self.common.keyStorageByKey[self.key] = self

   def _flatten(self, keyMaster):
      # keyMaster is unused: the flattened form depends only on the refcount
      # and the raw key
      return struct.pack('I', self.refCount) + self.key

   def releaseReference(self):
      """
         Decrements the reference count.  When it drops to zero the key is
         removed from the caches and scheduled for deletion; otherwise the
         updated refcount is scheduled for rewrite at the next flush.
      """
      self.refCount -= 1
      if not self.refCount:
         trace('removed last reference to key %s at %d' %
                (self.key, self.pos)
               )
         del self.common.keyStorageByKey[self.key]
         if self.allocated:
            del self.common.keyStorageByPos[self.pos]
         self.common.deleted.append(self)
      else:
         self.common.dirty[self] = self._flatten(None)
   
   def _handlePosAssigned(self, pos):
      # once the file position is known, make the key discoverable by
      # position as well
      self.common.keyStorageByPos[pos] = self
   
   def write(self, node):
      # simple pass-through to the base class write
      _Storage.write(self, node)

class FullDBBase:
   """
      Base implementation of the full database classes - here's where we put 
      the common code.
   """
   def __init__(self, rootStorage, heap, fp):
      self.__heap = heap
      self.__fp = fp
      self.__rootStorage = rootStorage

   def checkIntegrity(self):
      """
         Does a full integrity check of the dataabase.
      """
      if _trace:
         self.dump()
         self.__heap.dump()
      self.__heap.checkIntegrity()
      records = {}
      
      # add all of the records in the heap to our dictionary
      for rec in self.__heap.getBlockIter():
         assert not records.has_key(rec.pos)
         if not rec.free:
            records[rec.pos + rec.SIZE] = rec.usedSize
   
      # now go through and match up to the nodes
      for node in self._impl.getNodeIter():
         storage = node._storage
         if storage.allocated:
            assert records.has_key(storage.pos) and \
               (storage.size is None or records[storage.pos] == storage.size)
            del records[storage.pos]
      
      # note - the pointer to root exists outside of the managed heap file, so
      # we don't have to check that.
      
      # verify that the storage areas scheduled for deletion (but not yet 
      # deleted) are still present in the heap.
      for storage in self.__rootStorage.common.deleted:
         assert records.has_key(storage.pos) and \
            storage.size == records[storage.pos]
         del records[storage.pos]
      
      # check the keys
      self._checkKeys(self.__rootStorage, records)

      # make sure there are no allocated records that are not accounted for. 
      if _trace:
         print 'leftovers:', records
      assert not records
   
   def setWritable(self, writable):
      self.__fp.writable = writable
   
   def checkpoint(self):
      self.setWritable(True)
      try:
         MemKeyDB.checkpoint(self)
         self.__rootStorage.flush()
      finally:
         self.setWritable(False)

class FullKeyDB(FullDBBase, MemKeyDB):
   """
      Full implementation of a key database.
   """

   def __init__(self, env, name, impl, rootStorage, heap, fp):
      # XXX only storing fp so we can setWritable()
      FullDBBase.__init__(self, rootStorage, heap, fp)
      MemKeyDB.__init__(self, env, name, impl)

   def _checkKeys(self, rootStorage, records):
      """
         Verifies that every allocated key has a matching record of the 
         expected size in 'records', removing each matched entry.
      """
      for key, storage in rootStorage.common.keyStorageByKey.items():
         if not storage.allocated:
            continue
         assert storage.pos in records and \
               records[storage.pos] == len(key) + 4
         del records[storage.pos]

   @staticmethod
   def _checkFile(filer, filename):
      """
         Returns true if the specified file is a FullBTreeDB file.
         
         parms:
            filer: [Filer]
            filename: [string]
      """
      handle = filer.openStateFile(filename, Filer.READONLY)
      magic = handle.read(8)
      return magic == BTREE_MAGIC

class FullListDB(FullDBBase, MemListDB):
   """
      Full implementation of a list database.
   """
   
   def __init__(self, env, name, impl, rootStorage, heap, fp):
      # wire the list database into both base classes
      FullDBBase.__init__(self, rootStorage, heap, fp)
      MemListDB.__init__(self, env, name, impl)
   
   def _checkKeys(self, rootStorage, records):
      # list databases store no keys, so there is nothing to verify
      pass
   
   @staticmethod
   def _checkFile(filer, filename):
      """
         Returns true if the specified file is a FullListDB file.
         
         parms:
            filer: [Filer]
            filename: [string]
      """
      handle = filer.openStateFile(filename, Filer.READONLY)
      magic = handle.read(8)
      return magic == BLIST_MAGIC

# 8 byte magic strings written at the start of a state file to identify the
# database type (b-tree key database vs. b-list database)
BTREE_MAGIC = 'SpDB\0\0\0\001'
BLIST_MAGIC = 'SpDBL\0\0\001'

class FullEnv(MemEnv):
   """
      Implementation of the environment object for the "Full" databases.
   """

   def __init__(self, path, maxTxnFileSize = MemEnv.DEFAULT_MAX_TXN_FILE_SIZE,
                filer = None,
                checkpointCallback = None
                ):
      """
         parms:
            path: [string] the database environment directory
            maxTxnFileSize: [int] maximum transaction file size
            filer: [Filer or None] file access object
            checkpointCallback: [callable or None]
      """
      MemEnv.__init__(self, path, maxTxnFileSize, filer, checkpointCallback)
      
      # create the ".full" marker file.  Close the handle immediately: the
      # original code (file(...)) leaked the open file object.
      open(os.path.join(path, '.full'), 'w').close()

   def _loadState(self):
      """
         Loads the table cache from the existing state files.  Returns true
         if any state files were found.
      """
      # load the database caches (loop variable renamed from "file", which
      # shadowed the builtin)
      self._tables = {}
      foundState = False
      for fileName in self._filer.getAllStateFiles():
         if FullKeyDB._checkFile(self._filer, fileName):
            # XXX need to read "allow dups" from the file.  Also need to 
            # put "allow dups" back into the system.
            self._tables[fileName] = self._createBTreeDB(fileName, False)
            foundState = True
         elif FullListDB._checkFile(self._filer, fileName):
            self._tables[fileName] = self._createBListDB(fileName)
            foundState = True
      return foundState

   def hasTable(self, name):
      "Returns true if the table 'name' exists."
      return name in self._tables

   def __createDB(self, name, magic, storageFactory):
      """
         Opens (or creates) the state file for database 'name' and returns a
         (rootStorage, heap, fp) tuple.
         
         parms:
            name: [string] database/state file name
            magic: [string] 8 byte magic number expected at the start of
               the file
            storageFactory: [callable] constructor for the root storage
               object (_Storage or _BListStorage)
      """
      rootSize = btree.BTreeNode.MAX_SLOTS * 8
      try:
         fp = FileWrapper(self._filer.openStateFile(name, Filer.WRITABLE), 
                          False
                          )

         # check the magic number.  NOTE(review): this is validation via
         # assert, which is stripped under -O; a corrupted file would then
         # go undetected.  Left as-is because raising would change the
         # control flow into the create branch below.
         assert fp.read(8) == magic

         # get the root record
         rootPos = struct.unpack('I', fp.read(4))[0]

         heap = HeapFile(fp, 12)
      except IOError:
         # the file doesn't exist yet: create it and write out the initial
         # structure (magic number, root pointer and an empty root record)
         fp = FileWrapper(self._filer.openStateFile(name, Filer.CREATE), 
                          True
                          )
         fp.write(magic)

         # write a space for the pointer to the root record
         rootPointerPos = fp.tell()
         fp.write('\0' * 4)

         heap = HeapFile(fp, 12, True)
         rootPos = heap.alloc(rootSize)
         heap.seek(rootPos)
         heap.write('\0' * rootSize)
         
         # write the pointer to the root record
         fp.seek(rootPointerPos)
         fp.write(struct.pack('I', rootPos))
      
      # create the root storage
      rootStorage = storageFactory(heap, pos = rootPos, size = rootSize)
      
      return rootStorage, heap, fp

   def _createBTreeDB(self, name, allowDups):
      """
         Creates (or loads) a b-tree key database backed by a state file.
      """
      rootStorage, heap, fp = self.__createDB(name, BTREE_MAGIC, _Storage)
      return FullKeyDB(self, name, btree.BTree(rootStorage), rootStorage, heap,
                       fp
                       )
   
   def _createBListDB(self, name):
      """
         Creates (or loads) a b-list database backed by a state file.
      """
      rootStorage, heap, fp = self.__createDB(name, BLIST_MAGIC, _BListStorage)
      return FullListDB(self, name, btree.BList(rootStorage), rootStorage,
                        heap,
                        fp
                        )

   def _checkpointTables(self):
      # checkpoint every table in the cache
      for table in self._tables.values():
         table.checkpoint()
   
   def checkIntegrity(self):
      """
         Does a full internal integrity check of the database.
      """
      for table in self._tables.values():
         table.checkIntegrity()