#==============================================================================
#
#  $Id$
#
"""
   Implementation of a heap mapped onto a file.
"""
#
#   Copyright (C) 2007 Michael A. Muller
#   Portions Copyright (C) 2008 Google Inc.
#
#   This file is part of ODB.
#
#   ODB is free software: you can redistribute it and/or modify it under the 
#   terms of the GNU Lesser General Public License as published by the Free 
#   Software Foundation, either version 3 of the License, or (at your option) 
#   any later version.
#
#   ODB is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU Lesser General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License 
#   along with ODB.  If not, see <http://www.gnu.org/licenses/>.
#
#==============================================================================

import struct, weakref

# The deprecated posixfile module is the only standard place the SEEK_*
# constants are defined, so define the one we need locally instead.
SEEK_END = 2

import traceback
import sys
def trace(msg):
   """
      Debug tracing hook.  Tracing is currently disabled: the early return
      makes the print statement below unreachable.  Remove the return to
      re-enable trace output.
   """
   return
   print msg

class _FreeNodes:
   """
      Keeps track of a limited number of free nodes so that we don't have to 
      scan the entire file every time that we want to allocate.
      
      Two parallel structures are kept in sync at all times: a dictionary of 
      free records keyed by file position (for duplicate detection and 
      removal) and a list of the same records sorted by ascending capacity 
      (for best-fit searches).
   """
   
   # the maximum number of free records tracked at any one time
   MAX_QUEUE = 1000
   
   def __init__(self, heap):
      """
         parms:
            heap::
               [@HeapFile] the heap file whose free records we track.
      """
      # free records keyed by file position
      self.__recs = {}
      # the same records, sorted by ascending capacity
      self.__recsBySize = []
      
      # where to start the next scan from
      self.startRec = heap._getRoot()
      self.scan(self.MAX_QUEUE)

   def __bsearch(self, key, start, end):
      """
         Binary search a range of the list for the given key.  Returns the 
         index of where the key belongs.
      """
      if start == end:
         return start
      
      # Python 2 integer division
      mid = start + (end - start) / 2
      if key > self.__recsBySize[mid].capacity:
         return self.__bsearch(key, mid + 1, end)
      else:
         return self.__bsearch(key, start, mid)
   
   def add(self, rec):
      """
         Add a free record to the tracked set, evicting the smallest tracked 
         record if adding would exceed MAX_QUEUE.  Adding a record that is 
         already tracked, or one smaller than everything in a full set, is a 
         no-op.
         
         parms:
            rec::
               [@HeapFile.Record] a free record.
      """
      
      # if we've already got this record, ignore it.
      if self.__recs.has_key(rec.pos):
         return
      
      # if we're already full and the record is not larger than our smallest, 
      # ignore it
      count = len(self.__recs)
      if count >= self.MAX_QUEUE and \
         rec.capacity < self.__recsBySize[0].capacity:
         return

      self.__recs[rec.pos] = rec
      index = self.__bsearch(rec.capacity, 0, count)
      self.__recsBySize.insert(index, rec)
      
      # if we've exceeded our capacity, start removing the lower ranking 
      # elements (note that "rec" is rebound to the evicted record here)
      if count + 1 > self.MAX_QUEUE:
         # remove the lowest ranking element
         rec = self.__recsBySize.pop(0)
         del self.__recs[rec.pos]
      
   def __remove(self, rec, index = None):
      """
         Remove a record from both tracking structures.
         
         parms:
            rec::
               [@HeapFile.Record] the tracked record to remove.
            index::
               [int or None] if known, the record's index in the "by size" 
               list; saves a search.
      """
      del self.__recs[rec.pos]

      if index is None:      
         # find and remove the node from the "by size" list (this will give 
         # us an IndexError if the record is not in the list)
         index = self.__bsearch(rec.capacity, 0, len(self.__recsBySize))
         while self.__recsBySize[index] is not rec:
            index += 1
         
      del self.__recsBySize[index]
   
   def scan(self, numRecs, sizeNeeded = None):
      """
         Scan the file from the start record until we either arrive back at
         the start record or have scanned "numRecs" records.  Returns a record 
         of the specified capacity if one was found.
         
         Note that a record returned as a match is not added to the tracked 
         set - only non-matching free records are.
         
         parms:
            numRecs::
               [int] number of records to scan
            sizeNeeded::
               [int or None] if specified, this is the size of a node that 
               we are specifically looking for
      """
      rec = self.startRec
      match = None
      i = 0
      while i < numRecs:
         if rec.free:
            # see if we've got a match
            if sizeNeeded is not None and rec.capacity > sizeNeeded:
               match = rec
            else:
               self.add(rec)
         
         rec = rec.next
         i += 1

         # break out if we loop back to the beginning of the scan         
         if rec == self.startRec:
            break
      
      # remember where we left off
      self.startRec = rec
      
      return match

   def find(self, size):
      """
         Returns the first @Record in the free node set that is large enough 
         to accommodate the specified size and removes it from the set.
         
         NOTE(review): the fallback call below passes no sizeNeeded, and 
         scan() only returns a match when sizeNeeded is given - so this 
         fallback always returns None (it still refills the tracked set as a 
         side effect).  Confirm whether scan(10, size) was intended.
      """      
      i = self.__bsearch(size, 0, len(self.__recsBySize))
      if i < len(self.__recsBySize):
         rec = self.__recsBySize[i]
         assert rec.capacity >= size
         self.__remove(rec, i)
                         
         # if it's the start record, make the previous node the start record
         if self.startRec == rec:
            if rec.prev is not rec:
               self.startRec = rec.prev
         
         return rec
      
      # try scanning the next 10 nodes
      return self.scan(10)

   def invalidate(self, rec):
      """
         Invalidate a record that's getting merged out of existence.
      """
      # if it's our start record, use a different start record
      if self.startRec is rec:
         self.startRec = rec.prev
      
      # remove from our dictionary
      if self.__recs.has_key(rec.pos):
         self.__remove(rec)
   
   def changeCapacity(self, prevCap, rec):
      """
         Change the capacity of an existing free node.
         
         parms:
            prevCap::
               [int] the record's capacity before the change (needed to find 
               it in the "by size" list).
            rec::
               [@HeapFile.Record] the record, already carrying its new 
               capacity.
      """
      if not self.__recs.has_key(rec.pos):
         # we don't manage this - ignore it
         return

      # remove it from its old location
      index = self.__bsearch(prevCap, 0, len(self.__recsBySize))
      while self.__recsBySize[index] is not rec:
         index += 1
      del self.__recsBySize[index]
      
      # add it to its new location
      index = self.__bsearch(rec.capacity, 0, len(self.__recsBySize))
      self.__recsBySize.insert(index, rec)

class HeapFile:
   """
      A heap of variable sized blocks mapped onto a file.  Provides alloc(), 
      free() and get() operations; blocks are identified by the file 
      position of their data area.
   """

   class Record(object):
      """
         Keeps track of a block of the file, either free or allocated.

         Free blocks point to the next and previous free block.  Allocated
         blocks also point to the next and previous _free_ block.
         
         The record consists of a header and a parcel.  The parcel is the 
         application defined data area.  The header consists of:
            
         -  the four byte address of the previous node
         -  one byte whose high bit which is set if the record is free 
            (unallocated).  The remaining bits are the difference between the 
            capacity of the parcel and the size of the data that is actually 
            used.
         -  3 bytes indicating the record's capacity - this is the size of the 
            parcel
         
         A record can store up to 16M of data.
      """

      # the basic size of the record's housekeeping area
      SIZE = 8

      def __init__(self, file, pos, prev, capacity, free, usedSize):
         """
            parms:
               file::
                  [@HeapFile] the owning heap file.
               pos::
                  [int] file position of the record header.
               prev::
                  [int] file position of the previous record.
               capacity::
                  [int] size of the parcel in bytes.
               free::
                  [boolean] true if the record is unallocated.
               usedSize::
                  [int] number of parcel bytes actually in use.
         """
         self.file = file
         self.pos = pos
         self.__prev = prev
         self.capacity = capacity
         self.usedSize = usedSize
         self.free = free

         # because it's a WeakValueDictionary, this will automatically get
         # removed on destruction
         self.file._cache[pos] = self
         
         # this is used by _FreeNodes
         # it gets set to false if record node gets merged with the record 
         # before and set to true if it is reestablished as a new node
         self.valid = True

      def write(self, seek = True):
         """
            Write the record header to the file and flush.
            
            parms:
               seek::
                  [boolean] if true, seek to the record's position first.
         """
         if seek: self.file.seek(self.pos)
         
         # the low 24 bits of the capacity word are the capacity, the high 8 
         # bits are either the unused portion of the capacity (which must be 
         # less than the node size otherwise we would have split the record) 
         # or 128, indicating a free node
         capacity = \
            self.capacity | (self.free and 0x80000000L or 
                             ((self.capacity - self.usedSize) << 24)
                             )
         self.file.write(struct.pack('>II', self.__prev, capacity))
         self.file.flush()

      def _read(self):
         """
            Read and decode the record header from the file's current 
            position, populating this record's attributes.
         """
         data = self.file.read(self.SIZE)
         prev, capacity = struct.unpack('>II', data)
         self.__prev = prev
         # last 24 bits is the capacity
         self.capacity = capacity & 0x00FFFFFF
         self.usedSize = self.capacity - ((capacity & 0x7F000000) >> 24)
         self.free = capacity & 0x80000000L and True or False

      @staticmethod
      def read(file, pos):
         """
            Returns the @Record stored at the given position, reusing a 
            cached instance when one exists.
            
            parms:
               file::
                  [@HeapFile] the heap file to read from.
               pos::
                  [int] the file position of the record header.
         """

         # check the cache
         rec = file._cache.get(pos)
         if rec:
            return rec

         file.seek(pos)
         rec = HeapFile.Record(file, pos, 0, 0, 0, 0)
         rec._read()
         return rec

      def __getNext(self):
         # the next record begins immediately after this one's parcel; wrap 
         # around to the root record when we hit the end of the file
         pos = self.pos + self.capacity + self.SIZE
         #print 'getting next = %d' % pos
         if pos == self.file._getEndPos():
            return self.file._getRoot()
         else:
            return HeapFile.Record.read(self.file, pos)
      
      # the next record in the file (computed from position and capacity)
      next = property(__getNext)

      def __getPrev(self):
         return self.read(self.file, self.__prev)

      def __setPrev(self, prev):
         # only updates the in-memory header: the record still has to be 
         # write()n (normally via a Txn) to persist the change
         self.__prev = prev.pos

      # the previous record (its position is stored in the header)
      prev = property(__getPrev, __setPrev)

      def mergeNext(self, txn):
         """
            Merge with the next record.
            
            parms:
               txn::
                  [@HeapFile.Txn] transaction collecting the dirty records.
         """
         next = self.next
         oldCapacity = self.capacity
         self.capacity += next.capacity + next.SIZE

         # remove the next record
         nextNext = next.next
         nextNext.prev = self
         txn.update((self, nextNext))
         
         # invalidate it so that FreeNodes knows about this
         self.file._freeNodes.invalidate(next)
         self.file._freeNodes.changeCapacity(oldCapacity, self)

      def split(self, txn, size, free = False):
         """
            Splits the record into two - the first record will now be of the
            specified size, the next record will be the remainder.
            
            parms:
               txn::
                  [@HeapFile.Txn] transaction collecting the dirty records.
               size::
                  [int] the new capacity of this record.
               free::
                  [boolean] the new "free" state of this record.
         """
         newRec = \
            HeapFile.Record(self.file, self.pos + size + self.SIZE,
                            self.pos,
                            self.capacity - size - HeapFile.Record.SIZE,
                            True,
                            0
                            )

         next = self.next
         next.prev = newRec
         self.free = free
         oldCapacity = self.capacity
         self.capacity = size
         # NOTE(review): __next is not read anywhere in this class ("next" is 
         # computed from pos and capacity) - this assignment appears to be 
         # vestigial; confirm before removing.
         self.__next = newRec.pos
         txn.update((self, next, newRec))
         self.file._freeNodes.add(newRec)
         self.file._freeNodes.changeCapacity(oldCapacity, self)

      def extend(self, txn, size, free = False):
         """
            Extends the file with a new block.
            
            This method assumes that the record is the last in the file.
            
            parms:
               txn::
                  [@HeapFile.Txn] transaction collecting the dirty records.
               size::
                  [int] capacity of the new block.
               free::
                  [boolean] "free" state of the new block.
            
            Returns the new @Record at the end of the file.
         """
         next = self.next
         assert next is self.file._getRoot()

         # create a new record on the end of the file
         newRec = \
            HeapFile.Record(self.file, 
                            self.file._getEndPos(),
                            self.pos, 
                            size, 
                            free,
                            0
                            )

         # extend the size of the file
         newEnd = newRec.pos + self.SIZE + size
         self.file.truncate(newEnd)

         # fix my pointers
         next.prev = newRec
         self.file._setEndPos(newEnd)
         txn.update((next, self, newRec))

         return newRec

      def __hash__(self):
         # records are uniquely identified by their file position
         return int(self.pos)

      def __cmp__(self, other):
         # order records by file position (Python 2 style comparison)
         return cmp(self.pos, other.pos)
      
      def dump(self):
         """
            Print a human readable description of the record for debugging.
         """
         print 'Record %s(%s): %s, capacity = %s, used = %s, prev = %s' % \
            (self.pos + self.SIZE, self.pos,
             self.free and 'free     ' or 'allocated', 
             self.capacity, 
             self.usedSize, 
             self.__prev
             )

   # end Record

   class Txn(set):
      """
         A minimal transaction: a set of dirty records that all get written 
         out together on commit.
      """

      def commit(self):
         # write every dirty record's header back to the file
         for rec in self:
            #print 'writing record:'
            #rec.dump()
            rec.write()

      def abort(self):
         # note that in-memory record state is not rolled back
         pass

   def __init__(self, file, startOffset = 0, initialize = False):
      """
         parms:
            file::
               [file] the underlying file object.  Should support write(),
               read(), seek() and flush() operations.
            startOffset::
               [int] a seekable start offset of the heap area of the file.
            initialize::
               [boolean] if true, initialize the root record of the heap.
      """
      self.__file = file
      self.__offset = startOffset
      # weak cache of Record instances keyed by position - entries vanish 
      # automatically when the last strong reference goes away
      self._cache = weakref.WeakValueDictionary()

      self.__file.seek(startOffset)
      if initialize:

         # initialize the root record
         self.__root = self.Record(self, startOffset, startOffset, 0, True, 0)
         self.__root.write()
      else:
         self.__root = self.Record.read(self, startOffset)
      
      # store the end of the file
      self.__file.seek(0, SEEK_END)
      self.__end = self.__file.tell()

      # create the free nodes list, which includes scanning for the first set 
      # of free nodes - we also add the root node, mainly as a formality to 
      # satisfy a unit test that expects certain positions to be preserved
      self._freeNodes = _FreeNodes(self)
      if self.__root.free:
         self._freeNodes.add(self.__root)
   
   def close(self):
      """
         Close the underlying file and drop the free node tracker.
      """
      self.__file.close()
      del self._freeNodes

   # thin delegations to the underlying file object
   def write(self, data): self.__file.write(data)
   def read(self, size): return self.__file.read(size)
   def seek(self, pos, whence = 0): return self.__file.seek(pos, whence)
   def truncate(self, size): return self.__file.truncate(size)
   def flush(self): return self.__file.flush()

   def _getRoot(self):
      # the root (first) record of the heap
      return self.__root

   def _getEndPos(self):
      # the cached end-of-file position
      return self.__end
   
   def _setEndPos(self, pos):
      # update the cached end-of-file position (after extending the file)
      self.__end = pos

   def __getRec(self, pos):
      # convenience wrapper to read the record at the given header position
      return self.Record.read(self, pos)

   def alloc(self, size):
      """
         Allocates a block of at least "size" bytes, reusing a free record 
         when possible and extending the file otherwise.  Returns the file 
         position of the block's data area (the value later passed to get() 
         and free()).
         
         parms:
            size::
               [int] number of bytes to allocate.
      """
      assert size >= 0

      # create a transaction for the changed records
      txn = self.Txn()

      # find a free record big enough to accommodate
      rec = self._freeNodes.find(size)

      if rec:
         # got a big enough block

         # mark it allocated
         rec.free = False
         
         # see if there's room enough to divide this
         if rec.capacity >= size + rec.SIZE:
            # split off a new record and store it
            newRec = rec.split(txn, size)
         else:
            # just use the entire record (the "free = False" here is 
            # redundant with the assignment above, but harmless)
            rec.free = False
            txn.add(rec)

         rec.usedSize = size
         txn.commit()
         trace('allocating %d at %d' % (size, rec.pos + rec.SIZE))
         return rec.pos + rec.SIZE

      else:
         # get the last block on the file
         rec = self.__root.prev
         
         # add a new block to the end of the file
         newRec = rec.extend(txn, size)
         newRec.usedSize = size
         txn.commit()
         trace('allocating %d at %d' % (size, newRec.pos + newRec.SIZE))
         return newRec.pos + newRec.SIZE

   def free(self, ptr):
      """
         Frees the block at the given data position, merging it with 
         adjacent free blocks where possible.
         
         parms:
            ptr::
               [int] the block position returned by alloc().
      """

      # create a transaction for the changes
      txn = self.Txn()

      trace('freeing %d' % ptr)
      rec = self.Record.read(self, ptr - HeapFile.Record.SIZE)
      assert not rec.free
      rec.free = True
      # merge with the following block if it is free (never merge the root)
      if rec.next.free and rec.next != self.__root:
         rec.mergeNext(txn)

      # if the previous record is free, merge with it, otherwise rewrite
      # NOTE(review): this branch guards on "rec != root" while the branch 
      # above guards on "rec.next != root" - confirm the asymmetry is 
      # intentional.
      if rec.prev.free and rec != self.__root:
         rec.prev.mergeNext(txn)
      else:
         txn.add(rec)
         self._freeNodes.add(rec)

      txn.commit()

   def get(self, ptr):
      """
         Returns the entire data block associated with the position as a
         string - this implies reading the size from the record.

         parms:
            ptr::
               [int] the block position
      """
      rec = self.Record.read(self, ptr - self.Record.SIZE)
      assert not rec.free, "get(%d) - record is free" % ptr
      self.seek(ptr)
      return self.read(rec.usedSize)
   
   def getBlockIter(self):
      """
         Returns an iterator to walk over the list of blocks in the file.
         
         The iterator will return @HeapFile.Record objects.
      """
      rec = self.__root
      while True:
         yield rec
         
         rec = rec.next
         if rec == self.__root: break

   def dump(self):
      """
         Print a description of every record in the file for debugging.
      """
      rec = self.__root
      while True:
         rec.dump()
         rec = rec.next

         if rec == self.__root: break
   
   def checkIntegrity(self):
      """
         Verify that all structures in the file are reasonable.
      """
      rec = self.__root
      
      # the end of root should be the end of the file (2 == SEEK_END)
      self.__file.seek(0, 2)
      assert self.__end == self.__file.tell()
      
      # iterate the records
      last = None
      while True:
         
         # last pointer is correct
         if last:
            assert rec.prev.pos == last.pos
            
            # no contiguous free blocks
            if last.free: assert not rec.free
            elif rec.free: assert not last.free
            
         nextPos = rec.pos + rec.SIZE + rec.capacity
         assert nextPos <= self.__end
         assert rec.usedSize <= rec.capacity         
         
         last = rec
         rec = rec.next
         if rec == self.__root: break
         

if __name__ == '__main__':
   import os
   if os.path.exists('heap.test'): os.remove('heap.test')
   heap = HeapFile(file('heap.test', 'w+'), 0, True)
   print 'alloc 100'
   x = heap.alloc(100)
   assert x == HeapFile.Record.SIZE * 2
   print 'free'
   heap.free(x)
   print 'alloc 10'
   x = heap.alloc(10)
   assert x == HeapFile.Record.SIZE
   print 'alloc 10'
   y = heap.alloc(10)
   assert y == HeapFile.Record.SIZE * 2 + 10

   heap.close()
   del heap
   #print 'new file'
   heap = HeapFile(file('heap.test', 'r+'), 0, False)
   #print 'alloc 10'
   z = heap.alloc(10)
   print z
   assert z == y + HeapFile.Record.SIZE + 10
   
   os.remove('heap.test')

   print "lookin' good"


