#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# slb64 -- sqlite with base64-codec
#

#
# This program is free software; you can redistribute it and/or modify  
# it under the terms of the GNU General Public License as published by  
# the Free Software Foundation; either version 2 of the License, or     
# (at your option) any later version.                                   
#                                                                         
# A copy of the license can be found in the license.txt file supplied   
# with this software or at: http://www.gnu.org/copyleft/gpl.html       
#

#
# Index schema note: the original UNIQUE (key,ts,fileno) constraint was
# dropped in favour of a plain table plus a separate non-unique index on
# `key` (SQLite has no inline INDEX clause inside CREATE TABLE). The
# statements actually issued (see __init_db) are:
#
#   CREATE TABLE IF NOT EXISTS cell_idx (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER)
#   CREATE INDEX IF NOT EXISTS cell_idx_idx_key ON cell_idx (key)
#


import os,os.path,tempfile,fnmatch
import base64, time, re
import traceback

import sqlite3 as sqlite
from sqlite3 import IntegrityError

from cStringIO import StringIO

import mmap

try :
    from os import SEEK_SET,SEEK_END
except :
    SEEK_SET = 0 
    SEEK_END = 2 

import celtuce.help.logging as logging


class   Slb64DB(object) :
    '''Append-only key/value store.

    Records are stored as tab-separated, base64-payload lines in slice
    files ("data.N") under a table directory, and located through a
    sqlite index database ("index.db").  New records go to the work file
    "data.0"; once it grows past SLICE_MINOR_COMPACT_SIZE it is folded
    into a new read-only slice (minor compaction).
    '''

    # on-disk file names
    INDEX_FILE = 'index.db'
    SLICE_FILENAME_PREFIX = 'data'
    WORK_FILE  = '.'.join((SLICE_FILENAME_PREFIX,'0'))  # "data.0"

    # operation codes stored in the `op` index column
    OP_INSERT = 0
    OP_DELETE = 9

    # string forms used when serializing record lines
    OP_INSERT_STR = str(OP_INSERT)
    OP_DELETE_STR = str(OP_DELETE)

    # work file size (bytes) that triggers a minor compaction
    SLICE_MINOR_COMPACT_SIZE = 1024 * 1024 * 512
    # matches slice file names like "data.3"; group(1) is the slice number
    SLICE_NAME_PATTERN = re.compile(r'^data\.(\d+)$', re.UNICODE)

    # matches intermediate split file names like "split.3"
    SPLIT_NAME_PATTERN = re.compile(r'^split\.(\d+)$', re.UNICODE)

    # placeholder payload written into deletion (tombstone) lines
    ERASE_DATA = base64.b64encode('D')

    DELETED_LOG = 'deleted.log'

    # maximum size of data returned for find()
    MAX_FIND_DATA_SIZE = 1024 * 1024 * 4

    
    def __init__(self, db_path) :
        '''Open (or create) a slb64 table rooted at *db_path*.

        Only the directory is created here; the sqlite index and the work
        file are opened lazily on first use (see __init_db()).
        '''
        self.table_dir = db_path
        # create the table directory; lexists() also catches dangling symlinks
        if not os.path.exists(self.table_dir) and not os.path.lexists(self.table_dir) :
            os.makedirs(self.table_dir, 0755)

        # sqlite connection / cursor over the index table (opened lazily)
        self.table_index_db = None
        self.table_index = None

        # append-only work file "data.0" holding not-yet-compacted records
        self.fp_work_path = os.sep.join((self.table_dir, self.WORK_FILE))
        self.fp_work = None
        # cache of read-only slice file handles, keyed by slice number
        self.fps_readonly = {}

        self.table_index_name = 'cell_idx'
        self.SQL_INSERT = 'INSERT INTO %s(key,op,ts,fileno,offset,length) VALUES(?,?,?,?,?,?)' % self.table_index_name

        # minor_compact: temp table used while folding data.0 into a slice
        self.table_index_mc_name = 'cell_idx_mc'
        self.SQL_INSERT_MC = 'INSERT INTO %s(key,op,ts,fileno,offset,length) VALUES(?,?,?,?,?,?)' % self.table_index_mc_name
        self.SQL_INSERT_FROM_MC = 'INSERT INTO %s(key,op,ts,fileno,offset,length) SELECT key,op,ts,fileno,offset,length FROM %s ORDER BY key' % (self.table_index_name, self.table_index_mc_name)

        # reusable in-memory buffer for serializing one record line
        self._fp_record = StringIO()

        # SQL find statements, built lazily (see __sql_find_by_key*())
        self.SQL_FIND_BY_KEY_TS = None
        self.SQL_FIND_BY_KEY = None

        # count of slice files / cached volume estimate (see getApproxVolume)
        self._number_slice_files = 0
        self._approx_total_size = 0

        # deletion log ("deleted.log"), opened lazily on first erase()
        self.deleted_log = os.sep.join((self.table_dir, self.DELETED_LOG))
        self._fp_deleted_log = None

        self._logger = logging.getLogger('celtuce')
        ## __init__()

    def __del__(self) :
        # Best-effort cleanup on garbage collection; shutdown() no-ops on
        # already-released resources, so an explicit earlier call is safe.
        self.shutdown()
        ## __del__()

    def shutdown(self) :
        if self.table_index_db is not None :
            self.table_index = None

            try :
                self.table_index_db.commit()
                self.table_index_db.close()
                self.table_index_db = None
            except :
                self._logger.excpt()

        if self.fp_work is not None :
            self.fp_work.close()
            self.fp_work = None

        for fno,fp in self.fps_readonly.iteritems() :
            fp.close()
        self.fps_readonly = {}

        if self._fp_record :
            self._fp_record.close()
            self._fp_record = None

        if self._fp_deleted_log :
            self._fp_deleted_log.close()
            self._fp_deleted_log = None
        ## shutdown()

    def getApproxVolume(self) :
        if self._approx_total_size < 1 :
            if self._number_slice_files < 1 :
                self._number_slice_files = self.__count_slice_files()
            self._approx_total_size = self._number_slice_files * self.SLICE_MINOR_COMPACT_SIZE
        return self._approx_total_size
        ## getApproxVolume()

    def isCompactable(self) :
        '''Whether a major compaction can be run on this store (always False here).'''
        return False
        ## isCompactable()

    def compact(self) :
        '''Major compaction is not supported by this backend.'''
        raise NotImplementedError
        ## compact()

    ####------------------------------------------------------------####

    def insert(self, key, data, timestamp_ms) :
        if not key or not data :
            return True
        if not self.__init_db() :
            return False

        if not timestamp_ms :
            timestamp_ms = self.__get_timestamp()

        if self.__existKeyWithTimestamp(key, timestamp_ms) :
            # key with the same timestamp exists
            return True

        return self.__doInsert(key, data, timestamp_ms)
        ## insert()

    def __doInsert(self, key, data, timestamp_ms) :
        '''Serialize one record, append it to the work file and index it.

        Record layout is one line: key, op, timestamp, base64(data),
        joined by tabs.  Triggers a minor compaction once the work file
        reaches SLICE_MINOR_COMPACT_SIZE.  Always returns True.
        '''
        if not timestamp_ms :
            timestamp_ms = self.__get_timestamp()

        data_b64 = base64.b64encode(data)

        # STEP. append data to file
        # build the line in the reusable buffer (reset it first)
        self._fp_record.truncate(0)
        self._fp_record.write(key)
        self._fp_record.write("\t")
        self._fp_record.write(self.OP_INSERT_STR)
        self._fp_record.write("\t")
        self._fp_record.write(str(timestamp_ms))
        self._fp_record.write("\t")
        self._fp_record.write(data_b64)
        value = self._fp_record.getvalue()

        # record offset/length before writing; the trailing "\n" is not
        # counted in `length`
        self.fp_work.seek(0, SEEK_END)
        offset = self.fp_work.tell()
        length = len(value)
        self.fp_work.write(value)
        self.fp_work.write("\n")


        # STEP. update index
        try :
            self.table_index.execute(self.SQL_INSERT, (key,self.OP_INSERT,timestamp_ms,0,offset,length))
        except IntegrityError :
            # IntegrityError: columns key, ts, fileno are not unique
            # just discard it
            return True

        # fold data.0 into a new slice once it grows past the limit
        if (offset + length) >= self.SLICE_MINOR_COMPACT_SIZE :
            self.__minor_compact()

        return True
        ## __doInsert()

    def insertunique(self, key, data, timestamp_ms) :
        '''insert data with unique key. if key exists, just return without insertion/updation; otherwise insert data.
        '''
        if not key or not data :
            return True
        if not self.__init_db() :
            return False

        if not timestamp_ms :
            timestamp_ms = self.__get_timestamp()

        if self.__existKey(key) :
            # key exists, just return
            return True

        # key does NOT exist, insert it
        return self.__doInsert(key, data, timestamp_ms)
        ## insertunique()

    def update(self, key, data, timestamp_ms) :
        '''update data if key exists, otherwise do nothing
        '''
        if not key or not data :
            return True
        if not self.__init_db() :
            return False

        if not timestamp_ms :
            timestamp_ms = self.__get_timestamp()

        if self.__existKey(key) :
            # key exists, just delete them
            ok = self.erase(key, timestamp_ms)
            if not ok :
                return False

            # insert new one
            return self.__doInsert(key, data, timestamp_ms)

        # key does NOT exist, do nothing
        return True
        ## update()

    def upsert(self, key, data, timestamp_ms) :
        '''update data if key exists, otherwise insert it
        '''
        if not key or not data :
            return True
        if not self.__init_db() :
            return False

        if not timestamp_ms :
            timestamp_ms = self.__get_timestamp()

        if self.__existKey(key) :
            # key exists, just delete them, first
            ok = self.erase(key, timestamp_ms)
            if not ok :
                return False

        # key does NOT exist, do nothing
        return self.__doInsert(key, data, timestamp_ms)
        ## upsert()

    def __existKey(self, key) :
        '''test if key already exists in db (not deleted)

        return True if key exists, False otherwise
        '''
        if not key : return False

        row = self.__findone_in_index(key)
        if not row :
            return False
        op,ts,fileno,offset,length = row
        if op == self.OP_DELETE :
            return False

        return True
        ## __existKey()

    def __existKeyWithTimestamp(self, key, timestamp) :
        '''test if key already exists in db (not deleted)

        return True if key exists, False otherwise
        '''
        if not key : return False

        row = self.__findone_in_index(key)
        if not row :
            return False
        op,ts,fileno,offset,length = row
        if op == self.OP_DELETE :
            return False
        if ts == timestamp :
            return True

        return False
        ## __existKeyWithTimestamp()

    ####------------------------------------------------------------####

    def findall(self, key, timestamp_ms=0) :
        '''
        @param key : key of record

        @return list of data for the key
        '''
        if not key :
            return []

        rowset = self.__find_in_index(key,timestamp_ms)
        if not rowset :
            return []
        data_list = []
        data_size = 0
        for row in rowset :
            op,ts,fileno,offset,length = row
            if op == self.OP_DELETE :
                break
            data = self.__read_data(fileno, offset, length)
            data_list.append( data )
            data_size += len(data)
            if data_size >= self.MAX_FIND_DATA_SIZE :
                break

        return data_list
        ## findall()

    def find(self, key, timestamp_ms=0) :
        '''
        @param key : key of record
        
        @return 4-tuple(key,op,timestamp,data)
        '''
        if not key :
            return (key,None,None,None)

        row = self.__findone_in_index(key, timestamp_ms)
        if not row :
            return (key, 0, 0, None)
        op,ts,fileno,offset,length = row
        if op == self.OP_DELETE :
            return (key,ts,op,None)

        data = self.__read_data(fileno,offset,length)
        return (key,ts,op,data)
        ## find()

    def __find_in_index(self, key, timestamp_ms=0) :
        '''
        @param key : key of record
        @param timestamp_ms : key occurs before this timestamp will be return

        @return list of tuples (op,ts,fileno,offset,length)
        '''
        if not key :
            return []
        if not self.__init_db() :
            return []

        if timestamp_ms > 0 :
            sql = self.__sql_find_by_key_timestamp()
            self.table_index.execute(sql, (key,timestamp_ms))
        else :
            sql = self.__sql_find_by_key()
            self.table_index.execute(sql, (key,))

        return  self.table_index.fetchall()
        ## __find_in_index()

    def __findone_in_index(self, key, timestamp_ms=0) :
        '''
        @param key : key of record
        @param timestamp_ms : key occurs before this timestamp will be return

        @return tuple (op,ts,fileno,offset,length)
        '''
        if not key :
            return []
        if not self.__init_db() :
            return []

        if timestamp_ms > 0 :
            sql = self.__sql_find_by_key_timestamp()
            self.table_index.execute(sql, (key,timestamp_ms))
        else :
            sql = self.__sql_find_by_key()
            self.table_index.execute(sql, (key,))

        return  self.table_index.fetchone()
        ## __findone_in_index()

    def __sql_find_by_key(self) :
        if not self.SQL_FIND_BY_KEY :
            self.SQL_FIND_BY_KEY = 'SELECT op,ts,fileno,offset,length FROM %s WHERE key=? ORDER BY ts DESC' % self.table_index_name

        return  self.SQL_FIND_BY_KEY
        ## __sql_find_by_key()

    def __sql_find_by_key_timestamp(self) :
        if not self.SQL_FIND_BY_KEY_TS :
            self.SQL_FIND_BY_KEY_TS = 'SELECT op,ts,fileno,offset,length FROM %s WHERE key=? and ts <= ? ORDER BY ts DESC' % self.table_index_name

        return  self.SQL_FIND_BY_KEY_TS
        ## __sql_find_by_key_timestamp()


    def __read_data_FILE(self, fileno, offset, length) :
        if fileno < 0 :
            return None

        if fileno in self.fps_readonly :
            _fp = self.fps_readonly[fileno]
        else :
            _fp = self.__open_slice_file(fileno)
            self.fps_readonly[fileno] = _fp

        _fp.seek(offset)
        data = _fp.read(length)
        if len(data) != length :
            raise RuntimeError,'fileno=%d offset=%d length=%d (length-got=%d)' % (fileno,offset,length,len(data))

        _key,_op,_ts,_data = data.split("\t",3)
        data = base64.b64decode(_data)

        return data.strip()
        ## __read_data()

    def __read_data_MMAP(self, fileno, offset, length) :
        '''Read one record from slice *fileno* via mmap and return its payload.

        A private read-only mapping of the file's first offset+length
        bytes is created per call and closed immediately after slicing.
        Record layout: key, op, ts, base64(data), tab-separated.

        @raise RuntimeError when fewer than *length* bytes are mapped
        '''
        if fileno < 0 :
            return None

        # per-slice read-only handles are cached for reuse
        if fileno in self.fps_readonly :
            _fp = self.fps_readonly[fileno]
        else :
            _fp = self.__open_slice_file(fileno)
            self.fps_readonly[fileno] = _fp

        # map only as far as needed; trailing 0 is the access argument
        _mem = mmap.mmap(_fp.fileno(), offset + length, mmap.MAP_PRIVATE, mmap.PROT_READ, 0)
        data = _mem[offset : offset + length]
        _mem.close()
        if len(data) != length :
            raise RuntimeError,'fileno=%d offset=%d length=%d (length-got=%d)' % (fileno,offset,length,len(data))

        _key,_op,_ts,_data = data.split("\t",3)
        data = base64.b64decode(_data)

        return data.strip()
        ## __read_data_MMAP()

    # Active record-reader implementation: the plain-file variant
    # (the mmap variant is __read_data_MMAP above).
    __read_data = __read_data_FILE


    def __open_slice_file(self, fileno) :
        filename  = '.'.join((self.SLICE_FILENAME_PREFIX,str(fileno)))
        filepath = os.sep.join((self.table_dir, filename))
        fp = file(filepath, 'r')
        return fp
        ## __open_slice_file()

    ####------------------------------------------------------------####

    def erase(self, key, timestamp_ms=0) :
        '''Append a deletion marker (tombstone) for *key*.

        Existing records are not rewritten: a tombstone line is appended
        to the work file and indexed, and the amount of data shadowed by
        the deletion is recorded in deleted.log.

        @return True on success / no-op, False when the db cannot be opened
        '''
        if not key :
            return True
        if not self.__init_db() :
            return False

        if not timestamp_ms :
            timestamp_ms = self.__get_timestamp()

        # STEP. get deleted records size since last delete operation
        deleted_data_size = self.__get_deleted_data_size(key, timestamp_ms)

        # STEP. append data to file
        value = self.__makeup_deletion_line(key, timestamp_ms)

        # the trailing "\n" is not counted in `length`
        self.fp_work.seek(0, SEEK_END)
        offset = self.fp_work.tell()
        length = len(value)
        self.fp_work.write(value)
        self.fp_work.write("\n")

        # STEP. update index
        try :
            self.table_index.execute(self.SQL_INSERT, (key,self.OP_DELETE,timestamp_ms,0,offset,length))
        except IntegrityError :
            # IntegrityError: columns key, ts, fileno are not unique
            # just discard it
            #self._logger.excpt()
            return True

        # STEP. append deletion log
        # the tombstone's own length is included in the logged size
        self.__write_deletion_log(key, timestamp_ms, deleted_data_size + length)

        # STEP. minor compact
        # fold data.0 into a new slice once it grows past the limit
        if (offset + length) >= self.SLICE_MINOR_COMPACT_SIZE :
            self.__minor_compact()

        return True
        ## erase()

    def __makeup_deletion_line(self, key, timestamp_ms) :
        self._fp_record.truncate(0)
        self._fp_record.write(key)
        self._fp_record.write("\t")
        self._fp_record.write(self.OP_DELETE_STR)
        self._fp_record.write("\t")
        self._fp_record.write(str(timestamp_ms))
        self._fp_record.write("\t")
        self._fp_record.write(self.ERASE_DATA)

        return self._fp_record.getvalue()
        ## __makeup_deletion_line()

    def __get_deleted_data_size(self, key, timestamp_ms) :
        deleted_data_size = 0

        rowset = self.__find_in_index(key,timestamp_ms)
        if not rowset :
            return deleted_data_size
        for row in rowset :
            op,ts,fileno,offset,length = row
            if op == self.OP_DELETE :
                break
            deleted_data_size += length

        return deleted_data_size
        ## __get_deleted_data_size()

    def __write_deletion_log(self, key, timestamp_ms, deleted_data_size) :
        if not self._fp_deleted_log :
            self._fp_deleted_log = open(self.deleted_log, 'a', 1)

        self._fp_deleted_log.write('\t'.join((key, str(timestamp_ms), str(deleted_data_size))))
        self._fp_deleted_log.write('\n')
        ## __write_deletion_log()

    ####------------------------------------------------------------####

    def __init_db(self) :
        '''Lazily open the sqlite index and the append-only work file.

        Creates the index table and its non-unique key index when missing.
        Always returns True; connection failures propagate as exceptions.
        '''
        if not self.table_index :
            table_index_path = os.sep.join((self.table_dir, self.INDEX_FILE))

            self.table_index_db = sqlite.connect(table_index_path)
            # plain table + separate index on key; the earlier
            # UNIQUE(key,ts,fileno) constraint was dropped (see file header)
            #self.table_index_db.execute('CREATE TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER, UNIQUE (key,ts,fileno))' % self.table_index_name)
            self.table_index_db.execute('CREATE TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER)' % self.table_index_name)
            self.table_index_db.execute('CREATE INDEX IF NOT EXISTS %s_idx_key ON %s (key)' % (self.table_index_name, self.table_index_name))

            # transaction mode; one of "DEFERRED", "IMMEDIATE" or "EXCLUSIVE"
            self.table_index_db.isolation_level = "IMMEDIATE"

            self.table_index = self.table_index_db.cursor()

        if not self.fp_work :
            # line-buffered append handle on the work file data.0
            self.fp_work = file(self.fp_work_path, 'a', 1)

        return True
        ## __init_db()


    def __get_timestamp(self) :
        '''
        return timestamp in micro-seconds
        '''
        now = time.time()
        return int(now * 1000)
        ## __get_timestamp()

    def __get_max_slice_number(self) :
        max_slice_number = 0
        entries = os.listdir(self.table_dir)
        for entry in entries :
            matched = self.SLICE_NAME_PATTERN.search(entry)
            if matched :
                slice_number = int(matched.group(1))
                if slice_number > max_slice_number :
                    max_slice_number = slice_number

        return max_slice_number
        ## __get_max_slice_number()

    def __count_slice_files(self) :
        count = 0
        entries = os.listdir(self.table_dir)
        for entry in entries :
            matched = self.SLICE_NAME_PATTERN.search(entry)
            if matched :
                slice_number = int(matched.group(1))
                if slice_number > 0 :
                    count += 1

        # include data.0
        return count + 1
        ## __count_slice_files()

    
    ####------------------------------------------------------------####

    def __minor_compact_FILE(self) :
        '''Fold the work file data.0 into a new immutable slice (file-I/O variant).

        Records are copied in key-descending order into a temp file that
        becomes slice "data.<max+1>", indexed through the temp table
        cell_idx_mc, then merged into the main index; finally data.0 and
        its fileno=0 index rows are removed.  Errors are logged and the
        temp file is cleaned up in the finally block.
        '''
        if self.fp_work :
            self.fp_work.close()
            self.fp_work = None

        # the new slice is written to a temp file first, renamed on success
        fd_n,name_n = tempfile.mkstemp(suffix='.tmp',prefix='slice-',dir=self.table_dir)
        os.close(fd_n)
        fp_n = file(name_n, 'a', 1)

        # buffer copied records (and their index rows) before bulk flushes
        locs_cache = []
        local_cache = StringIO()
        local_cache_size = 0
        local_cache_capacity = 1024 * 1024 * 1

        fp_n.seek(0, SEEK_END)
        _tail_fp_n = fp_n.tell()

        max_slice_number = self.__get_max_slice_number()
        fileno = max_slice_number + 1
        filesize_0 = os.path.getsize(self.fp_work_path)
        self._logger.info('minor compact to slice #%d (data-size:%d) ...', fileno, filesize_0)
        fp_0 = file(self.fp_work_path, 'r')

        # drop any leftover temp index table from a previous failed run
        try :
            self.table_index_db.execute('DROP TABLE IF EXISTS %s' % self.table_index_mc_name)
        except :
            self._logger.error('SQL ERROR : DROP TABLE IF EXISTS %s', self.table_index_mc_name)
            self._logger.excpt()

        try :
            #self.table_index_db.execute('CREATE TEMP TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER, UNIQUE (key,ts,fileno))' % self.table_index_mc_name)
            self.table_index_db.execute('CREATE TEMP TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER)' % self.table_index_mc_name)
            self.table_index_db.execute('CREATE INDEX IF NOT EXISTS %s_tmp_idx_key ON %s (key)' % (self.table_index_mc_name, self.table_index_mc_name))
            #self.table_index_db.execute('DELETE FROM %s WHERE 1=1' % self.table_index_mc_name)

            cursor_0 = self.table_index_db.cursor()
            cursor_0.execute('SELECT key,op,ts,offset,length FROM %s WHERE fileno=0 ORDER BY key DESC,ts DESC,op DESC' % self.table_index_name)

            r = cursor_0.fetchone()
            while r :
                key,op,ts,offset_0,length_0 = r

                # copy the raw record bytes out of data.0
                fp_0.seek(offset_0)
                value = fp_0.read(length_0)
                length = len(value)
                if length != length_0 :
                    # error : length mismatch
                    raise RuntimeError,'key=%s offset=%d length=%d (length-got=%d)' % (key,offset_0,length_0,length)

                # _tail_fp_n tracks the would-be offset of this record in the
                # new slice (the record's trailing "\n" is not in `length`)
                offset = _tail_fp_n
                local_cache.write(value)
                local_cache.write("\n")
                _tail_fp_n += length + 1

                locs_cache.append( (key,op,ts,fileno,offset,length) )
                local_cache_size += length

                r = cursor_0.fetchone()

                # flush: on reaching capacity, or after the final record
                if (local_cache_size >= local_cache_capacity) or (not r) :
                    _data = local_cache.getvalue()
                    fp_n.write(_data)
                    fp_n.seek(0, SEEK_END)
                    _tail_fp_n = fp_n.tell()

                    local_cache.truncate(0)
                    local_cache_size = 0

                    # update index
                    #locs_cache.sort()  # low efficient # already sorted by key
                    self.table_index.executemany(self.SQL_INSERT_MC, locs_cache)
                    locs_cache = []
                    # flush cache

                # while r 

            local_cache.close()
            local_cache = None
            cursor_0 = None

            # merge index
            self.table_index_db.execute(self.SQL_INSERT_FROM_MC)

            self._logger.info('slice #%d size is %d', fileno, fp_n.tell())

            # publish the finished slice under its real name
            fp_n.close()
            slice_name = 'data.%d' % fileno
            slice_path = os.sep.join((self.table_dir, slice_name))
            os.rename(name_n, slice_path)
            os.chmod(slice_path, 0644)

            self._logger.info('minor compact to slice #%d done.', fileno)

            # clear temporary index
            self.table_index_db.execute('DELETE FROM %s WHERE fileno=0' % self.table_index_name)

            # clear temporary data
            fp_0.close()
            if 0 in self.fps_readonly :
                _fp_0 = self.fps_readonly[0]
                _fp_0.close()
                del self.fps_readonly[0]
            os.remove(self.fp_work_path)

            # clear size estimation
            self._number_slice_files = 0
            self._approx_total_size = 0
        except :
            self._logger.excpt()
            traceback.print_exc()
        finally :
            if fp_n :
                fp_n.close()
            if fp_0 :
                fp_0.close()
            if os.path.exists(name_n) :
                # failure: remove the partially-written temp slice
                os.remove(name_n)

            try :
                self.table_index_db.execute('DROP TABLE IF EXISTS %s' % self.table_index_mc_name)
            except :
                self._logger.error('SQL ERROR : DROP TABLE IF EXISTS %s', self.table_index_mc_name)
                self._logger.excpt()
            #self.table_index_db.execute('DELETE FROM %s WHERE 1=1' % self.table_index_mc_name)
        ## __minor_compact_FILE()

    def __minor_compact_MMAP(self) :
        '''Fold the work file data.0 into a new immutable slice (mmap variant).

        Same flow as __minor_compact_FILE, but data.0 is read through a
        private read-only memory map and each record is written straight
        to the new slice instead of being batched in memory.
        '''
        if self.fp_work :
            self.fp_work.close()
            self.fp_work = None

        # the new slice is written to a temp file first, renamed on success
        fd_n,name_n = tempfile.mkstemp(suffix='.tmp',prefix='slice-',dir=self.table_dir)
        fp_n = os.fdopen(fd_n, 'a', 1)

        fp_n.seek(0, SEEK_END)

        max_slice_number = self.__get_max_slice_number()
        fileno = max_slice_number + 1
        filesize_0 = os.path.getsize(self.fp_work_path)
        self._logger.info('minor compact to slice #%d (data-size:%d) ...', fileno, filesize_0)
        fp_0 = file(self.fp_work_path, 'r')

        # drop any leftover temp index table from a previous failed run
        try :
            self.table_index_db.execute('DROP TABLE IF EXISTS %s' % self.table_index_mc_name)
        except :
            self._logger.error('SQL ERROR : DROP TABLE IF EXISTS %s', self.table_index_mc_name)
            self._logger.excpt()

        try :
            # read-only private mapping over the whole of data.0
            _mem_0 = mmap.mmap(fp_0.fileno(), filesize_0, mmap.MAP_PRIVATE, mmap.PROT_READ)

            ##self.table_index_db.execute('CREATE TEMP TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER, UNIQUE (key,ts,fileno))' % self.table_index_mc_name)
            self.table_index_db.execute('CREATE TEMP TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER)' % self.table_index_mc_name)
            self.table_index_db.execute('CREATE INDEX IF NOT EXISTS %s_tmp_idx_key ON %s (key)' % (self.table_index_mc_name, self.table_index_mc_name))
            #self.table_index_db.execute('DELETE FROM %s WHERE 1=1' % self.table_index_mc_name)

            cursor_0 = self.table_index_db.cursor()
            cursor_0.execute('SELECT key,op,ts,offset,length FROM %s WHERE fileno=0 ORDER BY key DESC,ts DESC,op DESC' % self.table_index_name)

            r = cursor_0.fetchone()
            while r :
                key,op,ts,offset_0,length_0 = r

                value = _mem_0[ offset_0 : offset_0 + length_0 ]
                length = len(value)
                if length != length_0 :
                    # error : length mismatch
                    raise RuntimeError,'key=%s offset=%d length=%d (length-got=%d)' % (key,offset_0,length_0,length)

                # append the record (plus newline) and index it right away
                offset = fp_n.tell()
                fp_n.write(value)
                fp_n.write("\n")

                self.table_index.execute(self.SQL_INSERT_MC, (key,op,ts,fileno,offset,length))

                r = cursor_0.fetchone()
                # while r 

            cursor_0 = None

            # merge index
            self.table_index_db.execute(self.SQL_INSERT_FROM_MC)

            self._logger.info('slice #%d size is %d', fileno, fp_n.tell())

            # publish the finished slice under its real name
            fp_n.close()
            slice_name = 'data.%d' % fileno
            slice_path = os.sep.join((self.table_dir, slice_name))
            os.rename(name_n, slice_path)
            os.chmod(slice_path, 0644)

            self._logger.info('minor compact to slice #%d done.', fileno)

            # clear temporary index
            self.table_index_db.execute('DELETE FROM %s WHERE fileno=0' % self.table_index_name)

            # clear temporary data
            fp_0.close()
            if 0 in self.fps_readonly :
                _fp_0 = self.fps_readonly[0]
                _fp_0.close()
                del self.fps_readonly[0]
            os.remove(self.fp_work_path)

            # clear size estimation
            self._number_slice_files = 0
            self._approx_total_size = 0
        except :
            self._logger.excpt()
            traceback.print_exc()
        finally :
            if fp_n :
                fp_n.close()
            if fp_0 :
                fp_0.close()
            if os.path.exists(name_n) :
                # failure: remove the partially-written temp slice
                os.remove(name_n)

            try :
                self.table_index_db.execute('DROP TABLE IF EXISTS %s' % self.table_index_mc_name)
            except :
                self._logger.error('SQL ERROR : DROP TABLE IF EXISTS %s', self.table_index_mc_name)
                self._logger.excpt()
            #self.table_index_db.execute('DELETE FROM %s WHERE 1=1' % self.table_index_mc_name)
        ## __minor_compact_MMAP()

    # Active minor-compaction implementation: the mmap variant
    # (the buffered file-I/O variant is __minor_compact_FILE above).
    __minor_compact = __minor_compact_MMAP

    ####------------------------------------------------------------####

    def split(self, table_name, new_db_root_path, max_cell_volume) :
        '''Split this tablet into two halves of roughly max_cell_volume/2 each.

        The A-part is rewritten in place as split.* files; the B-part is
        written under new_db_root_path/<table_name>/<dirname>.split.b.
        On success the old slice files are removed and the split files are
        renamed into place (see __rename_split_files).

        @return (split_key, new_db_path): the first key of the B-part and
                the B-part's directory
        '''
        # STEP. pre-split
        #       close index db
        if self.table_index_db is not None :
            self.table_index = None

            try :
                self.table_index_db.commit()
                self.table_index_db.close()
                self.table_index_db = None
            except :
                self._logger.excpt()

        #       close fp_work
        if self.fp_work :
            self.fp_work.close()
            self.fp_work = None

        # STEP. split
        #       return split-key
        split_cell_volume = max_cell_volume // 2

        #       open index (fresh read-only style connection for the scan)
        table_index_path = os.sep.join((self.table_dir, self.INDEX_FILE))
        table_index_db = sqlite.connect(table_index_path)

        #       get dir-name of split-B
        table_dir,tablet_dirname = os.path.split(self.table_dir)
        new_tablet_dirname = '.'.join((tablet_dirname, 'split.b'))
        new_db_path = os.sep.join((new_db_root_path, table_name, new_tablet_dirname))

        cursor = None
        try :
            if not os.path.exists(new_db_path) and not os.path.lexists(new_db_path) :
                os.makedirs(new_db_path, 0755)

            # one key-descending scan feeds both halves in turn
            cursor = table_index_db.cursor()
            cursor.execute('SELECT key,op,ts,fileno,offset,length FROM %s ORDER BY key DESC,ts DESC,op DESC' % self.table_index_name)

            ok, record_last = self.__do_split(cursor, self.table_dir, split_cell_volume)
            if not ok :
                self.__remove_split_files( self.table_dir )
                raise RuntimeError, 'Fail to split A-part of tablet "%s"' % self.table_dir

            # first record beyond the A-part starts the B-part
            split_key = record_last[0]
            ok, record_last = self.__do_split(cursor, new_db_path, 0, record_last)
            if not ok :
                self.__remove_split_files( self.table_dir )
                self.__remove_split_files( new_db_path )
                raise RuntimeError, 'Fail to split B-part of tablet "%s"' % self.table_dir
        finally :
            if table_index_db is not None :
                table_index_db.close()
                table_index_db = None
            if cursor is not None :
                cursor = None

        # STEP. post-split
        #       close all slice files
        for fno,fp in self.fps_readonly.iteritems() :
            fp.close()
        self.fps_readonly = {}

        #       remove all slice files
        self.__remove_slice_files(self.table_dir)

        #       rename split files
        self.__rename_split_files( self.table_dir )
        self.__rename_split_files( new_db_path )

        # clear size estimation
        self._number_slice_files = 0
        self._approx_total_size = 0

        # STEP. return response
        return (split_key, new_db_path)
        ## split()

    def __do_split(self, cursor, db_path, split_cell_volume, record_last=None) :
        """Copy records streamed from *cursor* into fresh slice files plus a
        new sqlite index ('split.index') under *db_path*.

        cursor            -- index cursor yielding (key, op, ts, fileno,
                             offset, length) rows; relies on all rows for one
                             key arriving contiguously, and the first row seen
                             per key wins (presumably newest-ts first -- TODO
                             confirm against the caller's ORDER BY).
        db_path           -- target directory; payload goes to 'split.1',
                             'split.2', ... and the index to 'split.index'.
        split_cell_volume -- byte budget; when > 0, copying stops at the first
                             new key after the budget is exhausted; when 0,
                             the whole cursor is consumed.
        record_last       -- a row fetched but not consumed by an earlier
                             call; processed before reading the cursor again.

        Returns (True, leftover) where leftover is the first row NOT written
        out (None when the cursor was fully consumed); the caller passes it
        back as *record_last* for the other half of the split.
        """
        index_filename = 'split.index'
        index_path = os.sep.join((db_path, index_filename))

        self._logger.info('open  split index %s ...', index_path)
        split_index_db = sqlite.connect(index_path)
        #split_index_db.execute('CREATE TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER, UNIQUE (key,ts,fileno))' % self.table_index_name)
        split_index_db.execute('CREATE TABLE IF NOT EXISTS %s (key TEXT, op INTEGER, ts INTEGER, fileno INTEGER, offset INTEGER, length INTEGER)' % self.table_index_name)
        split_index_db.execute('CREATE INDEX IF NOT EXISTS %s_idx_key ON %s (key)' % (self.table_index_name, self.table_index_name))

        # isolation_level can be "DEFERRED", "IMMEDIATE" or "EXCLUSIVE"
        split_index_db.isolation_level = "IMMEDIATE"
        split_index = split_index_db.cursor()

        split_slice_index = 0       # numeric suffix of the current 'split.N' output file
        split_slice_filepath = None
        fp_split_slice = None       # open handle of the current split slice, or None

        split_size = 0              # bytes committed to *finished* slice files so far
        split_will_stop = False     # set once split_cell_volume is exhausted
        split_key = None            # NOTE(review): never assigned below -- appears unused
        try :
            last_key = None         # key of the previous record (dedupe window)
            last_op_is_deletion = False

            while True :
                # prefer the carried-over record from a previous call / the stop path
                if record_last :
                    r = record_last
                    record_last = None
                else :
                    r = cursor.fetchone()
                if not r : 
                    break
                key,op,ts,fileno,offset,length = r

                #if fileno == 0 :
                #    self._logger.info('SPLIT Read  key %s fileno=%d offset=%d length=%d',repr(key),fileno,offset,length)

                # lazily open the next 'split.N' output file
                if fp_split_slice is None :
                    split_slice_index += 1
                    split_slice_filename = '.'.join(('split',str(split_slice_index)))
                    split_slice_filepath = os.sep.join((db_path, split_slice_filename))
                    self._logger.info('open  split file  %s (first-key: %s) ...', split_slice_filepath, repr(key))
                    fp_split_slice = file(split_slice_filepath, 'a', 1)  # py2 builtin; line-buffered append

                if key == last_key :
                    # repeated key: only the first record per key is kept, and
                    # once a deletion was seen every later record is stale
                    if last_op_is_deletion :
                        continue
                    if self.OP_DELETE == op :
                        last_op_is_deletion = True
                        continue
                    else :
                        # will read data and write data and index
                        pass
                else :  # key != last_key
                    # budget exhausted: stop exactly at a key boundary, so no
                    # key is torn across the two halves of the split
                    if split_will_stop :
                        record_last = r

                        if fp_split_slice : 
                            self._logger.info('close split file  %s (last--key: %s)', split_slice_filepath, repr(last_key))
                            tail_offset = fp_split_slice.tell()
                            fp_split_slice.close()
                            fp_split_slice = None
                            if tail_offset < 1 :
                                # empty file, just remove it
                                os.remove(split_slice_filepath)
                        break

                    last_key = key
                    last_op_is_deletion = False

                    if self.OP_DELETE == op :
                        # newest record for this key is a deletion:
                        # write a delete record and index it
                        value = self.__makeup_deletion_line(key, ts)
                        _offset = fp_split_slice.tell()
                        _length = len(value)
                        fp_split_slice.write(value)
                        fp_split_slice.write("\n")
                        split_index.execute(self.SQL_INSERT, (key,op,ts,split_slice_index,_offset,_length))

                        last_op_is_deletion = True
                        continue
                    else :
                        # will read data and write data and index
                        pass
                    
                # copy the payload: read from the source slice (handles are
                # cached in fps_readonly) and append to the current split slice
                if fileno in self.fps_readonly :
                    _fp = self.fps_readonly[fileno]
                else :
                    _fp = self.__open_slice_file(fileno)
                    self.fps_readonly[fileno] = _fp

                _fp.seek(offset)
                data = _fp.read(length)
                if len(data) != length :
                    # short read: the source slice is truncated/corrupt
                    raise RuntimeError,'fileno=%d offset=%d length=%d (length-got=%d)' % (fileno,offset,length,len(data))

                _offset = fp_split_slice.tell()
                fp_split_slice.write(data)
                fp_split_slice.write("\n")
                split_index.execute(self.SQL_INSERT, (key,op,ts,split_slice_index,_offset,length))

                #if fileno == 0 :
                #    self._logger.info('SPLIT Write key %s fileno=%d offset=%d',repr(key),split_slice_index,_offset)

                # roll over to a new slice file once this one is big enough;
                # the overall byte budget is only re-checked at rollover time
                if (_offset + length) >= self.SLICE_MINOR_COMPACT_SIZE :
                    self._logger.info('close split file  %s (last--key: %s)', split_slice_filepath, repr(last_key))
                    fp_split_slice.close()
                    fp_split_slice = None

                    if split_cell_volume > 0 :
                        split_size += _offset + length
                        if split_size >= split_cell_volume :
                            split_will_stop = True

                ## while r

        finally :
            # commit and close the split index even when the copy loop raised
            self._logger.info('close split index %s', index_path)
            split_index = None
            split_index_db.commit()
            split_index_db.close()
            split_index_db = None

        return True,record_last
        ## __do_split()

    def __remove_slice_files(self, path) :
        """Delete every slice data file ('data.*') and the sqlite index
        file (INDEX_FILE) found directly inside the directory *path*."""
        for entry in os.listdir(path) :
            # a slice is either the index db or any 'data.*' file
            if entry == self.INDEX_FILE or fnmatch.fnmatch(entry, 'data.*') :
                os.remove(os.sep.join((path, entry)))
        ## __remove_slice_files()

    def __remove_split_files(self, path) :
        """Delete every split artifact ('split.*') left in the directory
        *path*, e.g. after a failed or aborted tablet split."""
        doomed = [name for name in os.listdir(path)
                        if fnmatch.fnmatch(name, 'split.*')]
        for name in doomed :
            os.remove(os.sep.join((path, name)))
        ## __remove_split_files()

    def __rename_split_files(self, path) :
        """Promote the split artifacts in *path* to live tablet files:
        'split.index' becomes INDEX_FILE and 'split.<N>' becomes
        '<SLICE_FILENAME_PREFIX>.<N>' (N normalized through int(), so any
        leading zeros are dropped).  'split.*' entries matching neither
        form are left untouched."""
        for name in os.listdir(path) :
            if not fnmatch.fnmatch(name, 'split.*') :
                continue

            if name == 'split.index' :
                target = self.INDEX_FILE
            else :
                matched = self.SPLIT_NAME_PATTERN.search(name)
                if not matched :
                    # e.g. sqlite journal leftovers -- leave them alone
                    continue
                target = '.'.join((self.SLICE_FILENAME_PREFIX,
                                   str(int(matched.group(1)))))

            source_path = os.sep.join((path, name))
            target_path = os.sep.join((path, target))
            os.rename(source_path, target_path)
            os.chmod(target_path, 0o644)   # world-readable, owner-writable
        ## __rename_split_files()

    ####------------------------------------------------------------####

    ## class Slb64DB


