import xapian
import math
import pathutils
import os
import glob
import simplejson
import queries
import pdb

from mbxap.document import IndexableDocument, WordField

class IndexLocked(StandardError):
    '''
    Raised when the index's file-system lock is already held by another
    writer (see Indexer.acquire).
    '''
    pass

class OccuranceDocument(IndexableDocument):
    # NOTE(review): class name misspells "Occurrence"; renaming would
    # break existing callers, so it is left as-is.
    # stm -- presumably holds stemmed terms as a word field; confirm
    # against the IndexableDocument/WordField definitions in mbxap.
    stm = WordField()

class Indexer(object):
    '''
    This class will update xapian indexes
    in a multi-threaded, multi-process safe way
    using file system lock files.
    '''
    def __init__(self, index_home, index_name):
        self._index_home = index_home
        self._index_name = index_name

        self._index_path = "%s/%s" % (index_home, index_name)
        self._lock_path = "%s/%s.lock" % (index_home, index_name)
        self._lock = pathutils.Lock(self._lock_path, timeout=0.01)
        self._db = WriteDB(index_home, index_name)

    def acquire(self):
        ''' acquire the file lock'''
        try:
            self._lock.lock(force=False)
        except pathutils.LockError, le:
            raise IndexLocked, le
                
    def release(self):
        ''' release the file lock '''
        self._lock.unlock()

    def delete_matches(self, query):
        searcher = IndexSearcher(self._index_home, self._index_name)
        mset = searcher.find(query)
        self.delete_doclist(list(mset))

    def delete_doclist(self, doclist):
        '''
        remove a list of documents from the index
        '''
        self.acquire()

        self._db.begin_transaction(False)
        for doc in doclist:
            self._db.delete_document(doc.get_docid())
        self._db.commit_transaction()
        self._db.flush()

        self.release()

    def replace_doc(self, old_doc, new_doc):
        '''
        This method should only be used in 
        '''
        self.acquire()
        self._db.replace_document(old_doc.get_docid(), new_doc.as_xapian_doc())
        self._db.flush()
        self.release()
        
    def index_dociter(self, doc_iter):
        ''' index documents contained in the iterator '''
        self.acquire()
        self._db.begin_transaction(False)
        while True:
            try:
                for i in xrange(200):
                    doc = doc_iter.next()
                    self._db.add_document(doc.as_xapian_doc())
            except StopIteration, si:
                # end of the iterator
                break
        self._db.commit_transaction()
        self._db.flush()
        self.release()

        
            

class MSetWrapper(object):
    '''
    Iterator over a xapian match set that yields WrappedDocument
    instances instead of raw mset entries.
    '''
    def __init__(self, mset):
        self._mset = mset
        self._msetiter = iter(mset)

    def __getitem__(self, index):
        '''
        Pythonic positional access to the match set.

        Walks a fresh iterator from the front, so this is O(n) in the
        requested index; a negative index returns None.
        '''
        walker = MSetWrapper(self._mset)
        remaining = index
        result = None
        while remaining >= 0:
            result = walker.next()
            remaining -= 1
        return result

    def next(self):
        # Python 2 iterator protocol: each raw mset entry expands into
        # the WrappedDocument constructor arguments.
        return WrappedDocument(*self._msetiter.next())

    def __iter__(self):
        return self

    def size(self):
        ''' number of matches in the underlying mset '''
        return self._mset.size()

class WrappedDocument(object):
    '''
    Read-only wrapper pairing a match's metadata (docid, weight, rank,
    percent) with the matched document.  Unknown attribute lookups are
    forwarded to the wrapped document.
    '''
    def __init__(self, docid, weight, rank, percent, doc):
        # _doc is assigned first so __getattr__ can always delegate.
        self._doc = doc
        self._docid = docid
        self._weight = weight
        self._rank = rank
        self._percent = percent

    def get_docid(self):
        ''' id of the matched document '''
        return self._docid

    def get_weight(self):
        ''' raw match weight '''
        return self._weight

    def get_rank(self):
        ''' rank of this match within the match set '''
        return self._rank

    def get_percent(self):
        ''' match relevance as a percentage '''
        return self._percent

    def get_doc(self):
        ''' the wrapped document itself '''
        return self._doc

    def __getattr__(self, key):
        # fall through to the underlying document for anything this
        # wrapper does not define itself
        return getattr(self._doc, key)

    def __str__(self):
        fields = (self.get_docid(), self.get_weight(), self.get_rank(),
                  self.get_percent(), self._doc)
        return "<docid:%s weight:%s rank:%s percent:%s doc:%s>" % fields

class AbstractDB(object):
    '''
    Base class owning a xapian database handle.  Subclasses implement
    get_db() to open the database (read-only or writable).
    '''
    def __init__(self, index_home, index_name):
        self._index_path = "%s/%s" % (index_home, index_name)

        # Create the index home if necessary.  EAFP instead of an
        # exists() check: this module is explicitly multi-process, and
        # another process could create the directory between the check
        # and the makedirs() call.
        try:
            os.makedirs(index_home)
        except OSError:
            if not os.path.isdir(index_home):
                raise

        self._db = self.get_db()

    def search_raw_query(self, query, start_index=0, end_index=0):
        ''' run a raw xapian query against the search index '''
        enquire = xapian.Enquire(self._db)
        enquire.set_query(query.as_xapian())
        mset = enquire.get_mset(start_index, end_index)
        return mset

    def __getattr__(self, key):
        # Delegate any unknown attribute down to the raw xapian database.
        # Guard _db explicitly: if it is not set yet (e.g. attribute
        # access before __init__ finishes), the delegation below would
        # otherwise recurse on itself forever.
        if key == '_db':
            raise AttributeError(key)
        return getattr(self._db, key)

    
class ReadDB(AbstractDB):
    '''
    Read-only database handle.  An abstraction over Xapian so that we
    could swap in a different backend later if necessary.
    '''
    def get_db(self):
        ''' open the flint index at the computed path '''
        path = self._index_path
        return xapian.flint_open(path)


class WriteDB(AbstractDB):
    ''' Writable database handle; creates the index if it is missing. '''
    def get_db(self):
        # NOTE(review): flint_open with DB_CREATE_OR_OPEN may already
        # return a writable handle in these bindings -- confirm whether
        # the extra WritableDatabase wrap is actually required.
        raw = xapian.flint_open(self._index_path, xapian.DB_CREATE_OR_OPEN)
        return xapian.WritableDatabase(raw)

class Stemmer(object):
    '''
    Thin wrapper around the xapian stemmer that works in unicode.
    '''
    def __init__(self, language='porter'):
        self.language = language
        self._stemmer = xapian.Stem(language)

    def stem(self, word):
        '''
        Stem a single word and return the result as unicode.

        Accepts unicode or an already utf8-encoded byte string.  The
        previous str-only assert contradicted the unconditional
        word.encode('utf8') that followed it: in python 2, encoding a
        byte string first ascii-decodes it and raises
        UnicodeDecodeError for any non-ascii input.
        '''
        assert isinstance(word, basestring), "[%r] is an instance of %s" % (word, type(word))
        if isinstance(word, unicode):
            word = word.encode('utf8')
        return self._stemmer(word).decode('utf8')


class PluggableIndexer(object):
    '''
    This indexer has a built in stemmer map to count occurances of stemmed
    words.
    '''

    def __init__(self, index_home, index_name):
        self.index_home = index_home
        self.index_name = index_name

        self._std_index = Indexer(index_home, index_name)

        # plugin *instances*, constructed in register_plugin
        self._plugins = []

    def register_plugin(self, plugin_cls):
        '''
        Instantiate plugin_cls (passing this indexer to it) and add it
        to the plugin list.

        Raises RuntimeError if an instance of that class is already
        registered.  (The previous duplicate check referenced the
        undefined name "plugin" and always raised NameError.)
        '''
        if any(isinstance(p, plugin_cls) for p in self._plugins):
            raise RuntimeError("This plugin is already registered: [%s]" % plugin_cls)
        self._plugins.append(plugin_cls(self))

    def index_dociter(self, doc_iter):
        '''
        walk each term in the document and pass it to the stemmer map as well as the regular
        indexer.
        '''
        for doc in doc_iter:
            # let every plugin see the document before it gets indexed
            for plugin in self._plugins:
                plugin.process_document(doc)
            # TODO: the standard indexer needs to maintain state to make
            # sure that batch operations properly occur
            self._std_index.index_dociter(iter([doc]))

class IndexSearcher(object):
    '''
    Use this to search the index
    '''
    def __init__(self, index_home, index_name, db=None):
        # "is not None" rather than truthiness: a caller-supplied db
        # handle that happens to be falsy must not be silently replaced
        # by a freshly opened ReadDB.
        if db is not None:
            self._db = db
        else:
            self._db = ReadDB(index_home, index_name)

    def find(self, query, start_index=0, end_index=1000):
        '''
        Search for query in the index.  query can be either a string, field, or
        list of fields and the search will dispatch correctly.

        start_index/end_index are passed straight through to the raw
        query.  Returns an MSetWrapper over the matches.
        '''
        assert isinstance(query, queries._Query)
        mset = self._db.search_raw_query(query, start_index, end_index)
        return MSetWrapper(mset)


