import xapian
import math
import pathutils
import os
import glob
import simplejson
import queries
import pdb

from mbxap.document import IndexableDocument, WordField

class IndexLocked(StandardError):
    ''' Raised by Indexer.acquire() when the index's file-system lock is
    already held by another thread or process. '''

class OccuranceDocument(IndexableDocument):
    # 'stm' holds the stemmed form of a word; the document's data payload
    # carries the JSON-encoded (expansion, count) pairs (see StemmerMap).
    stm = WordField()

class StemmerMap(object):
    '''
    A stemmer map is an extension of a standard stemmer in that it can keep
    track of stemmed-to-unstemmed forms.

    Each unstemmed form carries a count of its occurances so that we can
    provide an ordered list of suggestions based on existing occurance in
    the corpus.

    The map is persisted in its own xapian index named
    "<index_name>.stemmap" under index_home.
    '''
    def __init__(self, index_home, index_name, language='porter'):
        self._index_home = index_home
        self._index_name = index_name
        # Stemmer and Indexer are defined later in this module
        self._stemmer = Stemmer(language)
        self._stem_index = Indexer(index_home, "%s.stemmap" % index_name)


    def bulk_load(self, stemmap):
        '''
        This is a bulk loader for the stem map. Note that it will *destroy*
        the old stemmer map and replace it with the new dataset.

        stemmap should have a datastructure like this:

        { 
            stm_word1:{ expansion1: count, 
                        expansion2: count}, 
            stm_word2:{ expansion3: count, 
                        expansion4: count}, 
        }
        '''

        # blow away the old stemmer map: drop our handle first so nothing
        # holds the index open while the files are deleted underneath it
        self._stem_index = None

        idx_name = "%s.stemmap" % self._index_name
        for fname in glob.glob("%s/%s/*" % (self._index_home, idx_name)):
            os.unlink(fname)
        for index_dir in glob.glob("%s/%s" % (self._index_home, idx_name)):
            os.removedirs(index_dir)
        self._stem_index = Indexer(self._index_home, idx_name)

        # one document per stemmed form; the document data is the
        # JSON-encoded list of (expansion, count) pairs
        doc_list = []
        for stemmed_form, occurance_map in stemmap.items():
            occurance_list = simplejson.dumps(occurance_map.items())
            doc = OccuranceDocument(occurance_list, stm =stemmed_form)
            doc_list.append(doc)

        self._stem_index.index_dociter(iter(doc_list))



    def stem(self, word):
        ''' 
        Return the stemmed form of word.

        NOTE(review): despite the original comment, this does NOT record
        occurance counts -- it simply delegates to the stemmer.  Counts are
        only ever written via bulk_load().
        '''
        return self._stemmer.stem(word)

    def expand(self, word):
        '''
        Take an unstemmed word and return all expanded forms of the stemmed word.

        Returns a set containing every known expansion of the stem, plus the
        stemmed form and the original word themselves.
        '''
        # utf8-encode to match the byte form stored in the index
        stem_word = self._stemmer.stem(word).encode('utf8')

        stem_search = IndexSearcher(self._index_home, "%s.stemmap" % self._index_name)

        # Try locating an existing document for this stem
        mset = stem_search.find(queries.AND(stm=stem_word))
        if mset.size():
            # document data is a JSON list of (expansion, count) pairs;
            # we keep only the expansion strings here
            result = set([t1.encode('utf8') for (t1, t2) in simplejson.loads(mset[0].get_data())])
        else:
            result = set()

        # append the raw and stemmed form to the list just in case
        result.add(stem_word)
        result.add(word)

        return result

    def occurance_list(self, word):
        '''
        return a list of tuples showing the occurance of each unstemmed form of the stemmed word.

        We should get back something like this:

            [('connection', 5), ('connections', 3), ('connected', 1)]

        Returns an empty list when the stem has no entry in the map.
        '''

        # utf8-encode to match the byte form stored in the index
        stem_word = self._stemmer.stem(word).encode('utf8')

        stem_search = IndexSearcher(self._index_home, "%s.stemmap" % self._index_name)

        # Try locating an existing document for this stem
        mset = stem_search.find(queries.AND(stm=stem_word))

        if mset.size():
            result = simplejson.loads(mset[0].get_data())
        else:
            result = []
        return result

class Indexer(object):
    '''
    This class will update xapian indexes
    in a multi-threaded, multi-process safe way
    using file system lock files.
    '''
    def __init__(self, index_home, index_name):
        self._index_path = "%s/%s" % (index_home, index_name)
        self._lock_path = "%s/%s.lock" % (index_home, index_name)
        self._lock = pathutils.Lock(self._lock_path, timeout=0.01)
        self._db = WriteDB(index_home, index_name)

    def acquire(self):
        ''' acquire the file lock'''
        try:
            self._lock.lock(force=False)
        except pathutils.LockError, le:
            raise IndexLocked, le
                
    def release(self):
        ''' release the file lock '''
        self._lock.unlock()

    def delete_doclist(self, doclist):
        '''
        remove a list of documents from the index
        '''
        self.acquire()

        for doc in doclist:
            self._db.delete_document(doc.get_docid())
        self._db.flush()

        self.release()

    def replace_doc(self, old_doc, new_doc):
        '''
        This method should only be used in 
        '''
        self.acquire()
        self._db.replace_document(old_doc.get_docid(), new_doc.as_xapian_doc())
        self._db.flush()
        self.release()
        
    def index_dociter(self, doc_iter):
        ''' index documents contained in the iterator '''
        self.acquire()
        while True:
            try:
                for i in xrange(200):
                    doc = doc_iter.next()
                    self._db.add_document(doc.as_xapian_doc())
            except StopIteration, si:
                # end of the iterator
                break
        self._db.flush()
        self.release()

        
            

class MSetWrapper(object):
    '''
    An iterator over a xapian match set which yields WrappedDocument
    instances instead of raw match tuples.
    '''
    def __init__(self, mset):
        self._mset = mset
        self._msetiter = iter(mset)

    def __getitem__(self, index):
        ''' 
        Provide a more pythonic interface to get_document.

        This function is O(n) runtime for the size of the matchset.

        Raises IndexError for a negative index or one past the end of the
        match set.  (The original returned None for negative indexes and
        leaked StopIteration when index >= size.)
        '''
        if index < 0:
            raise IndexError("negative indexes are not supported: %s" % index)
        # walk a fresh iterator so repeated lookups are independent
        inner_mset = MSetWrapper(self._mset)
        res = None
        try:
            while index >= 0:
                res = inner_mset.next()
                index -= 1
        except StopIteration:
            raise IndexError("matchset index out of range")
        return res

    def next(self):
        # unpack the (docid, weight, rank, percent, document) match tuple
        return WrappedDocument(*self._msetiter.next())

    def __iter__(self):
        return self

    def size(self):
        # number of items in the underlying match set
        return self._mset.size()

class WrappedDocument(object):
    '''
    Bundles one match-set row (docid, weight, rank, percent) together with
    the matched document.  Any attribute this wrapper does not define is
    looked up on the wrapped document instead.
    '''
    def __init__(self, docid, weight, rank, percent, doc):
        ''' capture a single row of a match set '''
        self._docid, self._weight, self._rank, self._percent, self._doc = \
            docid, weight, rank, percent, doc

    def get_docid(self):
        ''' id of the matched document '''
        return self._docid

    def get_weight(self):
        ''' raw match weight '''
        return self._weight

    def get_rank(self):
        ''' position of this match within the match set '''
        return self._rank

    def get_percent(self):
        ''' match relevance as a percentage '''
        return self._percent

    def get_doc(self):
        ''' the wrapped document itself '''
        return self._doc

    def __getattr__(self, key):
        # fall through to the wrapped document for anything we don't define
        return getattr(self._doc, key)

    def __str__(self):
        summary = (self.get_docid(), self.get_weight(), self.get_rank(),
                   self.get_percent(), self._doc)
        return "<docid:%s weight:%s rank:%s percent:%s doc:%s>" % summary

class AbstractDB(object):
    '''
    Thin wrapper around a xapian database.  Subclasses must provide
    get_db() (see ReadDB / WriteDB below); any attribute not defined here
    is delegated to the raw xapian database object.
    '''
    def __init__(self, index_home, index_name):
        self._index_path = "%s/%s" % (index_home, index_name)

        # force the index home to be created if necessary
        if not os.path.exists(index_home):
            os.makedirs(index_home)

        self._db = self.get_db()

    def search_raw_query(self, query, start_index=0, end_index=0):
        ''' run a raw xapian query against the search index

        query must expose as_xapian() returning a xapian query object.

        NOTE(review): end_index is passed as get_mset()'s second argument,
        which xapian treats as the *maximum number of items*, not an end
        offset -- and the default of 0 produces an empty match set.
        Confirm all callers pass an explicit end_index (IndexSearcher.find
        passes 1000).
        '''
        enquire = xapian.Enquire(self._db)
        enquire.set_query(query.as_xapian())
        mset = enquire.get_mset(start_index, end_index)
        return mset

    def __getattr__(self, key):
        # Delegate any unknown method down to the raw Xapian database
        return getattr(self._db, key)

    
class ReadDB(AbstractDB):
    '''
    Read-only abstraction over Xapian so that we can possibly replace it
    with a different backend if necessary.
    '''
    def get_db(self):
        # open the flint-format index read-only
        return xapian.flint_open(self._index_path)


class WriteDB(AbstractDB):
    ''' Writable counterpart of ReadDB; creates the index if missing. '''
    def get_db(self):
        # NOTE(review): flint_open() with DB_CREATE_OR_OPEN should already
        # return a writable database; re-wrapping it in WritableDatabase
        # looks redundant -- confirm against the xapian bindings in use.
        db = xapian.flint_open(self._index_path, xapian.DB_CREATE_OR_OPEN)
        return xapian.WritableDatabase(db)

class Stemmer(object):
    ''' Thin wrapper around the xapian stemmer for a given language. '''
    def __init__(self, language='porter'):
        self.language = language
        self._stemmer = xapian.Stem(language)

    def stem(self, word):
        ''' just a simple wrapper around the standard xapian stemmer '''
        # NOTE(review): assert-based validation disappears under -O, and
        # under python2 str.encode('utf8') implicitly decodes via ascii
        # first, so a non-ascii byte string would raise UnicodeDecodeError
        # here -- confirm callers only pass ascii str.
        assert isinstance(word, str), "[%r] is an instance of %s" % (word, type(word))
        return self._stemmer(word.encode('utf8')).decode('utf8')


class PluggableIndexer(object):
    '''
    An Indexer with a plugin pipeline: every document is passed through each
    registered plugin (e.g. a stemmer map occurance counter) before being
    handed to the standard indexer.
    '''

    def __init__(self, index_home, index_name):
        self.index_home = index_home
        self.index_name = index_name

        self._std_index = Indexer(index_home, index_name)

        # plugin *instances*, each constructed with a reference back to us
        self._plugins = []

    def register_plugin(self, plugin_cls):
        '''
        Instantiate plugin_cls (passing ourselves to its constructor) and
        add it to the processing pipeline.

        Raises RuntimeError if an instance of plugin_cls is already
        registered.
        '''
        # Bug fix: the original referenced an undefined name `plugin` here,
        # raising NameError on every call.  Guard against duplicate
        # registrations of the same plugin class instead.
        for existing in self._plugins:
            if isinstance(existing, plugin_cls):
                raise RuntimeError("This plugin is already registered: [%s]" % plugin_cls)
        self._plugins.append(plugin_cls(self))

    def index_dociter(self, doc_iter):
        '''
        walk each document, pass it to every registered plugin, and then to
        the regular indexer.
        '''
        for doc in doc_iter:
            for plugin in self._plugins:
                plugin.process_document(doc)
            # TODO: the standard indexer needs to maintain state to make
            # sure that batch operations properly occur
            self._std_index.index_dociter(iter([doc]))

class IndexSearcher(object):
    '''
    Use this to search the index.

    A pre-built db object may be injected (mainly for testing); otherwise a
    read-only ReadDB is opened at index_home/index_name.
    '''
    def __init__(self, index_home, index_name, db=None):
        # `is not None` rather than truthiness: an explicitly injected db
        # object should be honoured even if it happens to evaluate falsy
        if db is not None:
            self._db = db
        else:
            self._db = ReadDB(index_home, index_name)

    def find(self, query, start_index=0, end_index=1000):
        '''
        Search for query in the index and return an MSetWrapper over the
        results.  query must be a queries._Query instance (build one with
        the helpers in the queries module).
        '''
        assert isinstance(query, queries._Query), \
            "expected a queries._Query instance, got %r" % type(query)
        mset = self._db.search_raw_query(query, start_index, end_index)
        return MSetWrapper(mset)


