'''
    Concrete DocIndexer class.
'''
import sys, os, time, PyLucene
from common import EDocIndexer, Doc, Hit, Log, files_gen, decode_input, encode_output
from abstract_indexer import AbstractIndexer


class DocIndexer(AbstractIndexer):
    ''' File system document indexer.

    Walks a documents directory, converts each file to text with a
    registered parser and maintains a PyLucene full-text index of the
    results.  Supports full rebuilds, incremental updates (reindex
    changed files, add new files, drop deleted files) and dry runs.

    NOTE(review): several members are inherited from AbstractIndexer and
    are not visible in this file: indexdir(), indexpath(), fspath(),
    docs_gen(), check_index_exists(), self.analyzer, self.parsers,
    LISTING_FORMAT, MAX_FIELD_LENGTH -- confirm their contracts there.
    '''

    def __init__(self, docsdir):
        ''' Initialize an indexer rooted at the documents directory docsdir.

        Raises EDocIndexer if docsdir is not an existing directory.
        '''
        AbstractIndexer.__init__(self)
        self.format = self.LISTING_FORMAT   # Listing format used by printf().
        self.includes = ['*']   # File name include patterns (see files_gen).
        self.excludes = []      # File name exclude patterns (see files_gen).
        self.rebuild = False    # True: recreate the index from scratch.
        self.optimize = True    # True: optimize the index on close_writer().
        self.docsdir = os.path.abspath(docsdir)
        if not os.path.isdir(self.docsdir):
            raise EDocIndexer,'documents directory not found: %s' % self.docsdir
        self.start_time = time.time()   # For elapsed-time reporting.
        self.writer = None      # PyLucene.IndexWriter; set by open_writer().
        self.reset_counts()
        self.dryrun = False     # True: log actions but never modify the index.

    def reset_counts(self):
        ''' Zero the indexed/skipped/bytes statistics counters. '''
        self.indexed_count = 0
        self.skipped_count = 0
        self.bytes_indexed = 0L     # Long: byte totals can exceed int range.

    def log_header(self):
        ''' Log the run mode (rebuild vs. update) and start time. '''
        details = 'rebuild index' if self.rebuild else 'update index'
        Log.info('start: %s: %s' % (details, time.ctime(self.start_time)))

    def log_footer(self):
        ''' Log summary statistics: file counts, bytes indexed, elapsed time. '''
        self.elapsed_time = time.time() - self.start_time
        Log.info('files indexed: %s' % self.indexed_count)
        if self.skipped_count > 0:
            Log.info('files skipped: %s' % self.skipped_count)
        if self.bytes_indexed > 0:
            Log.info('bytes indexed: %0.2fMB' %
                (self.bytes_indexed / (1024.0*1024.0)))
        Log.info('elapsed time:  %s' %
            time.strftime('%H:%M:%S', time.gmtime(self.elapsed_time)))

    def fsfiles_gen(self):
        '''Documents directory file name generator.'''
        # Always exclude the index directory itself so the index is never
        # indexed, even when it lives inside the documents directory.
        excludes = self.excludes + [os.path.split(self.indexdir())[1]]
        for f in files_gen(self.docsdir, self.includes, excludes):
            # Yield unicode paths; add_doc/find_doc assert isinstance(unicode).
            yield decode_input(f)

    def delete_doc(self, doc, reader):
        ''' Delete Doc from index reader. '''
        # Deletion is logged even on a dry run so the user can preview it.
        if not self.dryrun:
            reader.deleteDocument(doc['id'])
        Log.info('deleted index entry: %s' % doc['pathname'])

    def find_doc(self, fsfile, searcher):
        ''' Return index Doc for file with file system path fsfile from index
        searcher. Return None if document is not in index.'''
        assert isinstance(fsfile, unicode)
        # 'pathname' is stored UN_TOKENIZED (see add_doc), so an exact-match
        # TermQuery on the index-relative path finds at most one document.
        indexfile = self.indexpath(fsfile)
        term = PyLucene.Term('pathname', indexfile)
        query = PyLucene.TermQuery(term)
        hits = searcher.search(query)
        assert hits.length() <= 1,'duplicate index document: %s' % indexfile
        if hits.length() == 1:
            return Hit(hits, 0)
        else:
            return None

    def files_to_index_gen(self):
        ''' Returns a generator for the files to be indexed.'''
        if self.rebuild:
            # Create new index and index all documents.
            return self.fsfiles_gen()
        else:
            self.check_index_exists()
            Log.info('searching for documents to update...')
            # Reindex documents that have changed, add new documents, delete
            # missing documents.
            files_to_index = []  # Paths of new or updated files to be added.
            # Find index documents whose source file has been updated since last
            # indexed and delete documents whose source file no longer exists.
            reader = PyLucene.IndexReader.open(self.indexdir())
            try:
                for doc in self.docs_gen(reader):
                    fsfile = self.fspath(doc['pathname'])
                    if not os.path.isfile(fsfile):
                        self.delete_doc(doc, reader)
                    # Allow 1 second for conversion rounding.
                    elif os.path.getmtime(fsfile) > doc['mtime'] + 1:
                        self.delete_doc(doc, reader)
            finally:
                # Closing the reader commits the deletions before the
                # searcher below is opened, so deleted docs are not found.
                reader.close()

            # Find new files not in the index and files to be updated.
            # Updated files are found by virtue of the fact that they
            # have just been deleted from the index.
            searcher = PyLucene.IndexSearcher(self.indexdir())
            try:
                for fsfile in self.fsfiles_gen():
                    doc = self.find_doc(fsfile,searcher)
                    if not doc:
                        files_to_index.append(fsfile)
            finally:
                searcher.close()
            # Index updated and new files.
            return iter(files_to_index)

    def update_index(self):
        ''' Index files in self.docsdir. '''
        self.add_files(self.files_to_index_gen())

    def add_files(self, fsfiles):
        ''' Adds files in fsfiles iterator to the document index.'''
        self.open_writer()
        try:
            for f in fsfiles:
                self.add_doc(f)
        finally:
            # Always close (and optionally optimize) the writer, even if a
            # document fails to index.
            self.close_writer()

    def delete_files(self, fsfiles):
        ''' Deletes files in fsfiles iterator from the document index.'''
        assert not self.rebuild, 'index deletion during rebuild'
        self.check_index_exists()
        for fsfile in fsfiles:
            # If the document is in the index delete it.
            # NOTE(review): a searcher and a reader are opened per file;
            # presumably deletions must be committed (reader.close) before
            # the next lookup -- confirm before batching these opens.
            searcher = PyLucene.IndexSearcher(self.indexdir())
            try:
                hit = self.find_doc(fsfile, searcher)
            finally:
                searcher.close()
            if hit:
                reader = PyLucene.IndexReader.open(self.indexdir())
                try:
                    self.delete_doc(hit, reader)
                finally:
                    reader.close()

    def update_files(self, fsfiles):
        ''' Updates files in fsfiles iterator to the document index.'''
        # An update is a delete followed by an add; during a rebuild the
        # whole index is recreated so the delete pass is unnecessary.
        if not self.rebuild:
            self.delete_files(fsfiles)
        self.add_files(fsfiles)

    def open_writer(self):
        ''' Open self.writer on the index directory, creating the directory
        if necessary.  A no-op on a dry run (self.writer stays None).
        Raises EDocIndexer if the index directory cannot be created.
        '''
        if not self.rebuild:
            self.check_index_exists()
        if not self.dryrun:
            if not os.path.exists(self.indexdir()):
                try:
                    os.mkdir(self.indexdir())
                except:
                    raise EDocIndexer, \
                        'failed to create index directory: %s' % self.indexdir()
            # Second argument (self.rebuild) tells Lucene to create a new
            # index, discarding any existing one.
            store = PyLucene.FSDirectory.getDirectory(self.indexdir(),
                    self.rebuild)
            self.writer = PyLucene.IndexWriter(store, self.analyzer,
                    self.rebuild)
            self.writer.setMaxFieldLength(self.MAX_FIELD_LENGTH)

    def close_writer(self):
        ''' Optionally optimize, then close self.writer.
        On a dry run only the optimize message is logged. '''
        if self.optimize:
            Log.info('optimizing...')
            if not self.dryrun:
                self.writer.optimize()
        if not self.dryrun:
            self.writer.close()

    def add_doc(self, fsfile):
        ''' Add file with file system path fsfile to index.

        fsfile must be unicode.  Files that are missing, outside the
        documents directory, unparseable or empty are skipped; every
        processed file gets an index entry recording its 'status'.
        Requires an open writer (see open_writer).
        '''
        assert isinstance(fsfile, unicode)
        fsfile = os.path.abspath(fsfile)
        if not os.path.isfile(fsfile):
            Log.warning('skipped: missing file: %s' % fsfile)
            self.skipped_count += 1
            return
        indexfile = self.indexpath(fsfile)
        # NOTE(review): an absolute indexpath() result apparently signals a
        # file outside self.docsdir -- confirm against AbstractIndexer.
        if os.path.isabs(indexfile):
            Log.warning('skipped: file outside documents directory: %s' % fsfile)
            self.skipped_count += 1
            return
        try:
            doc = PyLucene.Document()
            # 'pathname' is the document's unique key; stored untokenized so
            # find_doc() can look it up with an exact TermQuery.
            doc.add(PyLucene.Field('pathname', indexfile,
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.UN_TOKENIZED))
            dirname, filename = os.path.split(indexfile)
            filename, ext = os.path.splitext(filename)
            doc.add(PyLucene.Field('dirname', dirname,
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.TOKENIZED))
            doc.add(PyLucene.Field('filename', filename,
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.TOKENIZED))
            # Extension is stored as lower case without leading period.
            doc.add(PyLucene.Field('ext', ext[1:].lower(),
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.UN_TOKENIZED))
            # 'mtime' is compared against the file's mtime by
            # files_to_index_gen() to detect updated documents.
            doc.add(PyLucene.Field('mtime', Doc.time2str(os.path.getmtime(fsfile)),
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.UN_TOKENIZED))
            doc.add(PyLucene.Field('size', str(os.path.getsize(fsfile)),
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.UN_TOKENIZED))
            parser = self.parsers.fileparser(fsfile)
            if parser is None:
                Log.info('skipped: no parser: %s' % fsfile)
                self.skipped_count += 1
                status = Doc.NO_PARSER
            else:
                try:
                    if self.dryrun:
                        content = ' '      # Dummy non-zero length content.
                    else:
                        content = parser.text(fsfile)
                        assert isinstance(content, unicode)
                except EDocIndexer,e:
                    Log.warning('%s: %s' % (fsfile,e))
                    self.skipped_count += 1
                    status = Doc.PARSER_ERROR
                else:
                    if len(content) > 0:
                        # 'content' is searchable but not stored (Store.NO)
                        # to keep the index small.
                        doc.add(PyLucene.Field('content', content,
                                               PyLucene.Field.Store.NO,
                                               PyLucene.Field.Index.TOKENIZED))
                        for fld in parser.additional_fields():
                            if not self.dryrun:
                                doc.add(fld)
                        Log.info('indexed: %s' % fsfile)
                        self.indexed_count += 1
                        self.bytes_indexed += os.path.getsize(fsfile)
                        status = Doc.INDEXED
                    else:
                        Log.warning('empty document: %s' % fsfile)
                        status = Doc.EMPTY
            # Every document, even a skipped one, records its outcome so
            # printf() can classify index entries later.
            doc.add(PyLucene.Field('status', str(status),
                                   PyLucene.Field.Store.YES,
                                   PyLucene.Field.Index.UN_TOKENIZED))
            if not self.dryrun:
                self.writer.addDocument(doc)
        except Exception,e:
            # Log unexpected errors with the offending file name, then
            # re-raise so the caller sees the original exception.
            Log.error('unexpected error indexing: %s: %s' % (fsfile,e))
            raise
```

    def printf(self, list=False, summary=False):
        ''' List index contents and/or statistics summary.

        list: log one line per index document in self.format.
        summary: log the statistics footer after the listing.
        Note: the 'list' parameter shadows the builtin of the same name
        inside this method.
        '''
        reader = PyLucene.IndexReader.open(self.indexdir())
        try:
            for doc in self.docs_gen(reader):
                # Counters are cumulative; callers wanting fresh statistics
                # should call reset_counts() first.
                if doc['status'] == Doc.INDEXED:
                    self.indexed_count += 1
                    self.bytes_indexed += doc['size']
                else:
                    self.skipped_count += 1
                if list:
                    Log.info(doc.printf(self.format))
        finally:
            reader.close()
        if summary:
            if list:
                Log.info('')    # Blank separator line between listing and summary.
            self.log_footer()
        return

    @classmethod
    def file_to_text(cls, filename):
        ''' Converts the file to text and returns the text as a string.

        Raises EDocIndexer if the file cannot be opened, no parser is
        available for it, or the parser fails.
        '''
        if not (os.path.exists(filename) and os.path.isfile(filename)):
            raise EDocIndexer,encode_output('unable to open document %s' % filename)
        try:
            cls.parsers.open()
        except:
            raise EDocIndexer,'unable to open document parsers'
        try:
            parser = cls.parsers.fileparser(filename)
            if parser is None:
                raise EDocIndexer,'no parser available for this document type'
            try:
                result = parser.text(filename)
            except EDocIndexer:
                # Expected parser errors propagate unchanged.
                raise
            except Exception,e:
                # Unexpected error.
                raise EDocIndexer,'unexpected error: %s' % e
        finally:
            cls.parsers.close()
        return result

    @classmethod
    def analyze(cls,istream,ostream):
        ''' Analyze the contents of input stream and write content terms
            (words) to output stream ostream.
            istream must be unicode.
        '''
        # Uses the same analyzer as indexing so the emitted terms match
        # what queries against the 'content' field would see.
        stream = cls.analyzer.tokenStream('content', istream)
        while True:
            token = stream.next()
            if not token: break
            ostream.write(encode_output(token.termText(),'replace',ostream)+'\n')


# vim: set ts=4 sw=4:
