# pytombo - Python library for processing plain text + encrypted notes.
# Copyright (C) 2007  Chris Clark

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os
import re
import shutil
from cgi import escape as escape_html

# 3rd party library from http://whoosh.ca/
# TODO consider wrapping whoosh with pyndexter http://pypi.python.org/pypi/pyndexter
# Tested with version 2.2.2 from hg rev 1025:10b3e7689475 (2011-08-29).  http://bitbucket.org/mchaput/whoosh

import whoosh.index as index
from whoosh.fields import Schema, STORED, ID, KEYWORD, TEXT
import whoosh.qparser
from whoosh.qparser import QueryParser
from whoosh import analysis
from whoosh.highlight import highlight, SentenceFragmenter#, UppercaseFormatter #, TextHtmlFormatter


import tombonote
import chi_io
import search
from search import safe_mkdir
from search import SearchException, SearchCancelled, recurse_notes, is_text, note_filename_filter_gen, example_progess_callback
import pytombo
from pytombo.tombonote import remove_leading_path


def index_filesystem_content(index_name, content_path, note_encoding=None, filesystem_encoding=None, progess_callback=None, clear_existing_index=False):
    """Build a full text (Whoosh) index of the plain text files under content_path.

    index_name - directory in which the Whoosh index is created
    content_path - root directory whose text files are indexed
    note_encoding - encoding of file contents, defaults to search.config['file_encoding']
    filesystem_encoding - encoding of file names, defaults to search.config['filesystem_encoding']
    progess_callback - optional callable, called as progess_callback(filename=...) per file
    clear_existing_index - if True, (re)create the index even if one already exists

    Returns the number of documents indexed.
    Raises NotImplementedError if an index already exists and clear_existing_index is False.

    NOTE can not (yet) index encrypted content.
    """
    note_encoding = note_encoding or search.config['file_encoding']
    filesystem_encoding = filesystem_encoding or search.config['filesystem_encoding']
    # TODO index_name may need some work, e.g. use that as a dir and create things inside it. maybe too complex though..
    if index.exists_in(index_name) and not clear_existing_index:
        ## TODO FIXME incremental index update not implemented - could just delete and re-add for simplicity
        raise NotImplementedError('updating an existing index is not implemented, pass clear_existing_index=True')

    schema = Schema(title=TEXT(stored=True), content=TEXT, path=ID(stored=True))
    safe_mkdir(index_name)
    # Create using a Schema object
    ix = index.create_in(index_name, schema)

    # Add documents
    writer = ix.writer()
    file_count = 0
    for content_filename in recurse_notes(content_path, is_text):
        content_filename = unicode(content_filename, filesystem_encoding)
        if progess_callback:
            progess_callback(filename=content_filename)

        # read raw bytes then decode; try/finally ensures the file handle is closed
        f = open(content_filename, 'rb')
        try:
            tmp_text = f.read()
        finally:
            f.close()
        tmp_text = unicode(tmp_text, note_encoding)
        title = os.path.basename(content_filename)  # TODO more tricks...
        writer.add_document(title=title, content=tmp_text, path=content_filename)
        file_count += 1
    writer.commit()  ## TODO commit after each or defer, which is faster?
    ## TODO close / unlock?
    return file_count


def index_content(index_name, note_store, note_encoding=None, filesystem_encoding=None, progess_callback=None, clear_existing_index=False):
    """Build a full text (Whoosh) index of the notes held in note_store.

    index_name - directory in which the Whoosh index is created
    note_store - a BaseNotes derived object; recurse_notes() and note_contents() are used
    note_encoding - encoding of note contents, defaults to search.config['file_encoding']
    filesystem_encoding - encoding of note names, defaults to search.config['filesystem_encoding']
    progess_callback - optional callable, called as progess_callback(filename=...) per note
    clear_existing_index - if True, (re)create the index even if one already exists

    Returns the number of documents indexed.
    Raises NotImplementedError if an index already exists and clear_existing_index is False.

    NOTE can not (yet) index encrypted content.
    """
    note_encoding = note_encoding or search.config['file_encoding']
    filesystem_encoding = filesystem_encoding or search.config['filesystem_encoding']
    # TODO index_name may need some work, e.g. use that as a dir and create things inside it. maybe too complex though..
    if index.exists_in(index_name) and not clear_existing_index:
        ## TODO FIXME incremental index update not implemented - could just delete and re-add for simplicity
        raise NotImplementedError('updating an existing index is not implemented, pass clear_existing_index=True')

    schema = Schema(title=TEXT(stored=True), content=TEXT, path=ID(stored=True))
    safe_mkdir(index_name)
    # Create using a Schema object
    ix = index.create_in(index_name, schema)

    # Add documents
    writer = ix.writer()
    file_count = 0
    for content_filename in note_store.recurse_notes(filename_filter=is_text):
        if isinstance(content_filename, str):
            content_filename = unicode(content_filename, filesystem_encoding)
        if progess_callback:
            progess_callback(filename=content_filename)

        tmp_text = note_store.note_contents(content_filename)
        if isinstance(tmp_text, str):
            try:
                tmp_text = tmp_text.decode('utf8')  # horrible hack, turns out I have a dirty mix of encodings in my notes collection
            except UnicodeDecodeError:
                tmp_text = tmp_text.decode(note_encoding)
        title = os.path.basename(content_filename)  # TODO more tricks...
        writer.add_document(title=title, content=tmp_text, path=content_filename)
        file_count += 1
    writer.commit()  ## TODO commit after each or defer, which is faster?
    ## TODO close / unlock?
    return file_count



def search_filesystem_iter(search_term, search_path, search_term_is_a_regex=True, ignore_case=True, search_encrypted=False, get_password_callback=None, progess_callback=None, findonly_filename=None, index_name=None, note_encoding=None, filesystem_encoding=None):
    """Full text search over a previously built Whoosh index; yields (filename, hit_detail) tuples.

    Same api as search.search_iter() but implements full text search -- some
    params (search_term_is_a_regex, ignore_case, search_encrypted,
    get_password_callback, progess_callback) are ignored and only exist for
    api compatibility with the grep like search.
    NOTE this BREAKS the api, could make it optional so as to extend but why fake it?
    may decide to do it later on, this is an experiment.
    FIXME TODO, maybe extend API and make index an extra named param? be much cleaner

    search_path - the normal search path; hit files are re-opened from disk for highlighting
    index_name - the index (location/identifier), required
    findonly_filename - if set, only file names are reported (no content extract)
    note_encoding - encoding of note contents, defaults to search.config['file_encoding']

    Raises SearchException on a missing index_name, a bad search_path, or a
    missing/corrupt index.
    can not (yet) search encrypted content

    TODO Add limit (pagination), i.e. if get more than 200 hits stop or just return the top 200
    """
    if not index_name:
        raise SearchException('full text search requested with out an index being provided')
    if not isinstance(search_path, basestring) or not os.path.exists(search_path):
        raise SearchException('bad search path %s' % search_path)

    #TODO look into bugs in pyparsing interface that whoosh offers, e.g. crashes on ".deb" "-term"

    note_encoding = note_encoding or search.config['file_encoding']
    filesystem_encoding = filesystem_encoding or search.config['filesystem_encoding']

    #TODO make unicode search_term and search_path
    try:
        ix = index.open_dir(index_name)
    except IOError:
        raise SearchException('IOError on full text search index open (missing or corrupt?)')

    # Search documents
    searcher = ix.searcher()
    try:
        # FIXME TODO new parser options - consider whoosh.qparser.SimpleParser or
        # whoosh.qparser.DisMaxParser: simpler Altavista-like syntax, basically just
        # unfielded words and phrases with optional "+" (required) / "-" (NOT) prefixes,
        # e.g.:  render +"lighting model" shader -rman
        # see http://groups.google.com/group/whoosh/msg/38d7c288b4533ddc
        parser = QueryParser("content", schema=ix.schema)
        if isinstance(search_term, str):
            search_term = search_term.decode('utf8')  # make unicode FIXME - assumes utf8 byte strings
        query = parser.parse(search_term)

        results = searcher.search(query, terms=True)  # terms=True should be faster

        ## TODO make highlighting optional
        for hit in results:
            hit_path = hit['path']
            if findonly_filename:
                yield (hit_path, [(1, 'FILENAME SEARCH NOT REALLY IMPLEMENTED, pretending only want file names for results\r')]) # note \r on the end, issue under window that needs to be fixed in regular search module - this is a work around
            else:
                # The index does NOT use STORED file contents, so re-open the file
                # on disk to extract/highlight the matching fragment.
                # NOTE extraction requires positive search term extraction,
                # e.g. do not want to extract lines for search term "-hello" ("not hello")
                f = open(hit_path, 'rb')
                try:
                    doc_contents = f.read()  # reads entire file....
                finally:
                    f.close()
                doc_contents = unicode(doc_contents, note_encoding)

                ## TODO Whoosh fragmenter is OK, but not that great. E.g. I have document with the words "home directions" at the start of the file, first line, first byte. lowercase highlighter does not highlight it
                ## Also note the Whoosh 2.2.2 highlighter shows less context :-(
                result_text_fragment = hit.highlights('content', text=doc_contents)
                result_text_fragment += '\r'  # note \r on the end, issue under window that needs to be fixed in regular search module - this is a work around
                yield (hit_path, [(1, result_text_fragment)])
    finally:
        # release reader resources even if the caller abandons the generator early
        searcher.close()
    #ix.commit()


def search_iter(search_term, note_store, search_term_is_a_regex=True, ignore_case=True, search_encrypted=False, get_password_callback=None, progess_callback=None, findonly_filename=None, index_name=None, note_encoding=None, filesystem_encoding=None):
    """Full text search over a previously built Whoosh index using a note store.

    Same as search_filesystem_iter() but without the dirname check; hit contents
    are opened via note_store.note_contents() instead of the filesystem.
    Yields (filename, hit_detail) tuples.
    NOTE this BREAKS the api, could make it optional so as to extend but why fake it?
    may decide to do it later on, this is an experiment.
    FIXME TODO, maybe extend API and make index an extra named param? be much cleaner
    Some params (search_term_is_a_regex, ignore_case, search_encrypted,
    get_password_callback, progess_callback) are ignored and only exist for
    api compatibility with the grep like search.search_iter().

    note_store - object providing note_contents(), used to fetch hit contents for highlighting
    index_name - the index (location/identifier), required
    findonly_filename - if set, only file names are reported (no content extract)
    note_encoding - encoding of note contents, defaults to search.config['file_encoding']

    Raises SearchException on a missing index_name or a missing/corrupt index.
    can not (yet) search encrypted content

    TODO Add limit (pagination), i.e. if get more than 200 hits stop or just return the top 200
    """
    if not index_name:
        raise SearchException('full text search requested with out an index being provided')

    #TODO look into bugs in pyparsing interface that whoosh offers, e.g. crashes on ".deb" "-term"

    note_encoding = note_encoding or search.config['file_encoding']
    filesystem_encoding = filesystem_encoding or search.config['filesystem_encoding']

    #TODO make unicode search_term and search_path
    try:
        ix = index.open_dir(index_name)
    except IOError:
        raise SearchException('IOError on full text search index open (missing or corrupt?)')

    # Search documents
    searcher = ix.searcher()
    try:
        # FIXME TODO new parser options - consider whoosh.qparser.SimpleParser or
        # whoosh.qparser.DisMaxParser: simpler Altavista-like syntax, basically just
        # unfielded words and phrases with optional "+" (required) / "-" (NOT) prefixes,
        # e.g.:  render +"lighting model" shader -rman
        # see http://groups.google.com/group/whoosh/msg/38d7c288b4533ddc
        parser = QueryParser("content", schema=ix.schema, termclass=whoosh.qparser.query.Variations)  # Variations means search for "appliance" will match "appliance" and "appliances" (and vice-versa)
        if isinstance(search_term, str):
            search_term = search_term.decode('utf8')  # make unicode FIXME - assumes utf8 byte strings
        query = parser.parse(search_term)

        results = searcher.search(query, terms=True)  # terms=True should be faster

        ## TODO make highlighting optional
        for hit in results:
            hit_path = hit['path']
            if findonly_filename:
                yield (hit_path, [(1, 'FILENAME SEARCH NOT REALLY IMPLEMENTED, pretending only want file names for results\r')]) # note \r on the end, issue under window that needs to be fixed in regular search module - this is a work around
            else:
                # The index does NOT use STORED file contents, so fetch the note
                # contents again to extract/highlight the matching fragment.
                # NOTE extraction requires positive search term extraction,
                # e.g. do not want to extract lines for search term "-hello" ("not hello")
                try:
                    doc_contents = note_store.note_contents(hit_path)
                except pytombo.PyTomboIO:
                    doc_contents = 'UNABLE TO OPEN FILE'  # TODO use params
                if isinstance(doc_contents, str):
                    try:
                        doc_contents = doc_contents.decode('utf8')  # horrible hack, turns out I have a dirty mix of encodings in my notes collection
                    except UnicodeDecodeError:
                        doc_contents = doc_contents.decode(note_encoding)

                ## TODO Whoosh fragmenter is OK, but not that great. E.g. I have document with the words "home directions" at the start of the file, first line, first byte. lowercase highlighter does not highlight it
                ## Also note the Whoosh 2.2.2 highlighter shows less context :-(
                result_text_fragment = hit.highlights('content', text=doc_contents)
                result_text_fragment += '\r'  # note \r on the end, issue under window that needs to be fixed in regular search module - this is a work around
                yield (hit_path, [(1, result_text_fragment)])
    finally:
        # release reader resources even if the caller abandons the generator early
        searcher.close()
    #ix.commit()

###########################################################

import whoosh

## used to monkey patch Whoosh
from heapq import nlargest
from whoosh.reading import IndexReader


def most_frequent_terms_startswith(self, fieldid, startswith, number=5):
    """Return the top `number` most frequent terms that start with `startswith`
    in the given field, as a list of (frequency, text) tuples sorted
    most-frequent first.

    self - an IndexReader (this function is monkey patched onto IndexReader)
    fieldid - the field to look terms up in
    startswith - term prefix to expand
    number - maximum number of (frequency, text) tuples to return
    """
    # BUGFIX: the original ignored fieldid and hard coded the "content" field
    return nlargest(number,
                    ((self.frequency(fieldid, token), token)
                     for token
                     in self.expand_prefix(fieldid, startswith)))
# NOTE runtime monkey patch: every whoosh IndexReader instance gains this method
IndexReader.most_frequent_terms_startswith = most_frequent_terms_startswith  # Monkey patch Whoosh


def gen_find_top_search_terms(index_name, searchterm, maxnum=5):
    """Return the top `maxnum` most frequent index terms starting with `searchterm`
    as a list of (frequency, text) tuples -- open-search style popular term lookup.

    index_name - directory of an existing Whoosh index
    searchterm - term prefix to expand
    maxnum - maximum number of suggestions to return

    Raises SearchException if the index can not be opened.

    TODO remove and use internal to Whoosh function, added in r164 in Whoosh svn.
    AIM of gen function is to create lookup class/function that already has index open
    for now this is not being done...... possibly performance improvement!
    """
    try:
        ix = whoosh.index.open_dir(index_name)
    except IOError:
        raise SearchException('IOError on full text search index open (missing or corrupt?)')

    searcher = ix.searcher()
    try:
        # nlargest materializes a list, so it is safe to close the searcher afterwards
        return searcher.ixreader.most_frequent_terms_startswith("content", searchterm, maxnum)
    finally:
        searcher.close()

###########################################################


if __name__ == "__main__":
    ## Example

    path_to_search = "."
    search_term = 'and' # in full text search, this is often a special word and is either ignored or used as a joiner/command - ignored in whoosh if he only term used
    search_term = 'the' # in full text search, this is often a special word and is ignored - ignored in whoosh
    search_term = 'fox' # aesop fables extract from Lupy
    
    import tempfile
    tempdir = tempfile.mkdtemp() ## other idea is to use (system) tmp or home directory and then md5hash the note dir and use the hexdigest form as the dir/file in the tmp/home dir
    print 'temp dir:', tempdir
    index_name = os.path.join(tempdir, 'ftsindex')
    print 'index:', index_name
    file_count = index_content(index_name, path_to_search, progess_callback=example_progess_callback)
    print 'indexed %d documents' % file_count
    print '.. in index', index_name
    #shutil.rmtree(tempdir)
    
    # search
    """
    from pytombo.tombonote import gen_caching_get_password, gen_static_password, remove_leading_path
    caching_console_password_prompt = gen_caching_get_password(dirname=path_to_search).gen_func()
    """
    caching_console_password_prompt=None
    #orig_path_to_search = path_to_search
    #path_to_search = index_name
    try:
        #for hit in search_iter(search_term, path_to_search, search_term_is_a_regex=False, ignore_case=True, search_encrypted=True, get_password_callback=caching_console_password_prompt):
        for hit in search_iter(search_term, index_name, search_term_is_a_regex=False, ignore_case=True, search_encrypted=True, get_password_callback=caching_console_password_prompt):
            filename, hit_detail = hit
            filename = tombonote.remove_leading_path(path_to_search, filename)
            for result_hit_line, result_hit_text in hit_detail:
                print '%s:%d:%s' % (filename, result_hit_line, result_hit_text)
    except SearchCancelled, info:
        print 'search cancelled because of', info
    
    shutil.rmtree(tempdir)

