from org.apache.lucene.store import RAMDirectory
from org.apache.lucene.index import IndexWriter, Term
from org.apache.lucene.analysis import Analyzer, SimpleAnalyzer, WhitespaceTokenizer, LowerCaseFilter
from org.apache.lucene.analysis.standard import StandardAnalyzer, StandardTokenizer
from org.apache.lucene.document import Document, Field
from org.apache.lucene.search import IndexSearcher, Query, Hits, TermQuery, PrefixQuery
from org.apache.lucene.queryParser import QueryParser


class CustomIndex:
    """In-memory Lucene index holding documents with a stored "title"
    field and an analyzed, unstored "content" field.

    Uses a RAMDirectory, so the index lives only for the process
    lifetime; SimpleAnalyzer lower-cases and splits on non-letters.
    """

    def __init__(self):
        self.directory = RAMDirectory()
        self.analyzer = SimpleAnalyzer()
        # create=True wipes any previous index in this directory on open.
        self.writer = IndexWriter(self.directory, self.analyzer, True,
                                  IndexWriter.MaxFieldLength.UNLIMITED)

    def add(self, title, content):
        """Index one document.

        title   -- stored verbatim so it can be read back from hits
        content -- analyzed for searching only, not stored
        """
        doc = Document()
        doc.add(Field("title", title, Field.Store.YES, Field.Index.ANALYZED))
        doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
        self.writer.addDocument(doc)
        # Commit after every add keeps the index immediately searchable;
        # batch the adds and commit once if throughput ever matters.
        self.writer.commit()

    def find(self, query):
        """Parse `query` against the "content" field and return the
        legacy Lucene Hits result object."""
        parsed = QueryParser("content", self.analyzer).parse(query)
        searcher = IndexSearcher(self.directory)
        # NOTE(review): the searcher is never closed; the legacy Hits
        # object is lazy, so closing here would break iteration by the
        # caller.  Acceptable for a RAMDirectory demo -- confirm before
        # reusing with an on-disk directory.
        return searcher.search(parsed)

    
class CustomAnalyzer(Analyzer):
    """Analyzer that tokenizes on whitespace and lower-cases each token."""

    def tokenStream(self, fieldName, reader):
        """Build the token pipeline for `fieldName` read from `reader`:
        whitespace tokenization followed by a lower-case filter."""
        tokens = WhitespaceTokenizer(reader)
        return LowerCaseFilter(tokens)


if __name__ == '__main__':

    index = CustomIndex()    
    index.add("Document A", "one two three")
    index.add("Document B", "three four five")
    index.add("Document C", "one three six seven eight")
    
    hits = index.find("one")
    print "Results: %d" % hits.length()
    for hit in hits.iterator():
        print "[%f] %s" % (hit.score, hit.document.get("title"))
