import org.apache.lucene.store.Directory
import org.apache.lucene.store.RAMDirectory
import org.apache.lucene.analysis.th.ThaiAnalyzer
import org.apache.lucene.analysis.SimpleAnalyzer
import org.apache.lucene.index.IndexWriter
import org.apache.lucene.index.IndexReader
import org.apache.lucene.index.Term
import org.apache.lucene.document.Document
import org.apache.lucene.document.Field
import org.apache.lucene.search.IndexSearcher
import org.apache.lucene.search.TermQuery
import org.apache.lucene.search.BooleanQuery
import org.apache.lucene.search.BooleanClause
import org.apache.lucene.queryParser.QueryParser
import org.apache.lucene.search.similar.MoreLikeThis
import org.apache.lucene.search.similar.MoreLikeThisQuery

import java.io.File

/**
 * Exercises Lucene 2.x indexing/search with the ThaiAnalyzer: simple term
 * queries over Thai text and "more like this" similarity queries.
 * setUp() builds a fresh in-memory (RAM) index of three sample documents
 * before each test; tearDown() releases it.
 */
class LuceneTests extends GroovyTestCase {

    Directory directory
    def writer
    def analyzer
    // Sample documents: msg1 is pure Thai, msg2 pure Latin, msg3 mixes both.
    def msg1 = "ทดสอบภาษาไทย"
    def msg2 = "ODF Office Office XML ODF Office ODF"
    def msg3 = "ไมโครซอฟท์ OpenXML ไมโครซอฟท์ ODF Office Office OOXML ODF "

    void setUp() {
        directory = new RAMDirectory()
        analyzer = new ThaiAnalyzer()
        // third argument 'true' recreates the index from scratch
        writer = new IndexWriter(directory, analyzer, true)
        createDocument(msg1, "1")
        createDocument(msg2, "2")
        createDocument(msg3, "3")
        writer.close()
    }

    void tearDown() {
        directory.close()
    }

    /**
     * Adds one document to the index: a tokenized "contents" field with term
     * vectors enabled (needed by dumpTermFreq/MoreLikeThis) and an
     * untokenized "id" field for exact-match lookups.
     */
    def createDocument(content, String id) {
        def doc = new Document()
        doc.add(new Field("contents", content, Field.Store.YES,
                          Field.Index.TOKENIZED,
                          Field.TermVector.YES))
        doc.add(new Field("id", id, Field.Store.YES, Field.Index.UN_TOKENIZED))
        writer.addDocument(doc)
    }

    /** Returns the stored Document whose "id" field exactly matches id. */
    def getDocument(String id) {
        // FIX: documents are indexed under the "id" field (see createDocument),
        // not "keyword" — the original Term("keyword", id) never matched,
        // so hits.doc(0) would have thrown on the empty result.
        TermQuery qry = new TermQuery(new Term("id", id))
        def searcher = new IndexSearcher(directory)
        try {
            def hits = searcher.search(qry)
            // hits.doc(0) materializes the stored document, so it stays
            // valid after the searcher is closed.
            return hits.doc(0)
        } finally {
            searcher.close()   // release the underlying IndexReader
        }
    }

    /** Debug helper: prints each term and its frequency for document number seq. */
    def dumpTermFreq(int seq) {
        def reader = IndexReader.open(directory)
        try {
            // fetch the vector once instead of twice as before
            def vector = reader.getTermFreqVector(seq, "contents")
            def terms = vector.getTerms()
            def freqs = vector.getTermFrequencies()
            terms.eachWithIndex { term, idx ->
                System.out.println(term + "," + freqs[idx])
            }
            System.out.println("-" * 80)
        } finally {
            reader.close()
        }
    }

    /** A Thai query parsed with the ThaiAnalyzer should match only msg1. */
    void testSimpleMatch() {
        assertNotNull("directory shoud not be null", directory)

        def searcher = new IndexSearcher(directory)
        try {
            def parser = new QueryParser("contents", analyzer)
            def query = parser.parse("ภาษาไทย")
            def hits = searcher.search(query)
            assertEquals("should match only one hit", 1, hits.length)
            Document doc = hits.doc(0)
            assertEquals(msg1, doc.get("contents"))
        } finally {
            searcher.close()
        }
    }

    /**
     * "More like this" for document 2 (msg2) should surface only msg3,
     * the other document sharing the frequent ODF/Office terms; document 2
     * itself is excluded via a MUST_NOT clause on its id.
     */
    void testMoreLikeThis() {
        def reader = IndexReader.open(directory)
        def searcher = new IndexSearcher(directory)
        try {
            MoreLikeThis mlt = new MoreLikeThis(reader)
            mlt.setMinWordLen(3)
            mlt.setAnalyzer(analyzer)
            mlt.setFieldNames((String[]) ["contents"])
            mlt.setMinTermFreq(2)
            mlt.setMinDocFreq(2)

            System.out.println(mlt.describeParams())
            def mltQuery = mlt.like(2)

            // exclude the source document from its own similarity results
            def query = new BooleanQuery()
            query.add(mltQuery, BooleanClause.Occur.MUST)
            query.add(new TermQuery(new Term("id", '2')), BooleanClause.Occur.MUST_NOT)

            def hits = searcher.search(query)

            assertEquals("should match only one hit", 1, hits.length)
            assertEquals(msg3, hits.doc(0).get("contents"))
        } finally {
            // close after the assertions: Hits fetches documents lazily
            searcher.close()
            reader.close()
        }
    }
}
