package index

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Date;


// --- Configuration -------------------------------------------------------
def indexPath = "C:\\Users\\laurie\\Java\\indexes"             // Lucene index is created in this directory
def docsPath  = "C:\\Users\\laurie\\Documents\\datasets\\test" // Files under this directory are indexed

// true  -> wipe any existing index and rebuild from scratch
// false -> append to / update documents in an existing index
boolean create = true

Date start = new Date()
println("Indexing to directory '" + indexPath + "'...")

Directory dir = FSDirectory.open(new File(indexPath))
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_41)
IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_41, analyzer)
iwc.setOpenMode(create ? OpenMode.CREATE : OpenMode.CREATE_OR_APPEND)

// Optional: a larger RAM buffer speeds up bulk indexing, but the JVM max
// heap should be raised to match (e.g. -Xmx512m or -Xmx1g).
iwc.setRAMBufferSizeMB(256.0)

IndexWriter writer = new IndexWriter(dir, iwc)
try {
	// Walk the corpus and index every .txt file found.
	new File(docsPath).eachFileRecurse { f ->
		if (f.name.endsWith('.txt')) {
			indexDocs(writer, f)
		}
	}

	// NOTE: to maximize search performance you can call
	// writer.forceMerge(1) here, but it is a costly operation and only
	// worth it when the index is essentially static afterwards.
} finally {
	// Always release the index write lock and flush buffered documents,
	// even if indexing a file threw.
	writer.close()
}

Date end = new Date()
println(end.getTime() - start.getTime() + " total milliseconds")



/**
 * Indexes a single plain-text file as one Lucene document.
 *
 * The document carries four fields:
 * <ul>
 *   <li>{@code path}     - file path, indexed as one token and stored, so
 *                          hits can be mapped back to the file on disk and
 *                          used as the update key;</li>
 *   <li>{@code contents} - file text (UTF-8), tokenized and indexed but not
 *                          stored;</li>
 *   <li>{@code category} - "pos" if the path contains "pos", else "neg";</li>
 *   <li>{@code tt}       - "test" if the path contains "test", else "train".</li>
 * </ul>
 *
 * Hidden, unreadable, nonexistent files and directories are silently skipped;
 * this method does NOT recurse — the caller walks the directory tree.
 *
 * @param writer Writer to the index where the file's document will be stored
 * @param f      The plain-text file to index
 * @throws IOException If there is a low-level I/O error
 */
void indexDocs(IndexWriter writer, File f)
throws IOException {

	// Only index readable, visible, regular files.
	if (f.hidden || !f.exists() || !f.canRead() || f.directory) { return }

	println "Indexing $f.canonicalPath"
	def doc = new Document()

	FileInputStream fis = new FileInputStream(f)
	try {
		// "path": indexed (searchable) but not tokenized, and stored
		// verbatim; also serves as the unique key for updateDocument.
		Field pathField = new StringField("path", f.getPath(), Field.Store.YES)
		doc.add(pathField)

		// "contents": tokenized and indexed, not stored. Lucene consumes
		// the reader when the document is added below.
		doc.add(new TextField("contents",
				new BufferedReader(new InputStreamReader(fis, "UTF-8"))))

		// Derive the sentiment label and the train/test split from the path.
		// NOTE(review): contains("test") also matches the corpus root
		// "...\\datasets\\test", so every file may end up labelled "test" —
		// confirm the intended directory layout.
		String category = f.canonicalPath.contains("pos") ? "pos" : "neg"
		String tt = f.canonicalPath.contains("test") ? "test" : "train"
		doc.add(new StringField("category", category, Field.Store.YES))
		doc.add(new StringField("tt", tt, Field.Store.YES))

		if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
			// New index, so we just add the document (no old document can be there):
			println("adding " + f)
			writer.addDocument(doc)
		} else {
			// Existing index (an old copy of this document may have been
			// indexed), so use updateDocument to replace any old document
			// matching the exact path:
			println("updating " + f)
			writer.updateDocument(new Term("path", f.getPath()), doc)
		}
	} finally {
		// The stream was fully consumed by add/updateDocument above;
		// close it here so the file handle is never leaked.
		fis.close()
	}
}
