package old;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.ObjectInputStream.GetField;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermVectorMapper;
import org.apache.lucene.index.TermVectorOffsetInfo;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.regex.SpanRegexQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 * Demo indexer/searcher: indexes every file under {@code dataDir} into a
 * Lucene index at {@code indexDir}, runs a SpanRegexQuery for the term
 * "die" against the "content" field, and for each span match prints the
 * terms found within a fixed-size position window around the match.
 **/

public class Indexer2 {

	// public static String indexDir = "/local/joofeit/out/luceneIndex";
	public static String indexDir = "/home/joo/Desktop/testDir/luceneIndex";
	// public static String dataDir = "/local/joofeit/testDir4/";
	public static String dataDir = "/home/joo/Desktop/testDir/dataDir";

	/**
	 * Indexes every file under {@link #dataDir} into a fresh Lucene index at
	 * {@link #indexDir}, then searches the "content" field with a
	 * SpanRegexQuery for "die" and prints a window of surrounding terms for
	 * every span match, timing the whole run.
	 *
	 * @param args ignored
	 * @throws IOException on any index or file I/O failure
	 * @throws ParseException kept in the signature for source compatibility;
	 *         no query parser is used in the current code path
	 */
	public static void main(String[] args) throws IOException, ParseException {
		long start = System.currentTimeMillis();
		System.out.println("Start : " + start);

		System.out.println("FSDirectory");

		Directory dir = FSDirectory.open(new File(indexDir));

		System.out.println("Directory created");

		IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(
				Version.LUCENE_30), true, IndexWriter.MaxFieldLength.UNLIMITED);
		writer.setRAMBufferSizeMB(2048);
		try {
			File[] files = new File(dataDir).listFiles();
			// listFiles() returns null (not an empty array) when dataDir is
			// missing or unreadable; fail with a clear message instead of an
			// NPE at files.length.
			if (files == null) {
				throw new FileNotFoundException(
						"Cannot list data directory: " + dataDir);
			}
			System.out.println("Files length " + files.length);
			for (int i = 0; i < files.length; i++) {
				System.out.println(">>Files length: " + files.length
						+ " Index : " + i);
				File f = files[i];
				Reader br = new BufferedReader(new FileReader(f));
				Document doc = getDocument(f);

				Field id = new Field("id", "doc_" + i, Field.Store.YES,
						Field.Index.NOT_ANALYZED_NO_NORMS);
				doc.add(id);

				// Reader-valued field with term vectors + positions/offsets;
				// the positions are required by the window mapping below.
				// NOTE(review): we rely on Lucene to consume and close the
				// reader during addDocument() -- confirm for this version.
				Field text = new Field("content", br,
						Field.TermVector.WITH_POSITIONS_OFFSETS);
				doc.add(text);
				writer.addDocument(doc);

				List<Fieldable> fields = doc.getFields();
				// stringValue() is null for Reader-backed fields.
				System.out.println("String Value: " + text.stringValue());
				for (Fieldable x : fields) {
					System.out.println("Field: " + x.name());
				}
			}
		} finally {
			// Always commit/close the writer, even if indexing fails midway.
			writer.close();
		}

		// Search the index we just wrote; close the searcher when done.
		IndexSearcher searcher = new IndexSearcher(dir);
		try {
			IndexReader reader = searcher.getIndexReader();
			System.out.println("Parser created");
			System.out.println("Num DOCS: " + reader.numDocs());

			// Rewrite the regex query into a concrete SpanQuery bound to
			// this reader so we can pull Spans from it afterwards.
			SpanQuery fleeceQ = (SpanQuery) new SpanRegexQuery(new Term(
					"content", "die")).rewrite(reader);

			System.out.println("Query : " + fleeceQ.toString());

			TopDocs results = searcher.search(fleeceQ, 10);

			for (int i = 0; i < results.scoreDocs.length; i++) {
				ScoreDoc scoreDoc = results.scoreDocs[i];
				System.out.println("Score Doc: " + scoreDoc);
			}

			Spans spans = fleeceQ.getSpans(reader);
			WindowTermVectorMapper2 tvm = new WindowTermVectorMapper2();
			int window = 23; // collect terms within this many positions

			while (spans.next()) {
				System.out.println("Doc: " + spans.doc() + " Start: "
						+ spans.start() + " End: " + spans.end());
				// Window is [start, end) in token positions around the span.
				tvm.start = spans.start() - window + 1;
				tvm.end = spans.end() + window - 1;

				System.out.println("BEFORE!!!!!!!!!!!!!!!!!!!!!!!!!!!");
				System.out.println(">>>IS EMPTY: " + tvm.entries.isEmpty());

				// The mapper fills tvm.entries with the terms that fall
				// inside the window for this document.
				reader.getTermFreqVector(spans.doc(), "content", tvm);
				System.out.println("AFTER!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");

				String[] sentence = new String[window * 2 - 1];
				for (WindowEntry2 entry : tvm.entries.values()) {
					for (Object posObj : entry.positions) {
						int p = (Integer) posObj - tvm.start;
						System.out.println(p);
						// Guard: a multi-term span can push p past the end
						// of the array; skip such positions rather than
						// crash with ArrayIndexOutOfBoundsException.
						if (p >= 0 && p < sentence.length) {
							sentence[p] = entry.term;
						}
					}
					System.out.println("Entry: " + entry);
				}

				// Reassemble the window terms in position order.
				StringBuilder sentString = new StringBuilder();
				for (String s : sentence) {
					if (s != null) {
						sentString.append(s).append(' ');
					}
				}
				System.out.println("!!!!!: " + sentString);

				// Clear out the entries for the next match.
				tvm.entries.clear();
			}
		} finally {
			searcher.close();
		}

		long end = System.currentTimeMillis();
		System.out.println("End: " + end);

		long millis = end - start;
		String time = String.format("%d min, %d sec",
				TimeUnit.MILLISECONDS.toMinutes(millis),
				TimeUnit.MILLISECONDS.toSeconds(millis)
						- TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS
								.toMinutes(millis)));
		System.out.println("Time needed : " + time);
	}

	// ////////////////////////////////////////////////////////////////////////////
	/**
	 * Builds a Document for one file: an analyzed "contents" field read from
	 * the file body plus a stored, non-analyzed "filename" field.
	 * NOTE(review): the field name here is "contents", while main() indexes
	 * and searches "content" -- the two are never reconciled; confirm which
	 * name is intended.
	 *
	 * @param f the file to turn into a document
	 * @return the populated document
	 * @throws IOException if the canonical path cannot be resolved
	 */
	static Document getDocument(File f) throws IOException {
		Document doc = new Document();
		doc.add(new Field("contents", new FileReader(f)));
		doc.add(new Field("filename", f.getCanonicalPath(), Field.Store.YES,
				Field.Index.NOT_ANALYZED));
		return doc;
	}
}

// Not thread-safe
class WindowTermVectorMapper2 extends TermVectorMapper {

	int start;
	int end;
	LinkedHashMap<String, WindowEntry2> entries = new LinkedHashMap<String, WindowEntry2>();

	public void map(String term, int frequency, TermVectorOffsetInfo[] offsets,
			int[] positions) {
		System.out.println(">>>>>>>>>>>>>>>>>HIER");

		for (int i = 0; i < positions.length; i++) {// unfortunately, we still
			// have to loop over the
			// positions
			// we'll make this inclusive of the boundaries
			if (positions[i] >= start && positions[i] < end) {
				WindowEntry2 entry = entries.get(term);
				if (entry == null) {
					entry = new WindowEntry2(term);
					entries.put(term, entry);
				}
				entry.positions.add(positions[i]);
			}
		}

	}

	public void setExpectations(String field, int numTerms,
			boolean storeOffsets, boolean storePositions) {
		// do nothing for this example
		// See also the PositionBasedTermVectorMapper.
	}

}

/**
 * One term observed inside the window, together with every token position
 * at which it occurred (a term can appear more than once within a window).
 */
class WindowEntry2 {
	final String term;
	// Document token positions of this term inside the window; duplicates
	// across windows are possible, so a List (not a Set) is used.
	final List<Integer> positions = new ArrayList<Integer>();

	WindowEntry2(String term) {
		this.term = term;
	}

	@Override
	public String toString() {
		return "WindowEntry2{" + "term='" + term + '\'' + ", positions="
				+ positions + '}';
	}

}