import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

import java.io.IOException;

import org.json.simple.*;

public class Principal 
{
	/**
	 * Builds a small in-memory Lucene index of sample tweets and runs a
	 * single query against it, printing the matching documents.
	 *
	 * @param args optional; args[0] is used as the query string, otherwise
	 *             the default query "nations" is used
	 * @throws IOException    if the index cannot be written or read
	 * @throws ParseException if the query string cannot be parsed
	 */
	public static void main(String[] args) throws IOException, ParseException 
	{
		// The analyzer tokenizes the text. The SAME analyzer must be used
		// for both indexing and searching, otherwise terms will not match.
		StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);

		// Show the stop words the analyzer currently filters out.
		System.out.print(StandardAnalyzer.STOP_WORDS_SET.toString());

		// 1. Create the index (in memory only; nothing is persisted).
		Directory index = new RAMDirectory();

		IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_35, analyzer);

		// Index a handful of hard-coded sample tweets. In a real
		// application these would come from an external data source.
		IndexWriter w = new IndexWriter(index, config);
		try {
			addDoc(w, "Tensions are at an all-time high as the @AmericanIdol Hollywood Round continues, Tonight at 8/7c. #Idol");
			addDoc(w, "Launched 2+ years ago, Google Public DNS is now the largest public DNS service in the world, handling 70B+ requests/day http://t.co/A5InMkeI");
			addDoc(w, "Johnny Cash museum set for downtown Nashville http://t.co/zaDabK4j");
			addDoc(w, "#rugby PETER JACKSON: Wales returning to Twickenham as top dogs #RBS6Nations http://t.co/yn54OtQr");
			addDoc(w, "Al Gore Compares Investing in Oil and Gas to Subprime Mortgage Crises and Lays Out \u201CA Manifesto for Sustainable ... http://t.co/d5rv4zjY");
			addDoc(w, "Verizon Offering Gift Cards For Phone Trade-Ins http://t.co/VuEPcVVY");
			addDoc(w, "Bobby Brown -- Fighting Whitney Houston's Family to See Bobbi Kristina http://t.co/ZYa9WC6m");
			//addDoc(w, "On #InternationalWomensDay, we are proud to present our new Francesca Woodman exhibition opening Mar 16: http:\/\/t.co\/AyuRH1OF");
			//addDoc(w, "RT @theartwolf: Piet Mondrian was born on this day 140 years ago. His works at MoMA illustrate his artistic evolution http:\/\/t.co\/KlikQjSH");
			addDoc(w, "@CameronMiquelon College really sucks the money out of you doesn't it?! University tuition fees are still on the rise in the UK, crazy!");
		} finally {
			// Commits pending documents and releases the writer's lock,
			// even if one of the addDoc calls throws.
			w.close();
		}

		// 2. Build the query; allow it to be overridden from the command line.
		System.out.println("Primera Busqueda");
		String querystr = args.length > 0 ? args[0] : "nations";

		// BUG FIX: documents are indexed under the field "title" (see
		// addDoc), so the parser must target "title". The previous value
		// "text" referenced a field that does not exist, so every search
		// returned zero hits.
		Query q = new QueryParser(Version.LUCENE_35, "title", analyzer).parse(querystr);

		// 3. Search: collect the top-scoring documents.
		int hitsPerPage = 10;
		IndexReader reader = IndexReader.open(index);
		IndexSearcher searcher = new IndexSearcher(reader);
		try {
			// true = docs within equal scores are returned in index order
			TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
			searcher.search(q, collector);
			ScoreDoc[] hits = collector.topDocs().scoreDocs;

			// 4. Print the results to the console.
			System.out.println("Found " + hits.length + " hits.");
			for (int i = 0; i < hits.length; ++i) {
				int docId = hits[i].doc;
				Document d = searcher.doc(docId);
				System.out.println((i + 1) + ". " + d.get("title"));
			}
		} finally {
			searcher.close();
			// In Lucene 3.x a searcher built from an externally-opened
			// reader does NOT close that reader; close it explicitly.
			reader.close();
		}
	}

	/**
	 * Adds one document to the index, storing and analyzing the given
	 * text under the "title" field (the same field the search targets).
	 *
	 * @param w     open IndexWriter to receive the document
	 * @param value raw tweet text to index
	 * @throws IOException if the writer fails to add the document
	 */
	private static void addDoc(IndexWriter w, String value) throws IOException {
		Document doc = new Document();
		doc.add(new Field("title", value, Field.Store.YES, Field.Index.ANALYZED));
		w.addDocument(doc);
	}
}
