package br.cos.ufrj.bd3.control;
import java.io.File;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;

import br.cos.ufrj.bd3.model.Extrato;
import br.cos.ufrj.bd3.model.Frase;
import br.cos.ufrj.bd3.model.Noticia;
import br.cos.ufrj.bd3.model.Paragrafo;
import br.cos.ufrj.bd3.model.SubTitulo;
import br.cos.ufrj.bd3.model.Titulo;


public class IndexadorLucene {

	public static final String INDEX_NOTICIA = "indexNoticia";
	public static final String INDEX_NOTICIA_ANOTADA = "indexNoticiaAnotada";
	public static final String INDEX_NOTICIA_SENTENCA = "indexNoticiaSentenca";	
	public static final String INDEX_NOTICIA_SENTENCA_ANOTADA = "indexNoticiaAnotadaSentenca";

	/**
	 * Indexes every news item of every extract as one Lucene document with the
	 * fields "id" (stored, not indexed), "conteudo" (all sentences of all
	 * paragraphs joined with spaces), "subTitulo" and "titulo".
	 *
	 * If the index directory did not exist it is created and a fresh index is
	 * built; otherwise documents are appended to the existing index.
	 *
	 * @param listaExtratos extracts whose news items will be indexed
	 * @throws Exception on any Lucene/IO failure
	 */
	public void indexarNoticias(List<Extrato> listaExtratos) throws Exception {

		File index = new File(INDEX_NOTICIA);

		// Brazilian-Portuguese analyzer with the project's stop-word list.
		Analyzer a = new BrazilianAnalyzer(ParserCETENFolha.listaStopWords("stopwords.txt"));

		// mkdir() == true means the directory was just created -> build a new
		// index; false means it already existed -> open in append mode.
		boolean criarNovo = index.mkdir();
		IndexWriter indexWriter = new IndexWriter(index, a, criarNovo, new MaxFieldLength(10000000));

		try {
			for (Extrato e : listaExtratos) {
				for (Noticia n : e.getListaNoticias()) {

					Document document = new Document();

					// Stored for retrieval only; not searchable.
					document.add(new Field("id", n.getId() + "", Field.Store.YES, Field.Index.NO));

					// Concatenate every sentence of every paragraph into the body field.
					StringBuilder builder = new StringBuilder();
					for (Paragrafo p : n.getListaParagrafos()) {
						for (Frase f : p.getListaFrases()) {
							builder.append(f.getConteudo()).append(' ');
						}
					}
					document.add(new Field("conteudo", builder.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

					SubTitulo subTitulo = n.getSubTitulo();
					String conteudoSubTitulo = subTitulo != null ? subTitulo.getConteudo() : "";
					document.add(new Field("subTitulo", conteudoSubTitulo, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

					Titulo titulo = n.getTitulo();
					String conteudoTitulo = titulo != null ? titulo.getConteudo() : "";
					document.add(new Field("titulo", conteudoTitulo, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

					indexWriter.addDocument(document);
				}
			}
			indexWriter.optimize();
		} finally {
			// Always release the Lucene write lock, even if indexing fails midway.
			indexWriter.close();
		}
	}

	/**
	 * Indexes each sentence of each news item as its own Lucene document,
	 * recording the news id ("noticia"), paragraph index ("indParagrafo") and
	 * sentence index within the paragraph ("indFrase") alongside the analyzed
	 * sentence text ("sentenca").
	 *
	 * Unlike {@link #indexarNoticias(List)}, this always rebuilds the index
	 * from scratch (create == true).
	 *
	 * @param listaNoticias news items whose sentences will be indexed
	 * @throws Exception on any Lucene/IO failure
	 */
	public void indexarNoticiasSentencas(List<Noticia> listaNoticias) throws Exception {

		File index = new File(INDEX_NOTICIA_SENTENCA);

		Analyzer a = new BrazilianAnalyzer(ParserCETENFolha.listaStopWords("stopwords.txt"));

		// Ensure the directory exists; any pre-existing index is overwritten below.
		index.mkdir();

		IndexWriter indexWriter = new IndexWriter(index, a, true, new MaxFieldLength(10000000));

		try {
			for (Noticia n : listaNoticias) {
				int indParagrafo = 0;
				for (Paragrafo p : n.getListaParagrafos()) {
					int indFrase = 0;
					for (Frase f : p.getListaFrases()) {

						Document document = new Document();
						// Positional metadata: stored only, not searchable.
						document.add(new Field("noticia", n.getId() + "", Field.Store.YES, Field.Index.NO));
						document.add(new Field("indParagrafo", indParagrafo + "", Field.Store.YES, Field.Index.NO));
						document.add(new Field("indFrase", indFrase++ + "", Field.Store.YES, Field.Index.NO));
						document.add(new Field("sentenca", f.getConteudo(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

						indexWriter.addDocument(document);
					}
					indParagrafo++;
				}
			}
			indexWriter.optimize();
		} finally {
			// Always release the Lucene write lock, even if indexing fails midway.
			indexWriter.close();
		}
	}

	/**
	 * Debug helper: tokenizes {@code texto2} with the given analyzer and prints
	 * the resulting terms, space-separated, to stdout.
	 *
	 * NOTE(review): relies on the deprecated Lucene 2.x
	 * {@code TokenStream.next(Token)} API; the original ignored its return
	 * value and only terminated because a fresh Token was allocated each
	 * iteration — here the null return is checked explicitly.
	 *
	 * @param a      analyzer used to tokenize the text
	 * @param texto2 raw text to tokenize and print
	 */
	public static void imprimeTransformado(Analyzer a, String texto2) {

		StringBuilder resultante = new StringBuilder();

		try {
			TokenStream ts = a.tokenStream("", new StringReader(texto2));

			Token t = new Token();
			// next(Token) returns null once the stream is exhausted.
			while ((t = ts.next(t)) != null) {
				String s = t.term();
				if (s.isEmpty()) {
					break;
				}
				resultante.append(' ').append(s);
			}

		} catch (Exception e) {
			e.printStackTrace();
		}

		System.out.println(resultante.toString());
	}


	/**
	 * Indexes annotated news: each Noticia becomes one document whose
	 * "conteudo", "subTitulo" and "titulo" fields contain only the terms found
	 * between the first '[' and ']' of each annotated line, with '='-joined
	 * multi-word terms expanded to spaces.
	 *
	 * If the index directory did not exist it is created and a fresh index is
	 * built; otherwise documents are appended to the existing index.
	 *
	 * @param listaExtratos extracts whose annotated news items will be indexed
	 * @throws Exception on any Lucene/IO failure
	 */
	public void indexarNoticiasAnotada(List<Extrato> listaExtratos) throws Exception {

		File index = new File(INDEX_NOTICIA_ANOTADA);

		Analyzer a = new StandardAnalyzer(ParserCETENFolha.listaStopWords("stopwords.txt"));

		// mkdir() == true -> fresh directory, build a new index; otherwise append.
		boolean criarNovo = index.mkdir();
		IndexWriter indexWriter = new IndexWriter(index, a, criarNovo, new MaxFieldLength(10000000));

		try {
			for (Extrato e : listaExtratos) {
				for (Noticia n : e.getListaNoticias()) {

					Document document = new Document();

					// Stored for retrieval only; not searchable.
					document.add(new Field("id", n.getId() + "", Field.Store.YES, Field.Index.NO));

					// Body: annotated terms of every sentence of every paragraph.
					StringBuilder builder = new StringBuilder();
					for (Paragrafo p : n.getListaParagrafos()) {
						for (Frase f : p.getListaFrases()) {
							builder.append(extrairTermosAnotados(f.getConteudo()));
						}
					}
					document.add(new Field("conteudo", builder.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

					SubTitulo subTitulo = n.getSubTitulo();
					String conteudoSubTitulo = subTitulo != null ? subTitulo.getConteudo() : "";
					document.add(new Field("subTitulo", extrairTermosAnotados(conteudoSubTitulo), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

					Titulo titulo = n.getTitulo();
					String conteudoTitulo = titulo != null ? titulo.getConteudo() : "";
					document.add(new Field("titulo", extrairTermosAnotados(conteudoTitulo), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

					indexWriter.addDocument(document);
				}
			}
			indexWriter.optimize();
		} finally {
			// Always release the Lucene write lock, even if indexing fails midway.
			indexWriter.close();
		}
	}

	/**
	 * Extracts the annotated terms of {@code texto}: for each newline-separated
	 * line containing both '[' and ']', takes the text between the first '['
	 * and the first ']' and replaces '=' (the multi-word joiner) with spaces.
	 * Each extracted term is followed by a single space in the result.
	 *
	 * @param texto annotated text, one annotation per line
	 * @return space-terminated extracted terms (may be empty)
	 */
	private static String extrairTermosAnotados(String texto) {
		StringBuilder builder = new StringBuilder();
		StringTokenizer tokenizer = new StringTokenizer(texto, "\n");
		while (tokenizer.hasMoreTokens()) {
			String token = tokenizer.nextToken();

			int inicioString = token.indexOf("[");
			int terminoString = token.indexOf("]");

			if (inicioString != -1 && terminoString != -1) {
				String termo = token.substring(inicioString + 1, terminoString);
				builder.append(termo.replaceAll("=", " ")).append(' ');
			}
		}
		return builder.toString();
	}
	
	/**
	 * Indexes each annotated sentence as its own Lucene document, recording the
	 * news id ("noticia"), paragraph index ("indParagrafo") and sentence index
	 * ("indFrase"). The "sentenca" field holds only the terms found between
	 * '[' and ']' on each annotated line, with '=' expanded to spaces.
	 *
	 * Always rebuilds the index from scratch (create == true).
	 *
	 * @param listaNoticias annotated news items whose sentences will be indexed
	 * @throws Exception on any Lucene/IO failure
	 */
	public void indexarNoticiasSentencasAnotadas(List<Noticia> listaNoticias) throws Exception {

		File index = new File(INDEX_NOTICIA_SENTENCA_ANOTADA);

		Analyzer a = new StandardAnalyzer(ParserCETENFolha.listaStopWords("stopwords.txt"));

		// Ensure the directory exists; any pre-existing index is overwritten below.
		index.mkdir();

		IndexWriter indexWriter = new IndexWriter(index, a, true, new MaxFieldLength(10000000));

		try {
			for (Noticia n : listaNoticias) {
				int indParagrafo = 0;
				for (Paragrafo p : n.getListaParagrafos()) {
					int indFrase = 0;
					for (Frase f : p.getListaFrases()) {

						Document document = new Document();
						// Positional metadata: stored only, not searchable.
						document.add(new Field("noticia", n.getId() + "", Field.Store.YES, Field.Index.NO));
						document.add(new Field("indParagrafo", indParagrafo + "", Field.Store.YES, Field.Index.NO));
						document.add(new Field("indFrase", indFrase++ + "", Field.Store.YES, Field.Index.NO));

						// Keep only the bracketed (annotated) term of each line,
						// expanding '='-joined multi-word terms to spaces.
						StringBuilder builder = new StringBuilder();
						StringTokenizer tokenizer = new StringTokenizer(f.getConteudo(), "\n");
						while (tokenizer.hasMoreTokens()) {
							String token = tokenizer.nextToken();

							int inicioString = token.indexOf("[");
							int terminoString = token.indexOf("]");

							if (inicioString != -1 && terminoString != -1) {
								String termo = token.substring(inicioString + 1, terminoString);
								builder.append(termo.replaceAll("=", " ")).append(' ');
							}
						}
						document.add(new Field("sentenca", builder.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));

						indexWriter.addDocument(document);
					}
					indParagrafo++;
				}
			}
			indexWriter.optimize();
		} finally {
			// Always release the Lucene write lock, even if indexing fails midway.
			indexWriter.close();
		}
	}

	

	/**
	 * Builds a query string from a tagged question, keeping the surface form:
	 * for each line, takes the text that precedes the first '[', expands
	 * '='-joined multi-word terms to spaces and, when {@code temPeso} is true,
	 * appends a boost suffix according to the line's POS tag
	 * (PROP -> ^5, N -> ^4, ADJ -> ^3, V/VFIN -> ^2).
	 *
	 * Lines with no '[' or whose bracketed part is 2 characters or shorter
	 * (']' minus '[' positions) are skipped.
	 *
	 * @param pergunta tagged question, one token annotation per line
	 * @param temPeso  whether to append per-POS boost suffixes
	 * @return the assembled query terms, each followed by a space
	 */
	public static String ponderaPerguntaNaoAnotada(String pergunta, boolean temPeso) {
		StringBuilder consulta = new StringBuilder();
		StringTokenizer linhas = new StringTokenizer(pergunta, "\n");

		while (linhas.hasMoreTokens()) {
			String linha = linhas.nextToken();

			int fecha = linha.indexOf("]");
			int abre = linha.indexOf("[");

			// Guard: require a '[' and a bracketed span longer than 2 positions.
			if (abre == -1 || (fecha - abre) <= 2) {
				continue;
			}

			// Surface text before the bracket; '=' joins multi-word terms.
			String termo = linha.substring(0, abre).replace("=", " ");

			if (temPeso) {
				if (linha.contains(" PROP ")) {
					termo += "^5";
				} else if (linha.contains(" N ")) {
					termo += "^4";
				} else if (linha.contains(" ADJ ")) {
					termo += "^3";
				} else if (linha.contains(" V ") || linha.contains(" VFIN ")) {
					termo += "^2";
				}
			}

			consulta.append(termo).append(' ');
		}

		return consulta.toString();
	}

	/**
	 * Builds a query string from an annotated question, keeping the annotated
	 * form: for each line, takes the text between the first '[' and ']',
	 * expands '='-joined multi-word terms to spaces and, when {@code temPeso}
	 * is true, appends a boost suffix according to the line's POS tag
	 * (PROP -> ^5, N -> ^4, ADJ -> ^3, V/VFIN -> ^2).
	 *
	 * Lines missing either bracket, or whose bracketed span is 2 characters or
	 * shorter, are skipped.
	 *
	 * @param pergunta annotated question, one token annotation per line
	 * @param temPeso  whether to append per-POS boost suffixes
	 * @return the assembled query terms, each followed by a space
	 */
	public static String ponderaPerguntaAnotada(String pergunta, boolean temPeso) {
		StringBuilder consulta = new StringBuilder();
		StringTokenizer linhas = new StringTokenizer(pergunta, "\n");

		while (linhas.hasMoreTokens()) {
			String linha = linhas.nextToken();

			int abre = linha.indexOf("[");
			int fecha = linha.indexOf("]");

			// Guard: require both brackets and a span longer than 2 positions.
			if (abre == -1 || fecha == -1 || (fecha - abre) <= 2) {
				continue;
			}

			// Bracketed (lemmatized) term; '=' joins multi-word terms.
			String termo = linha.substring(abre + 1, fecha).replace("=", " ");

			if (temPeso) {
				if (linha.contains(" PROP ")) {
					termo += "^5";
				} else if (linha.contains(" N ")) {
					termo += "^4";
				} else if (linha.contains(" ADJ ")) {
					termo += "^3";
				} else if (linha.contains(" V ") || linha.contains(" VFIN ")) {
					termo += "^2";
				}
			}

			consulta.append(termo).append(' ');
		}

		return consulta.toString();
	}
}
