package lbd;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.Collections;
import java.util.LinkedList;
import java.util.StringTokenizer;

import javax.swing.JTextArea;

import org.apache.lucene.analysis.snowball.SnowballAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.TermFreqVector;
import org.jfree.ui.RefineryUtilities;

import parser.Pdf;
import parser.Xml;
import test.PieChart3D;
import edu.smu.tspell.wordnet.Synset;
import edu.smu.tspell.wordnet.WordNetDatabase;
import gui.settings.ConfigFile;

/**
 * The Class Lbd.
 */
public class Lbd {

	public final static String	DOCS_DIRECTORY		= "documents/" ;
	public final static String	INDEX				= "index" ;

	/**
	 * Location of the English stop-word list consumed by {@link CustomAnalyzer}.
	 * Built with the two-argument File constructor so the path separator is
	 * correct on every OS (the original hard-coded a Windows backslash).
	 */
	private static File stopwordsFile() {
		return new File("stopwords", "englishSW.txt");
	}

	/**
	 * Builds the Lucene document for a single file: its path and name are
	 * stored untokenized, its parsed text is tokenized; all three fields keep
	 * term vectors so per-document frequencies can be queried later.
	 *
	 * @param file      the file to represent
	 * @param extension the file's extension, used to pick the parser
	 * @return the populated document
	 */
	private static Document buildDocument(File file, String extension) {

		Document doc = new Document();

		doc.add(new Field(	"doc_path",
							file.getPath(),
							Field.Store.YES,
							Field.Index.UN_TOKENIZED,
							Field.TermVector.YES));

		doc.add(new Field(	"file_name",
							file.getName(),
							Field.Store.YES,
							Field.Index.UN_TOKENIZED,
							Field.TermVector.YES));

		doc.add(new Field(	"text",
							parseFile(file, extension),
							Field.Store.YES,
							Field.Index.TOKENIZED,
							Field.TermVector.YES));

		return doc;
	}

	/**
	 * Indexes the given files into a freshly created {@link #INDEX}.
	 *
	 * @param fileList  the files to index
	 * @param extension the files' extension (".pdf" or ".xml")
	 */
	public static void indeces(File[] fileList, String extension) { 
		
		try{
			
			CustomAnalyzer analyzer = new CustomAnalyzer(stopwordsFile(), false);
			// true -> create a new index, overwriting any existing one
			IndexWriter writer = new IndexWriter( INDEX, analyzer, true);
			
			for( int i = 0; i < fileList.length ; i ++ )
			{
				writer.addDocument(buildDocument(fileList[i], extension));
			}
			
			// Optimize once after all documents are added; the original
			// optimized inside the loop, forcing a full merge per document.
			writer.optimize();
			writer.close();
			
		} catch(Exception e) { e.printStackTrace(); }
		
	}

	/**
	 * Indexes the given single file into a freshly created {@link #INDEX}.
	 *
	 * @param file      the file to index
	 * @param extension the file's extension (".pdf" or ".xml")
	 */
	public static void indeces(File file, String extension) { 
		
		try{
			
			CustomAnalyzer analyzer = new CustomAnalyzer(stopwordsFile(), false);
			// true -> create a new index, overwriting any existing one
			IndexWriter writer = new IndexWriter( INDEX, analyzer, true);
			
			writer.addDocument(buildDocument(file, extension));
			writer.optimize();	
			writer.close();
			
		} catch(Exception e) { e.printStackTrace(); }
		
	}
	
	/**
	 * Parses the given file with the parser matching its extension.
	 * 
	 * @param file to parse
	 * @param extension of the file (".pdf" or ".xml"; anything else yields "")
	 * 
	 * @return string representing the parsed file, or "" on failure
	 */
	private static String parseFile(File file, String extension) {
		
		try{
			if(extension.equals(".pdf")) {
				
				return Pdf.getInstance().parseFile(file.getPath());
				
			}
			if(extension.equals(".xml")) {
				
				return Xml.getInstance().parseFile(file.getPath());
				
			}
		} catch(Exception e) { e.printStackTrace(); }
		return "";
	}
	
	/**
	 * Get the given document's terms list ordered by frequency.
	 * 
	 * NOTE(review): like the original, this reads from the {@link #INDEX}
	 * constant, not the {@code index} parameter — confirm before changing.
	 * 
	 * @param index The index's name (currently unused, see note)
	 * @param documentNum The document's number
	 * 
	 * @return The ordered terms list (empty if the document has no term vector)
	 * 
	 * @throws IOException The index doesn't exist
	 */
	public static LinkedList<TermFreq> getTermsOrderByFreq(String index, int documentNum) throws IOException {
		
		LinkedList<TermFreq> results = new LinkedList<TermFreq>();
		
		if( !IndexReader.indexExists(INDEX) )
		{
			throw new IOException();
		}
		
		try {
			
			IndexReader reader = IndexReader.open(INDEX);
			try {
				
				TermFreqVector terms = reader.getTermFreqVector(documentNum,"text");
				
				// Guard: documents without a term vector return null here;
				// the original NPE'd and swallowed it silently.
				if( terms != null )
				{
					String[] termTexts = terms.getTerms();
					int[] termFreqs = terms.getTermFrequencies();
					
					for( int i = 0 ; i < termTexts.length ; i++ )
						results.add(new TermFreq(termTexts[i], termFreqs[i]));
					
					Collections.sort(results, new TermFreqComparator());
				}
				
			} finally {
				reader.close(); // the original leaked the reader
			}
			
		} catch (Exception e) {
			e.printStackTrace(); // the original swallowed this silently
		}
		
		return results;
		
	}

	/**
	 * Checks whether the candidate may be appended to the results: false when
	 * synonym tracking is enabled and the candidate already appears as a
	 * synonym of some term kept so far (avoids double-counting synonyms).
	 */
	private static boolean mayAddTerm(LinkedList<TermFreq> results, TermFreq candidate) {
		if(ConfigFile.getInstance().isADD_SYNONYMS())
			for(int a= 0 ; a < results.size() ; a++) 
				if(isInTheTermList(results.get(a).synonymTermsList, candidate.getTerm()) != -1)
					return false;
		return true;
	}

	/**
	 * Filter the terms list by frequency and/or by maximum number.
	 * 
	 * @param toFilter The terms list to filter
	 * @param maxNumber The maximum number limit
	 * @param minFreq The minimum frequency limit
	 * 
	 * @return The filtered terms list
	 */
	public static LinkedList<TermFreq> filterTermsList(LinkedList<TermFreq> toFilter, int maxNumber, int minFreq) {
		
		LinkedList<TermFreq> results = new LinkedList<TermFreq>();
		
		for( int i = 0 ; i < toFilter.size() ; i++ )
		{	
			if( results.size() == maxNumber ) {
				return results;
			}
			
			TermFreq candidate = toFilter.get(i);
			if( candidate.getFreq() < minFreq )
				continue;
			
			if(ConfigFile.getInstance().isSTEMMING_RESULTS())
			{
				int index = existASameStemWord(results, candidate);
				
				if( index == -1 ) {
					
					if(mayAddTerm(results, candidate))			
						results.add(candidate);
					
				} else {
					
					// A term with the same stem is already kept: merge the
					// candidate's count into it instead of adding a duplicate.
					results.get(index).setFreq(results.get(index).getFreq() + candidate.getFreq());			 
					results.get(index).freqUsed = true;
				}	
			}
			else if(mayAddTerm(results, candidate))
			{
				results.add(candidate);
			}
		}
		
		return results;
		
	}
	
	/**
	 * Checks if the given document's term is in the index.
	 * 
	 * NOTE(review): reads the {@link #INDEX} constant, not the {@code index}
	 * parameter, mirroring the original behavior.
	 * 
	 * @param index The index's name (currently unused, see note)
	 * @param documentNumber The document's number
	 * @param word The term to check (case-insensitive match)
	 * 
	 * @return true, if the term is in the index
	 */
	private static boolean isInTheIndex(String index, int documentNumber ,String word) {
		 
		if( IndexReader.indexExists(INDEX) )
		{
			try {
				
				IndexReader reader = IndexReader.open(INDEX);
				try {
					String terms[] = reader.getTermFreqVector(documentNumber,"text").getTerms();
					for( int i = 0 ; i <terms.length ; i++ )
						if( terms[i].equalsIgnoreCase(word)) {
							return true;
						}				
				} finally {
					reader.close(); // the original leaked the reader
				}
		
			} catch (Exception e) { e.printStackTrace(); }
		}
		
		return false;
		
	}
	
	/**
	 * Checks if the given term is in the list (exact, case-sensitive match).
	 * 
	 * @param termFreqList The terms list
	 * @param term The term to check
	 * 
	 * @return the position of the term in the list, or -1 if absent
	 */
	private static int isInTheTermList(LinkedList<TermFreq> termFreqList, String term) {
		for( int i = 0 ; i < termFreqList.size() ; i++ ) {
			if( termFreqList.get(i).getTerm().equals(term) )
						return i;
		}
		
		return -1;
		
	}
	
	/**
	 * Return the frequency of the specified term (case-insensitive match).
	 * 
	 * NOTE(review): reads the {@link #INDEX} constant, not the {@code index}
	 * parameter, mirroring the original behavior.
	 * 
	 * @param index The index's name (currently unused, see note)
	 * @param documentNumber The document's number
	 * @param term The term to look up
	 * @return the term frequency, or -1 if the term is not found
	 */
	private static int getTermFreq(String index, int documentNumber, String term) {
		
		if( IndexReader.indexExists(INDEX) )
		{
			try {
				
				IndexReader reader = IndexReader.open(INDEX);
				try {
					// Fetch the vector once; the original fetched it twice.
					TermFreqVector vector = reader.getTermFreqVector(documentNumber,"text");
					String terms[] = vector.getTerms();
					int freqs[] = vector.getTermFrequencies();
					for( int i = 0 ; i <terms.length ; i++ )
						if( terms[i].equalsIgnoreCase(term)) {
							return freqs[i];
						}				
				} finally {
					reader.close(); // the original leaked the reader
				}
		
			} catch (Exception e) { e.printStackTrace(); }
		}
		
		return -1;
		
	}
	
	/**
	 * Checks if there is a term in the list with the same stem.
	 * 
	 * @param	termList The terms list
	 * @param	term The term to match
	 * @return	-1 if there isn't a term in the list with the same stem, otherwise 
	 * 			the position of the LAST term in the list with the same stem
	 */
	public static int existASameStemWord(LinkedList<TermFreq> termList, TermFreq term) {
		
		int index = -1;
		
		// Deliberately scans the whole list (no early break) so the last
		// matching position wins, as in the original.
		for( int i = 0 ; i < termList.size() ; i++ ) {			
			if(isTheStemEquals( termList.get(i), term) )
				index = i;			
		}
		
		return index;
	}
	
	/**
	 * Checks if two terms have the same stem, using the English Snowball
	 * stemmer on the first token of each term.
	 * 
	 * @param term1 The first term
	 * @param term2 The second term
	 * @return true if both the terms have the same stem; false on any
	 *         analysis error
	 */
	private static boolean isTheStemEquals(TermFreq term1, TermFreq term2) {
		
		boolean isEquals = false;
		SnowballAnalyzer analyzer = new SnowballAnalyzer("English");
		
		try{
			
			isEquals =	analyzer.tokenStream("field",new StringReader(term1.getTerm())).next().termText().equals(
						analyzer.tokenStream("field",new StringReader(term2.getTerm())).next().termText());
		
		}catch(Exception e){
			
			e.printStackTrace();
			return isEquals;
			
		}
		
		return isEquals;
	}
	
	/**
	 * Add terms' synonyms to the list. For every term, each WordNet synonym
	 * that also occurs in the document (and is not the term itself) is
	 * attached to that term; a synonym is attached at most once overall.
	 * 
	 * @param results The terms list (mutated in place)
	 * @param documentNum The document's number
	 */
	public static void addSynonymsToTermsList(LinkedList<TermFreq> results, int documentNum)
	{

		//Loads the Wordnet database
		WordNetDatabase database = WordNetDatabase.getFileInstance();
		Synset[] synsets;
		//Synonyms already attached to some term, to avoid duplicates
		LinkedList<TermFreq> added =  new LinkedList<TermFreq>();
		
		for( int i = 0 ; i < results.size() ; i ++ )
		{	
			synsets = database.getSynsets(results.get(i).getTerm());
			for (int a = 0; a < synsets.length; a++)
			{
				String[] wordForms = synsets[a].getWordForms();
				
				for (int j = 0; j < wordForms.length; j++)
					if( isInTheIndex(INDEX, documentNum, wordForms[j]) && !wordForms[j].equalsIgnoreCase(results.get(i).getTerm()) )
						if(isInTheTermList(added, wordForms[j]) == -1)
						{
							// Look the frequency up once; the original did it
							// twice, opening the index each time.
							int freq = getTermFreq(INDEX, documentNum, wordForms[j]);
							//Adds term's synonyms that occur in the index
							added.add(new TermFreq(wordForms[j], freq));
							results.get(i).addSynonymTerm(wordForms[j], freq);
							
						}
			
			}	
		}
		
	}
	
	/**
	 * Update the frequency of the terms in the list adding the synonym
	 * frequency. Each synonym's count is folded in at most once (tracked via
	 * {@code freqUsed}). A summary line per term is appended to the text area
	 * when one is supplied.
	 * 
	 * @param results contains the term list to update (mutated in place)
	 * @param jTextArea the output area; may be null to skip reporting
	 */
	public static void updateFrequencyWithSynonyms(LinkedList<TermFreq> results,JTextArea jTextArea) {
		
		String msg = "";
		if(jTextArea!=null) jTextArea.setText("");
		for( int i = 0 ; i < results.size() ; i++) {
			if(jTextArea != null)
				msg = "Added to the term "+ results.get(i).getTerm() + "("+ results.get(i).getFreq() + "): ";
			
			for( int a = 0 ; a < results.get(i).synonymTermsList.size() ; a++ ) {
				
				if(results.get(i).synonymTermsList.get(a).freqUsed == false) {
					results.get(i).setFreq(results.get(i).getFreq() + results.get(i).synonymTermsList.get(a).getFreq());
					results.get(i).synonymTermsList.get(a).freqUsed = true;
					msg = msg + results.get(i).synonymTermsList.get(a).getTerm() + " " + results.get(i).synonymTermsList.get(a).getFreq() + " ";
				}
			}
			if(jTextArea!=null) jTextArea.setText(jTextArea.getText() + msg+ "\n");
		}
		
	}
	
	/**
	 * Adds to every term in the list its adjacent terms (the token before and
	 * the token after each occurrence in the document's stored text).
	 * 
	 * NOTE(review): reads the {@link #INDEX} constant, not the {@code index}
	 * parameter, mirroring the original behavior.
	 * 
	 * @param termFreqList The terms list (mutated in place)
	 * @param index The index name (currently unused, see note)
	 * @param documentNum The document's number
	 * @throws IOException The index doesn't exist
	 */
	public static void addAdjacentTermsToTermsList( LinkedList<TermFreq> termFreqList, String index, int documentNum ) throws IOException {
		
		if( !IndexReader.indexExists(INDEX) )
		{
			throw new IOException();
		}
		
		try {

			IndexReader reader = IndexReader.open(INDEX);		
			try {
				
				Document doc = reader.document(documentNum);
				
				StringTokenizer tokens = new StringTokenizer(doc.get("text"));
				LinkedList<String> tokensList = new LinkedList<String> ();
				
				// BUGFIX: the original looped "a < tokens.countTokens()", but
				// countTokens() returns the REMAINING count and shrinks on
				// every nextToken(), so only about half the tokens were read.
				while( tokens.hasMoreTokens() )
				{
					tokensList.add(	tokens.nextToken()
							.replace(",","")
							.replace(".","")
							.toLowerCase() );
				}
				
				for ( int i = 0 ; i < termFreqList.size() ; i++ )
				{
					for( int a = 0 ; a < tokensList.size() ; a++ )
					{
						// Only interior occurrences have both neighbors.
						if(termFreqList.get(i).getTerm().equals(tokensList.get(a)))
							if( a > 0 && a < tokensList.size() -1 )
								termFreqList.get(i).addAdjacentTerms(tokensList.get(a-1),tokensList.get(a+1));
					}
				}
				
			} finally {
				reader.close(); // the original leaked the reader
			}
			
		} catch (Exception e) {
			e.printStackTrace(); // the original swallowed this silently
		}		
	}
	
	/**
	 * Return the given document's filename.
	 * 
	 * @param index The index name
	 * @param documentNum The document number
	 * @return The document's filename, or "" on any read error
	 * @throws IOException The index doesn't exist
	 */
	public static String getDocumentFileName( String index, int documentNum ) throws IOException  {
		
		String fileName = "";
		
		// Consistency fix: the original tested indexExists(INDEX) but opened
		// the "index" parameter; both now use the parameter (every visible
		// caller passes INDEX, so behavior is unchanged for them).
		if( IndexReader.indexExists(index) )
		{
			try {

				IndexReader reader = IndexReader.open(index);						
				try {
					Document doc = reader.document(documentNum);
					fileName = doc.get("file_name");
				} finally {
					reader.close(); // the original leaked the reader
				}
				
			} catch (Exception e) {
				
				e.printStackTrace();
				return "";
			
			}		
		}
		else
		{
			throw new IOException();
		}
		
		return fileName;
	}
	
	/**
	 * Print all the terms to standard output.
	 * 
	 * @param termFreqList The terms list
	 * @param documentNum The document's number (printed 1-based)
	 * @param synonym If true print synonym terms
	 * @param adjacent If true print adjacent terms
	 */
	public static void printAllTerms( LinkedList<TermFreq> termFreqList, 
										int documentNum,
										boolean synonym,
										boolean adjacent ){
		
		System.out.println( "Document : " + ( documentNum + 1 ) ) ;
		
		System.out.println("");
		
		for( int i = 0 ; i < termFreqList.size() ; i ++ )
		{
			System.out.println(	"[" + termFreqList.get(i).getFreq() + ", " + termFreqList.get(i).getTerm() + "]" );
			
			if(synonym)
			{
				System.out.print("Synonym : ");
				
				for( int a = 0 ; a < termFreqList.get(i).synonymTermsList.size() ; a++ )
					System.out.print(termFreqList.get(i).synonymTermsList.get(a).getTerm() + " ");
				
				System.out.println("");
			}	
			
			if(adjacent)
			{
				System.out.print("Adjacent : ");
				
				for( int a = 0 ; a < termFreqList.get(i).adjacentTermsList.size() ; a++ )
					System.out.print( 	"(" + 
										termFreqList.get(i).adjacentTermsList.get(a).getPre()  +
										", " +
										termFreqList.get(i).adjacentTermsList.get(a).getNext() + 
										") "	);
							
				System.out.println("");
				System.out.println("");
			}
				
		}
			
	}
	
	/**
	 * Create and show a 3D pie chart with the document's term frequencies.
	 * 
	 * @param windowTitle The chart's window title
	 * @param chartTitle The chart's title
	 * @param termList The chart data
	 */
	public static void createChart(String windowTitle, String chartTitle, LinkedList<TermFreq> termList){
		
		PieChart3D pieChart = new PieChart3D(windowTitle, chartTitle, termList);
		pieChart.pack();
        RefineryUtilities.centerFrameOnScreen(pieChart);
        pieChart.setVisible(true);
		
	}
	
	/**
	 * Return the term list that describes the file content, using the limits
	 * configured in {@link ConfigFile}.
	 * 
	 * @param file the file to index
	 * @param extension of the file
	 * @param jTextArea the output area; may be null
	 * @return the term list
	 */
	public static LinkedList<TermFreq> getTermList(File file, String extension, JTextArea jTextArea) {
		
		return getTermList( file, extension, ConfigFile.getInstance().getTERM_NUM(), ConfigFile.getInstance().getTERM_MIN_FREQ(), jTextArea);
		
	}
	
	/**
	 * Return the term list that describes the file content.
	 * 
	 * @param file to index
	 * @param extension of the file
	 * @param maxTermNum maximum number of terms in the list
	 * @param minTermFreq minimum term's frequency in the list
	 * @param jTextArea the output area; may be null
	 * @return the term list, or null if the index could not be read
	 */
	public static LinkedList<TermFreq> getTermList(File file, String extension, int maxTermNum, int minTermFreq, JTextArea jTextArea) {
		
		LinkedList<TermFreq> results = null;
		
		//Indexes the file into a fresh index
		Lbd.indeces( file, extension ); 
		
		try {
			
			//Checks if the index exists
			if( IndexReader.indexExists(INDEX) )
			{
			
				IndexReader reader = IndexReader.open(INDEX);
				try {
					
					//Runs the following iteration for every document in the index
					//(results keeps only the last document's list, as before)
					for( int d = 0 ; d < reader.numDocs() ; d++ )
					{
						
						//Gets the terms list ordered by frequency
						results = Lbd.getTermsOrderByFreq( INDEX, d );
						//Filters the terms list with the given limits
						results = Lbd.filterTermsList( results, maxTermNum, minTermFreq );
						//Adds terms' synonyms to the terms list
						if(ConfigFile.getInstance().isADD_SYNONYMS()) {
							Lbd.addSynonymsToTermsList( results, d );
							Lbd.updateFrequencyWithSynonyms(results, jTextArea);
						}					
						//Re-orders the list by frequency
						Collections.sort( results, new TermFreqComparator() );
						
					}
					
				} finally {
					reader.close(); // the original leaked the reader
				}
					
			}
			
		} catch (Exception e) { e.printStackTrace(); }
		
		return results;
	}
}

