package text2documentvector;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.tartarus.snowball.ext.German2Stemmer;

public class Text2Documentvector {

	/**
	 * Text-to-document-vector pipeline:
	 * <ol>
	 *   <li>read a German text file ("Der Zauberlehrling"),</li>
	 *   <li>tokenize it with Lucene's StandardAnalyzer,</li>
	 *   <li>remove stop words (read from a comma-separated file),</li>
	 *   <li>stem the remaining tokens with the Snowball German2 stemmer,</li>
	 *   <li>count term frequencies,</li>
	 *   <li>compute TF-IDF values against an imported document-frequency list,</li>
	 *   <li>write the resulting document vector to a file.</li>
	 * </ol>
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {

		/***************************************************************
		 *  Read File **************************************************
		 ***************************************************************/
		FileIO fileIO = new FileIO();
		fileIO.setPathOfInputFile("src\\text2documentvector\\DerZauberlehrling.txt");
		fileIO.readFile();

		System.out.println("---------------- INPUT STRING ----------------");
		String inputStr = fileIO.getStringOfFile();
		System.out.println(inputStr);
		System.out.println("");

		/***************************************************************
		 *  Tokenize the input string **********************************
		 ***************************************************************/
		StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
		List<String> tokens;
		try {
			tokens = collectTokens(analyzer.tokenStream(null, new StringReader(inputStr)));
		} catch (IOException e) {
			throw new RuntimeException("Tokenizing input failed", e);
		}

		System.out.println("---------------- TOKENIZED WORDS ----------------");
		printWords(tokens);

		/***************************************************************
		 *  Stopword Elimination ***************************************
		 ***************************************************************/
		fileIO = new FileIO();
		fileIO.setPathOfInputFile("src\\text2documentvector\\StopWords");
		fileIO.readFile();

		// Stop word file format: words separated by commas.
		List<String> stopWords = parseStopWords(new String(fileIO.getCharArrOfFile()));

		CharArraySet casStopSet = StopFilter.makeStopSet(Version.LUCENE_40, stopWords);
		StandardAnalyzer analyzer2 = new StandardAnalyzer(Version.LUCENE_40);
		List<String> stoppedWords;
		try {
			TokenStream stream = analyzer2.tokenStream(null, new StringReader(tokens.toString()));
			// collectTokens() also ends/closes the stream — the original
			// leaked the StopFilter stream by never closing it.
			stoppedWords = collectTokens(new StopFilter(Version.LUCENE_40, stream, casStopSet));
		} catch (IOException e) {
			// Rethrow instead of swallowing with printStackTrace(): the rest
			// of the pipeline is useless without the filtered tokens.
			throw new RuntimeException("Stop word filtering failed", e);
		}

		System.out.println("");
		System.out.println("---------------- STOPWORDS ----------------");
		printWords(stoppedWords);

		/***************************************************************
		 *  Snowball Stemmer (German) **********************************
		 ***************************************************************/
		List<String> stemmedGermanWords = stemGerman(stoppedWords);

		System.out.println("");
		System.out.println("---------------- STEMMED GERMAN WORDS ----------------");
		printWords(stemmedGermanWords);

		/***************************************************************
		 *  Get Document Frequency (term frequency in this document) ***
		 ***************************************************************/
		LinkedHashMap<String, Integer> hashmapCount = countOccurrences(stemmedGermanWords);

		System.out.println("");
		System.out.println("---------------- Document Frequency ----------------");
		for (Map.Entry<String, Integer> entry : hashmapCount.entrySet()) {
			System.out.println(entry.getKey() + ":" + entry.getValue());
		}
		System.out.println("");

		/***************************************************************
		 *  Delete Duplicates & Sort Alphabetically ********************
		 ***************************************************************/
		// TreeSet yields unique words in natural (compareTo) order — same
		// result as the original contains()-dedup plus bubble sort.
		List<String> withoutDuplicates = new ArrayList<String>(new TreeSet<String>(stemmedGermanWords));

		System.out.println("");
		System.out.println("---------------- WITHOUT DUPLICATES IN ALPABETICAL ORDER ----------------");
		printWords(withoutDuplicates);

		/***************************************************************
		 *  Read Dictionnary and Document Frequency of Group 1 *********
		 ***************************************************************/
		fileIO = new FileIO();
		fileIO.setPathOfInputFile("src\\text2documentvector\\docFreqImport");
		LinkedHashMap<String, Double> importedHashmap = fileIO.readLineLinkedHashMap();

		/***************************************************************
		 *  Calculate TF IDF *******************************************
		 ***************************************************************/
		// Imported file format: the FIRST key is the total number of
		// documents; every following entry is word -> document frequency.
		Iterator<String> cursor = importedHashmap.keySet().iterator();
		int nrOfDocuments = Integer.valueOf(cursor.next());
		System.out.println("");
		System.out.println("Anzahl der Dokumente: " + nrOfDocuments);

		// idf = ln(N / df)
		LinkedHashMap<String, Double> hashmapIDF = new LinkedHashMap<String, Double>();
		while (cursor.hasNext()) {
			String word = cursor.next();
			Double docFreq = importedHashmap.get(word);
			double idf = Math.log(nrOfDocuments / docFreq);
			hashmapIDF.put(word, idf);
			System.out.println(word + " " + docFreq + " " + idf);
		}

		// tf-idf = tf * idf; words absent from this document get 0.0.
		// BUG FIX: the original computed tf - idf (subtraction) although its
		// own comment states "TF-IDF = TFIDF" (product of tf and idf).
		// Iterating hashmapIDF (not importedHashmap) keeps the document-count
		// key out of the vector, so no "skip first entry" hack is needed below.
		LinkedHashMap<String, Double> hashmapTFIDF = new LinkedHashMap<String, Double>();
		for (String word : hashmapIDF.keySet()) {
			if (withoutDuplicates.contains(word)) {
				hashmapTFIDF.put(word, hashmapCount.get(word) * hashmapIDF.get(word));
			} else {
				hashmapTFIDF.put(word, 0.0);
			}
		}

		/***************************************************************
		 *  Create Document Vector *************************************
		 ***************************************************************/
		/* Forward to Group 3:
		 * Vector with word + TFIDF numbers.
		 * Words present in both documents => value,
		 * words only present in imported document 1 => zeros.
		 */
		System.out.println("");
		System.out.println("---------------- Document Vector ----------------");
		StringBuilder vector = new StringBuilder();
		for (Map.Entry<String, Double> entry : hashmapTFIDF.entrySet()) {
			vector.append(entry.getKey()).append(":").append(entry.getValue()).append("\n");
			System.out.println(entry.getKey() + ":" + entry.getValue());
		}
		System.out.println("");

		/* Write document vector to output file */
		fileIO.setPathOfOutputFile("src\\text2documentvector\\DocumentVector");
		fileIO.write2File(vector.toString().toCharArray());
	}

	/**
	 * Drains a token stream into a list, then ends and closes the stream.
	 *
	 * @param stream the stream to consume (closed on return)
	 * @return the term text of every token, in stream order
	 * @throws IOException if the underlying reader fails
	 */
	private static List<String> collectTokens(TokenStream stream) throws IOException {
		List<String> result = new ArrayList<String>();
		// incrementToken() advances to the next token; false at end of stream.
		while (stream.incrementToken()) {
			result.add(stream.getAttribute(CharTermAttribute.class).toString());
		}
		stream.end();
		stream.close();
		return result;
	}

	/**
	 * Splits comma-separated stop word file content into individual words.
	 * Trims surrounding whitespace and skips empty entries, and — unlike the
	 * original char-by-char parser — keeps a trailing word that is not
	 * followed by a comma.
	 */
	private static List<String> parseStopWords(String raw) {
		List<String> stopWords = new ArrayList<String>();
		for (String part : raw.split(",")) {
			String word = part.trim();
			if (!word.isEmpty()) {
				stopWords.add(word);
			}
		}
		return stopWords;
	}

	/** Stems every word with the Snowball German2 stemmer. */
	private static List<String> stemGerman(List<String> words) {
		German2Stemmer stemmer = new German2Stemmer();
		List<String> stemmed = new ArrayList<String>(words.size());
		for (String word : words) {
			stemmer.setCurrent(word);
			stemmer.stem();
			stemmed.add(stemmer.getCurrent());
		}
		return stemmed;
	}

	/** Counts occurrences of each word, preserving first-seen order. */
	private static LinkedHashMap<String, Integer> countOccurrences(List<String> words) {
		LinkedHashMap<String, Integer> counts = new LinkedHashMap<String, Integer>();
		for (String word : words) {
			Integer previous = counts.get(word);
			// Plain put() overwrites in place — no need for remove()+put().
			counts.put(word, previous == null ? 1 : previous + 1);
		}
		return counts;
	}

	/** Prints each word on its own line, followed by one blank line. */
	private static void printWords(List<String> words) {
		for (String word : words) {
			System.out.println(word);
		}
		System.out.println("");
	}
}
