import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;

/**
 * Trains a multinomial Naive Bayes text classifier: computes per-class prior
 * probabilities and Laplace-smoothed conditional term probabilities from a
 * parsed training corpus.
 */
public class Trainer {

	// class name -> parsed documents, term counts and probabilities for that class
	static HashMap<String, ClassDetails> classInfoMap = new HashMap<String, ClassDetails>();
	// scratch map: term -> frequency within the class currently being processed
	static HashMap<String, Integer> classTermFreq = new HashMap<String, Integer>();
	// global vocabulary accumulated while stopwords are stripped from each document
	static ArrayList<String> vocabulary = new ArrayList<String>();
	static StopWordRemoveResult swr;
	static ClassDetails cd = null;
	static String currentClass;

	/**
	 * Counts whitespace-delimited words in {@code line}.
	 * Adapted from http://www.kodejava.org/examples/89.html
	 *
	 * @param line non-null text to scan
	 * @return the number of words in {@code line}
	 */
	private static int wordcount(String line) {
		int numWords = 0;
		boolean prevWhiteSpace = true;
		for (int index = 0; index < line.length(); index++) {
			boolean currWhiteSpace = Character.isWhitespace(line.charAt(index));
			// A word starts at every whitespace -> non-whitespace transition.
			if (prevWhiteSpace && !currWhiteSpace) {
				numWords++;
			}
			prevWhiteSpace = currWhiteSpace;
		}
		return numWords;
	}

	/**
	 * For every class: concatenates the bodies of its documents, counts how
	 * often each vocabulary term occurs, and stores the Laplace-smoothed
	 * conditional probability P(term | class) =
	 * (termFreq + 1) / (termsInClass + vocabSize) on the class details.
	 */
	private static void computeTermFreq() {
		// Iterate over the entire map containing the details of all classes.
		for (Map.Entry<String, ClassDetails> vocMe : classInfoMap.entrySet()) {
			cd = vocMe.getValue();
			currentClass = vocMe.getKey();

			if (cd == null || cd.getDocList() == null) {
				continue;
			}

			// Concatenate the bodies of all documents in this class, separated
			// by a space so the last word of one document does not fuse with
			// the first word of the next (the original appended with no
			// separator, corrupting term and word counts at the seams).
			StringBuilder textOfDocsInClass = new StringBuilder();
			for (DocumentEntity doc : cd.getDocList()) {
				String body = doc.getBody();
				if (body != null) {
					if (textOfDocsInClass.length() > 0) {
						textOfDocsInClass.append(' ');
					}
					textOfDocsInClass.append(body);
				}
			}
			String classText = textOfDocsInClass.toString();
			cd.setTextOfAllDocsInClass(classText);

			// Tokenize once and count whole-word occurrences. The previous
			// implementation used StringUtils.countMatches, which counts
			// substring matches — e.g. "cat" was also counted inside
			// "category" — inflating term frequencies. Tokenizing is also
			// O(text + vocab) instead of O(vocab * text).
			HashMap<String, Integer> tokenCounts = new HashMap<String, Integer>();
			for (String token : classText.split("\\s+")) {
				if (token.length() == 0) {
					continue; // split of a leading separator yields an empty first token
				}
				Integer prev = tokenCounts.get(token);
				tokenCounts.put(token, prev == null ? 1 : prev + 1);
			}

			classTermFreq = cd.getClassTermFreq();
			if (classTermFreq == null) {
				classTermFreq = new HashMap<String, Integer>();
			}
			// For each term in the vocabulary, record its frequency in this class.
			for (String term : vocabulary) {
				Integer termCount = tokenCounts.get(term);
				classTermFreq.put(term, termCount == null ? 0 : termCount);
			}
			cd.setClassTermFreq(classTermFreq);

			int vocabSize = vocabulary.size();
			System.out.println("VocabSize:" + vocabSize);

			int numOfTermsInClass = wordcount(classText);
			System.out.println("No of terms in class " + currentClass + " :" + numOfTermsInClass);

			HashMap<String, Double> conditionalProbs = cd.getConditionalProbs();
			if (conditionalProbs == null) {
				conditionalProbs = new HashMap<String, Double>();
			}
			// Laplace (add-one) smoothing gives unseen terms a non-zero probability.
			for (String currentTerm : vocabulary) {
				int currentTermFreq = classTermFreq.get(currentTerm);
				double currentCondProb = ((double) (currentTermFreq + 1)) / (numOfTermsInClass + vocabSize);
				conditionalProbs.put(currentTerm, currentCondProb);
			}
			System.out.println("Conditional probs for class " + currentClass + " : " + conditionalProbs.toString());
			cd.setConditionalProbs(conditionalProbs);
		}
	}

	/**
	 * Parses the training set, removes stopwords from every document body,
	 * and computes the prior and conditional probabilities for every class.
	 *
	 * @param trainingSet      path of the XML training corpus
	 * @param stopWordFile     path of the stopword list file
	 * @param classNamesSource path of a text file listing one class name per line
	 * @return the populated class-details map together with the accumulated vocabulary;
	 *         on an I/O failure, whatever was built before the failure
	 */
	public static TrainerResult preprocessTrainingSet(String trainingSet, String stopWordFile, String classNamesSource) {
		// try-with-resources closes the reader on every path (the original
		// never closed its FileReader/BufferedReader — a resource leak).
		try (BufferedReader br_cnFile = new BufferedReader(new FileReader(classNamesSource))) {

			// Seed the class map with every class name; the parser fills in the details.
			String className;
			while ((className = br_cnFile.readLine()) != null) {
				classInfoMap.put(className, null);
			}

			// Parse the training articles and group them under their classes.
			Parser parser = new Parser();
			ParsedResult result = parser.parseXmlFile(trainingSet, classInfoMap);
			classInfoMap = result.getClassInfoMap();
			int trainingSetSize = result.getTrainingSetSize();
			System.out.println("Training size in handler: " + trainingSetSize);

			StopwordRemover stopwordRemover = new StopwordRemover();

			// Iterate over the entire map containing the details of all classes.
			for (Map.Entry<String, ClassDetails> me : classInfoMap.entrySet()) {
				cd = me.getValue();
				currentClass = me.getKey();
				System.out.println("=============================");
				System.out.println("Current class:" + currentClass);

				if (cd == null || cd.getDocList() == null) {
					continue;
				}

				ArrayList<DocumentEntity> dcList = cd.getDocList();
				int docsInAClassCount = dcList.size();
				System.out.println("docsInAClassCount for class "
						+ currentClass + " is:" + docsInAClassCount);

				// Prior probability P(class) = |docs in class| / |training set|.
				double priorProb = ((double) docsInAClassCount) / trainingSetSize;
				cd.setPriorProbOfClass(priorProb);
				System.out.println("Prior prob of class " + currentClass + ": " + priorProb);

				// Strip stopwords from every document body in this class,
				// growing the shared vocabulary as a side effect.
				for (DocumentEntity doc : dcList) {
					String body = doc.getBody();
					if (body != null) {
						swr = stopwordRemover.remove(stopWordFile, body, vocabulary);
						// Replace the body with the stopword-free version.
						doc.setBody(swr.getModifiedBody());
						vocabulary = swr.getVocabulary();
					}
				}
			} // complete vocab is formed at the end of this for-loop

			System.out.println("Complete vocab: " + vocabulary.toString());

			// Count per-class term frequencies and conditional probabilities.
			computeTermFreq();

		} catch (IOException e) {
			// FileNotFoundException is an IOException, so one catch covers both.
			// Log and fall through: callers receive whatever was built so far.
			e.printStackTrace();
		}

		return new TrainerResult(classInfoMap, vocabulary);
	}

}
