package com.unidue.foguing.helper;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.commons.io.FileUtils;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;

import com.unidue.foguing.types.ActualSentiment;
import com.unidue.foguing.types.EvaluatedSentiment;

public abstract class BaseLineBase extends JCasAnnotator_ImplBase {

	/**
	 * Collapses runs of three or more identical characters at the end of a word
	 * down to two (e.g. "upppppp" -&gt; "upp"). Compiled once instead of being
	 * recompiled by {@code String.replaceAll} on every token.
	 */
	private static final Pattern TRAILING_DUPLICATES = Pattern.compile("(.)\\1{2,}(\\b)");

	// Last preprocessed whole-tweet text (only assigned in the 4-part task branch).
	// Kept public because existing callers/subclasses may read it directly.
	public String processedTweet = null;

	/**
	 * Preprocesses the tweet (4-part task) or the marked sub-tweet (6-part task),
	 * asks the subclass for its sentiment polarity via
	 * {@link #getSentimentPolarity(String)} and records the result as an
	 * {@link EvaluatedSentiment} annotation in the CAS.
	 *
	 * @param aJCas the CAS holding the tweet text and its {@link ActualSentiment} feature
	 * @throws AnalysisEngineProcessException if the document has an unexpected number of parts
	 */
	@Override
	public void process(JCas aJCas) throws AnalysisEngineProcessException {

		ActualSentiment actualSentiment = JCasUtil.selectSingle(aJCas, ActualSentiment.class);
		int partsOfDocument = actualSentiment.getNumberOfParts();
		String document = aJCas.getDocumentText();

		if (partsOfDocument == 4) {
			// Whole-tweet task. processTweet() already applies removeDuplicates(),
			// so the previous second pass over the result was redundant (the
			// operation is idempotent) and has been dropped.
			System.out.println("\nThe tweet is : " + document);
			processedTweet = processTweet(document.toLowerCase(), partsOfDocument);
			System.out.println("the preprocessed tweet is now : " + processedTweet);
			annotateSentiment(aJCas, processedTweet);
		} else if (partsOfDocument == 6) {
			// Sub-tweet task: classify only the span between startWord and endWord.
			// Reuse actualSentiment instead of re-selecting it from the CAS.
			String subTweet = actualSentiment.getSubTweet();
			int startWord = actualSentiment.getStartWordPosition();
			int endWord = actualSentiment.getEndWordPosition();

			System.out.println("\nThe tweet is : " + document);
			System.out.println("The startWord position is : " + startWord + " and the endWord position is : " + endWord);
			System.out.println("The part of this tweet to be  analysed is : " + subTweet);
			String processedSubTweet = processTweet(subTweet.toLowerCase(), partsOfDocument);
			System.out.println("the preprocessed tweet is now : " + processedSubTweet);
			annotateSentiment(aJCas, processedSubTweet);
		} else {
			throw new AnalysisEngineProcessException();
		}
	}

	/**
	 * Computes the polarity of {@code text} and stores it as an
	 * {@link EvaluatedSentiment} annotation in the CAS.
	 * Shared by both task branches of {@link #process(JCas)}.
	 */
	private void annotateSentiment(JCas aJCas, String text) {
		String sentiment = getSentimentPolarity(text);
		EvaluatedSentiment evaluatedSentiment = new EvaluatedSentiment(aJCas);
		evaluatedSentiment.setEvaluatedSentiment(sentiment);
		evaluatedSentiment.addToIndexes();
	}

	/**
	 * Preprocesses a (sub-)tweet: strips one leading "#" or "@" from each word
	 * (those markers carry no lexical sentiment), drops words that consist only of
	 * such a marker, removes URL-like words for the whole-tweet task, and finally
	 * collapses character elongations via {@link #removeDuplicates(String)}.
	 *
	 * @param tweet           the (lower-cased) tweet text
	 * @param partsOfDocument 4 for the whole-tweet task (URLs removed),
	 *                        6 for the sub-tweet task (URLs kept)
	 * @return the preprocessed tweet, words separated (and terminated) by spaces
	 */
	protected String processTweet(String tweet, int partsOfDocument) {

		// Hack carried over from the original: URLs only hurt the whole-tweet
		// task, the sub-tweet task keeps them.
		boolean keepUrls = partsOfDocument == 6;
		StringBuilder builder = new StringBuilder();
		for (String word : tweet.split(" ")) {
			if (!word.isEmpty() && (word.charAt(0) == '#' || word.charAt(0) == '@')) {
				word = word.substring(1);
			}
			// e.g. "##" was reduced to a lone "#" above -> discard it completely.
			if (word.length() == 1 && (word.charAt(0) == '#' || word.charAt(0) == '@')) {
				word = "";
			}
			// contains() instead of matches(".*http.*"): the regex '.' does not
			// match '\n', so URL tokens containing a newline slipped through.
			if (keepUrls || !word.contains("http")) {
				builder.append(word).append(' ');
			}
		}
		return removeDuplicates(builder.toString());
	}

	/**
	 * Collapses character elongations at the end of words, e.g.
	 * "upppppppppppppppppppp" -&gt; "upp", so elongated words can still match
	 * lexicon entries. The operation is idempotent.
	 *
	 * @param string space-separated words
	 * @return the words with trailing repetitions reduced, each followed by a space
	 */
	protected String removeDuplicates(String string) {
		StringBuilder builder = new StringBuilder();
		for (String word : string.split(" ")) {
			builder.append(TRAILING_DUPLICATES.matcher(word).replaceAll("$1$1$2")).append(' ');
		}
		return builder.toString();
	}

	/**
	 * Reads the text file that contains positive words into a set.
	 *
	 * @return a set of positive words
	 * @throws IOException if the word list cannot be read or is empty
	 */
	protected HashSet<String> getSetOfPositiveWords() throws IOException {
		return loadWordSet("src/test/resources/test/positive-words.txt", "positive");
	}

	/**
	 * Reads the text file that contains negative words into a set.
	 *
	 * @return a set of negative words
	 * @throws IOException if the word list cannot be read or is empty
	 */
	protected HashSet<String> getSetOfNegativeWords() throws IOException {
		return loadWordSet("src/test/resources/test/negative-words.txt", "negative");
	}

	/**
	 * Loads a newline-separated word list, trimming each entry. Reads as UTF-8
	 * explicitly instead of relying on the deprecated platform-charset overload.
	 *
	 * @param path  word-list file path, relative to the project root
	 * @param label list name used in the error message ("positive"/"negative")
	 * @return the trimmed words of the file
	 * @throws FileNotFoundException if the file is missing or empty
	 * @throws IOException           on any other read failure
	 */
	private HashSet<String> loadWordSet(String path, String label) throws IOException {
		File file = new File(path);
		List<String> lines = FileUtils.readLines(file, StandardCharsets.UTF_8);
		if (lines == null || lines.isEmpty()) {
			throw new FileNotFoundException("fail to retrieve the list of " + label
					+ " words. check first whether the list exist or not!!!");
		}
		HashSet<String> set = new HashSet<String>();
		for (String line : lines) {
			set.add(line.trim());
		}
		return set;
	}

	/**
	 * Computes the difference between the number of positive words and the number
	 * of negative words; from the result, derives the sentiment polarity of the
	 * tweet.
	 *
	 * @param document the preprocessed tweet
	 * @return the sentiment of the tweet
	 */
	public abstract String getSentimentPolarity(String document);

}
