/*
 * UWT May 2014
 */
package labeling;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.HashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.softcorporation.suggester.BasicSuggester;
import com.softcorporation.suggester.Suggestion;
import com.softcorporation.suggester.dictionary.BasicDictionary;
import com.softcorporation.suggester.tools.SpellCheck;
import com.softcorporation.suggester.util.SpellCheckConfiguration;
import com.softcorporation.suggester.util.SuggesterException;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.PTBTokenizer;

/**
 * Labels Facebook status updates with sentiment based on the provided lexicon.
 *
 * <p>Input and output files are CSV. For each status update row the labeler tokenizes the
 * message text, looks up each token in the lexicon, and appends one score column per lexicon
 * sentiment to the output row. Optionally, unknown tokens are spell-corrected, and a "not"
 * negation mode inverts the scores of negated lexicon words.</p>
 *
 * <p>NOT thread-safe: per-update state ({@code tokens}, {@code sentimentScores}) is kept in
 * instance fields and tokenization goes through a shared scratch file.</p>
 *
 * @author Matt Adams
 * @version 1.0
 */
public class StatusUpdateLabeler {

    private final static Logger logger = LoggerFactory.getLogger(StatusUpdateLabeler.class);

    /** Scratch file holding the single status update currently being tokenized. */
    private static final String TOKENIZER_FILE_NAME = "single_status_update.txt";

    /**
     * Tag added to end of original Facebook status update file. Tagged file contains labeled
     * status updates.
     */
    private static final String LABELED_TAG = "_labeled";

    /**
     * CSV file extension
     */
    private static final String CSV_EXT = ".csv";

    /** Whether tokens missing from the lexicon are run through the spell checker. */
    private final boolean spellCheckOn;

    /**
     * Map representation of lexicon: word -> one score per lexicon sentiment.
     */
    private Map<String, int[]> lexiconMap;

    /**
     * List of sentiments used in lexicon (column headings, word column excluded).
     */
    private String[] lexiconSentiments;

    private SpellCheckConfiguration configuration;

    private BasicSuggester suggester;

    /** Tokens of the most recently processed status update. */
    private List<String> tokens;

    /**
     * When true, every token is spell-normalized and "not" negation handling is applied:
     * a single "not" directly before a lexicon word inverts that word's scores.
     */
    private final boolean notActive;

    /** Running sentiment scores of the most recently processed status update. */
    private int[] sentimentScores;


    /**
     * Main to run status update labeler.
     *
     * @param argFileNames argFileNames of form: <Lexicon file name> <Status Update file name> ... <Status Update file name>
     */
    public static void main(String[] argFileNames) {
        if (argFileNames.length < 2) {
            logger.error("Incorrect number of arguments. Correct argument form: <Lexicon file name> <Status Update file name> ... <Status Update file name>");
            System.exit(1);
        }
        final StatusUpdateLabeler labeler = new StatusUpdateLabeler(argFileNames[0], false, false);
        for (int i = 1; i < argFileNames.length; i++) {
            labeler.labelUpdatesIntoCSV(argFileNames[i]);
        }
    }

    /**
     * Constructs new StatusUpdateLabeler object.
     *
     * @param lexiconFileName  base name of the lexicon CSV file (without the ".csv" extension).
     * @param spellCheckActive whether tokens missing from the lexicon are spell-corrected.
     * @param notActive        whether "not" negation handling is applied during scoring.
     */
    public StatusUpdateLabeler(final String lexiconFileName, final boolean spellCheckActive, final boolean notActive) {
        this.lexiconMap = buildLexiconHashMap(lexiconFileName);
        this.spellCheckOn = spellCheckActive;
        this.notActive = notActive;
        // Negation mode also spell-normalizes every token, so it needs the suggester too;
        // without this the notActive path would NPE on a null configuration/suggester.
        if (this.spellCheckOn || this.notActive) {
            setupSpellChecker();
        }
    }

    /**
     * Processes a Facebook status update file, and labels each status update with a sentiment
     * score based on a sentiment lexicon.
     *
     * @param fileName FB Status Update file name (must be format type CSV).
     */
    public void labelUpdatesIntoCSV(final String fileName) {
        final long startTime = System.nanoTime();
        // try-with-resources: both files are closed even if labeling fails part-way.
        try (BufferedReader bReader = new BufferedReader(new FileReader(fileName + CSV_EXT));
             BufferedWriter bWriter = new BufferedWriter(new FileWriter(fileName + LABELED_TAG + CSV_EXT))) {

            logger.info("Labeling status updates in file {}", fileName);
            logger.info("Writing results to file {}", fileName + LABELED_TAG + CSV_EXT);
            logger.info("May take a few minutes...");

            // Write column headings to output file.
            final String headerLine = bReader.readLine();
            if (headerLine == null) {
                logger.error("Status update file {} is empty.", fileName + CSV_EXT);
                return;
            }
            bWriter.write(buildHeaderRow(headerLine));

            // Read in each status update and output lexicon sentiment scores to file.
            int count = 0;
            String line;
            while ((line = bReader.readLine()) != null) {
                final String labeledRow = labelStatusUpdate(line);
                if (labeledRow != null) {
                    bWriter.write(labeledRow);
                }
                count++;
                if (count % 100000 == 0) {
                    logger.info("{} entries processed", count);
                }
            }
            final long estimatedTime = System.nanoTime() - startTime;
            final int seconds = (int) (estimatedTime / 1000000000.0);
            logger.info("Labeling complete. {} status updates labeled", count);
            logger.info("Total time: {} minutes {} seconds\n", (seconds / 60), (seconds % 60));
        } catch (FileNotFoundException e) {
            logger.error("Status update file not found. File must be of type CSV.", e);
        } catch (IOException e) {
            logger.error("IO exception during status update labeling.", e);
        }
    }

    /**
     * Builds the output header row: the original CSV headings followed by one column per
     * lexicon sentiment, terminated by a newline.
     */
    private String buildHeaderRow(final String headerLine) {
        final StringBuilder header = new StringBuilder();
        final String[] headings = headerLine.split(",");
        if (headings.length > 0) {
            header.append(headings[0]);
            for (int i = 1; i < headings.length; i++) {
                header.append(", ").append(headings[i]);
            }
        }
        for (final String sentiment : this.lexiconSentiments) {
            header.append(", ").append(sentiment);
        }
        header.append("\n");
        return header.toString();
    }

    /**
     * Scores a single status update row and formats it as an output CSV row.
     *
     * @param line raw CSV row of the form: userid, date, message[, message fragments...]
     * @return the labeled output row (newline-terminated), or {@code null} if the row has
     *         fewer than three columns and is skipped.
     * @throws IOException if the tokenizer scratch file cannot be written.
     */
    private String labelStatusUpdate(final String line) throws IOException {
        final String[] statusUpdateData = line.split(",");
        // Columns 0..2 (userid, date, message) are required below; skip malformed rows
        // instead of crashing with an ArrayIndexOutOfBoundsException.
        if (statusUpdateData.length < 3) {
            logger.warn("Skipping malformed status update row: {}", line);
            return null;
        }
        this.sentimentScores = new int[this.lexiconSentiments.length];
        writeTokenizerScratchFile(statusUpdateData);
        this.tokens = getTokenList();
        if (this.notActive) {
            normalizeTokensWithSpellCheck();
            removeDoubleNegations();
            setSentiment();
        } else {
            scoreTokens();
        }
        final StringBuilder row = new StringBuilder();
        //index 0 contains userid
        row.append(statusUpdateData[0].replace("\"", ""));
        //index 1 contains time of post
        row.append(", ").append(statusUpdateData[1]);
        //index 2 contains status message
        row.append(", ").append(statusUpdateData[2]);
        for (final int score : this.sentimentScores) {
            row.append(", ").append(score);
        }
        row.append("\n");
        return row.toString();
    }

    /**
     * Writes the message columns of a status update row (columns 2+) to the tokenizer scratch
     * file, space-separated.
     */
    private void writeTokenizerScratchFile(final String[] statusUpdateData) throws IOException {
        final StringBuilder text = new StringBuilder();
        //Loop starts at 2 to skip userid and date columns of status update files
        for (int i = 2; i < statusUpdateData.length; i++) {
            text.append(statusUpdateData[i]).append(" ");
        }
        try (FileWriter tokenWriter = new FileWriter(TOKENIZER_FILE_NAME)) {
            tokenWriter.write(text.toString());
        }
    }

    /**
     * Replaces each token with its top spell-check suggestion and normalizes the negation
     * forms "n't" and "nt" to "not".
     */
    private void normalizeTokensWithSpellCheck() {
        for (int j = 0; j < this.tokens.size(); j++) {
            final String original = this.tokens.get(j);
            String suggestion = getSpellCheckSuggestion(original);
            // Compare BEFORE replacing: the previous code overwrote the token first, so the
            // equality check was always true and corrections were never logged.
            if (!suggestion.equals(original)) {
                logger.warn("SPELL CHECK: Changing token {} to {}", original, suggestion);
            }
            if (suggestion.equals("n't") || suggestion.equals("nt")) {
                suggestion = "not";
            }
            this.tokens.set(j, suggestion);
        }
    }

    /**
     * Removes adjacent "not not" pairs, which cancel each other out for negation scoring.
     */
    private void removeDoubleNegations() {
        final List<Integer> removeIndexes = new ArrayList<Integer>();
        for (int j = 0; j < this.tokens.size() - 1; j++) {
            if (this.tokens.get(j).equals("not") && this.tokens.get(j + 1).equals("not")) {
                logger.info("Removing double negation at tokens {} and {}", j, j + 1);
                // Guard against duplicates when three or more "not"s run together.
                if (!removeIndexes.contains(j)) {
                    removeIndexes.add(j);
                }
                if (!removeIndexes.contains(j + 1)) {
                    removeIndexes.add(j + 1);
                }
            }
        }
        // Remove from the highest index down so earlier removals do not shift later indexes.
        // .intValue() forces the positional remove(int) overload: passing the boxed Integer
        // would call remove(Object), which never matches a String and silently removes nothing.
        for (int j = removeIndexes.size() - 1; j >= 0; j--) {
            this.tokens.remove(removeIndexes.get(j).intValue());
        }
    }

    /**
     * Accumulates lexicon scores for each token, spell-correcting tokens that are not found in
     * the lexicon when spell checking is enabled.
     */
    private void scoreTokens() {
        for (int j = 0; j < this.tokens.size(); j++) {
            final String token = this.tokens.get(j);
            if (this.lexiconMap.containsKey(token)) {
                addScores(this.lexiconMap.get(token));
            } else if (this.spellCheckOn) {
                String suggestion = getSpellCheckSuggestion(token);
                // Compare BEFORE replacing (same ordering fix as normalizeTokensWithSpellCheck).
                if (!suggestion.equals(token)) {
                    logger.warn("SPELL CHECK: Changing token {} to {}", token, suggestion);
                }
                if (suggestion.equals("n't") || suggestion.equals("nt")) {
                    suggestion = "not";
                }
                this.tokens.set(j, suggestion);
                // Score the token as it was finally stored.
                final int[] lexiconSentimentScores = this.lexiconMap.get(suggestion);
                if (lexiconSentimentScores != null) {
                    addScores(lexiconSentimentScores);
                }
            }
        }
    }

    /** Adds the given per-sentiment lexicon scores into the running totals. */
    private void addScores(final int[] lexiconSentimentScores) {
        for (int k = 0; k < this.lexiconSentiments.length; k++) {
            this.sentimentScores[k] += lexiconSentimentScores[k];
        }
    }

    /**
     * Scores the current token list, inverting a word's lexicon scores when it is preceded by
     * exactly one "not" (a preceding "not not" pair does not invert).
     */
    private void setSentiment() {
        String prevToken = "";
        String prevPrevToken = "";
        for (int j = 0; j < this.tokens.size(); j++) {
            final String token = this.tokens.get(j);
            if (this.lexiconMap.containsKey(token)) {
                // && replaces the non-short-circuiting & used previously.
                final int multiplier =
                        (prevToken.equals("not") && !prevPrevToken.equals("not")) ? -1 : 1;
                final int[] lexiconSentimentScores = this.lexiconMap.get(token);
                for (int k = 0; k < this.lexiconSentiments.length; k++) {
                    this.sentimentScores[k] += lexiconSentimentScores[k] * multiplier;
                }
            }
            prevPrevToken = prevToken;
            prevToken = token;
        }
    }

    /** @return the tokens of the most recently processed status update. */
    public List<String> getFinalTokens() {
        return this.tokens;
    }

    /** @return the sentiment scores of the most recently processed status update. */
    public int[] getFinalSentimentScores() {
        return this.sentimentScores;
    }

    /**
     * Returns the spell checker's top suggestion for the given word, or the word itself when no
     * suggestion is available or spell checking fails.
     */
    private String getSpellCheckSuggestion(final String word) {
        String result = word;
        try {
            //Reduces 3 or more of same letters in a row to 1 (e.g. "sooooo" -> "so").
            final String checkWord = word.replaceAll("(.)\\1{2,}", "$1");
            final SpellCheck spellCheck = new SpellCheck(configuration);
            spellCheck.setSuggester(suggester);
            spellCheck.setSuggestionLimit(3);
            spellCheck.setText(checkWord);
            spellCheck.check();
            final List<?> suggestions = spellCheck.getSuggestions();
            if (suggestions != null && !suggestions.isEmpty()) {
                result = ((Suggestion) suggestions.get(0)).word;
            }
        } catch (SuggesterException e) {
            logger.error("Error during spell checking process.", e);
        }
        return result;
    }

    /**
     * Loads the English dictionary and spell-check configuration and attaches them to the
     * suggester used by {@link #getSpellCheckSuggestion(String)}.
     */
    private void setupSpellChecker() {
        try {
            final String dictFileName = "file://References/english.jar";
            final BasicDictionary dictionary = new BasicDictionary(dictFileName);
            this.configuration = new SpellCheckConfiguration("file://spellCheck.config");

            this.suggester = new BasicSuggester(this.configuration);
            this.suggester.attach(dictionary);
        } catch (Exception e) {
            logger.error("Error during spell checker setup.", e);
        }
    }

    /**
     * Tokenizes the scratch file written by {@link #writeTokenizerScratchFile(String[])} and
     * returns the lowercased tokens.
     */
    private List<String> getTokenList() {
        final List<String> tokenList = new ArrayList<String>();
        try (FileReader reader = new FileReader(TOKENIZER_FILE_NAME)) {
            @SuppressWarnings({"rawtypes", "unchecked"})
            PTBTokenizer ptbt = new PTBTokenizer(reader, new CoreLabelTokenFactory(), "untokenizable=noneKeep");
            while (ptbt.hasNext()) {
                //Get next token from tokenizer and set all characters to lowercase
                tokenList.add(((CoreLabel) ptbt.next()).word().toLowerCase());
            }
        } catch (IOException e) {
            logger.error("Unable to read tokenizer scratch file {}.", TOKENIZER_FILE_NAME, e);
        }
        return tokenList;
    }

    /**
     * Builds a lexicon hash map that maps words to corresponding sentiment scores.
     *
     * @param fileName Lexicon file name (must be format type CSV).
     * @return Map of words to corresponding lexicon sentiment scores.
     */
    private Map<String, int[]> buildLexiconHashMap(final String fileName) {
        final Map<String, int[]> resultLexiconMap = new HashMap<String, int[]>();
        // Default so later code never sees a null sentiment array, even on failure.
        this.lexiconSentiments = new String[0];
        try (BufferedReader bReader = new BufferedReader(new FileReader(fileName + CSV_EXT))) {
            final String headerLine = bReader.readLine();
            if (headerLine == null) {
                logger.error("Lexicon file {} is empty.", fileName + CSV_EXT);
                return resultLexiconMap;
            }
            final String[] lexiconHeadings = headerLine.split(",");
            this.lexiconSentiments = new String[lexiconHeadings.length - 1];
            //Store lexicon sentiment headings. Loop starts at 1 to skip word column of lexicon.
            for (int i = 1; i < lexiconHeadings.length; i++) {
                this.lexiconSentiments[i - 1] = lexiconHeadings[i];
            }
            //Map word to array of sentiment scores.
            int count = 0;
            String line;
            while ((line = bReader.readLine()) != null) {
                final int[] sentimentScores = new int[this.lexiconSentiments.length];
                final String[] values = line.split(",");
                // Bound by both arrays so a row with extra columns cannot overflow the scores.
                for (int i = 1; i < values.length && i - 1 < sentimentScores.length; i++) {
                    sentimentScores[i - 1] = Integer.parseInt(values[i].trim());
                }
                resultLexiconMap.put(values[0], sentimentScores);
                count++;
            }
            logger.info("Lexicon hash map generated successfully. {} total entries.", count);
        } catch (FileNotFoundException e) {
            logger.error("Lexicon file not found. File must be of type CSV.", e);
        } catch (IOException e) {
            logger.error("IO exception while generating lexicon hash map.", e);
        } catch (NumberFormatException e) {
            logger.error("Lexicon contains a non-numeric sentiment score.", e);
        }

        return resultLexiconMap;
    }
}
