package com.seo.textgen.markov;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.mutable.MutableInt;

import com.seo.textgen.TextGenerator;
import com.seo.textgen.tokenizer.Token;
import com.seo.textgen.tokenizer.TokenType;
import com.seo.textgen.tokenizer.Tokenizer;
import com.seo.textgen.tokenizer.TokenizerCallback;

/**
 * Trains a word-level Markov model from raw training texts and generates
 * sentences from it.
 *
 * <p>Usage: add texts via {@link #addTrainingText(String)}, call
 * {@link #train()} once, then call {@link #generateSentence()} repeatedly.
 * Not thread-safe.
 */
public class MarkovTextGenerator implements TextGenerator, TokenizerCallback {

	/** Tokens per full chain; the model conditions on CHAIN_LENGTH - 1 predecessors. */
	private static final int CHAIN_LENGTH = 3;

	/** Matches a run consisting entirely of Unicode uppercase letters. */
	private static final Pattern UPPERCASE_PATTERN = Pattern.compile("\\p{Lu}+");

	private final MarkovModel model;

	private final Collection<String> trainingTexts = new ArrayList<String>();

	private final Tokenizer tokenizer = new Tokenizer();

	// Transient training state; (re)initialized at the start of every train() call.
	private Map<Chain, MutableInt> subChainOccurences;
	private Map<Chain, MutableInt> chainOccurences;
	// Per lower-cased word: how often it was seen lower/Capitalized/UPPER.
	private Map<String, CaseStats> caseStats;

	public MarkovTextGenerator(MarkovModel model) {
		this.model = model;
	}

	/** Queues a raw text to be consumed by the next {@link #train()} call. */
	public void addTrainingText(String text) {
		trainingTexts.add(text);
	}

	/**
	 * Tokenizes all queued training texts, counts chain occurrences and case
	 * statistics, then populates the model with transition probabilities.
	 *
	 * @throws IOException if the tokenizer fails while reading a text
	 */
	public void train() throws IOException {
		subChainOccurences = new HashMap<Chain, MutableInt>();
		chainOccurences = new HashMap<Chain, MutableInt>();
		caseStats = new HashMap<String, CaseStats>();

		for (String text : trainingTexts) {
			trainInternal(text);
		}
		createModel();
	}

	private void trainInternal(String text) throws IOException {
		// The tokenizer calls back into processSentence() for every sentence.
		tokenizer.tokenize(text, this);
	}

	/**
	 * TokenizerCallback hook: folds one tokenized sentence into the chain and
	 * case-statistics counts. Invoked by the tokenizer during {@link #train()}.
	 */
	public void processSentence(Token[] sentence) {
		int size = sentence.length;
		for (int i = 0; i < size; i++) {
			calcCaseStats(sentence[i], i == 0);
		}

		// Start at a negative offset so that the chains leading into the
		// sentence are counted too; Chain is assumed to pad out-of-range
		// positions (e.g. with empty tokens) — this mirrors the empty-token
		// padding used in generateSentence().
		for (int i = -CHAIN_LENGTH + 1; i < size - CHAIN_LENGTH + 1; i++) {
			Chain chain = new Chain(sentence, i, CHAIN_LENGTH);
			Chain subChain = new Chain(sentence, i, CHAIN_LENGTH - 1);

			incOccurence(subChainOccurences, subChain);
			incOccurence(chainOccurences, chain);
		}
	}

	/**
	 * Records whether this word occurrence was lowercase, Capitalized or
	 * ALL-UPPERCASE. At the start of a sentence capitalization is forced by
	 * grammar, so only the unambiguous all-uppercase form is counted there.
	 */
	private void calcCaseStats(Token token, boolean startOfSentence) {
		if (token.getTokenType() != TokenType.WORD) {
			return;
		}
		CaseStats caseStat = caseStats.get(token.getLower());
		if (caseStat == null) {
			caseStat = new CaseStats();
			caseStats.put(token.getLower(), caseStat);
		}

		if (UPPERCASE_PATTERN.matcher(token.getToken()).matches()) {
			caseStat.incUpper();
			return;
		}

		if (!startOfSentence) {
			// Assumes word tokens are never empty — TODO confirm in Tokenizer.
			if (UPPERCASE_PATTERN.matcher(token.getToken().subSequence(0, 1)).matches()) {
				caseStat.incCapitalized();
			} else {
				caseStat.incLower();
			}
		}
	}

	/**
	 * Converts raw occurrence counts into conditional probabilities
	 * P(token | preceding sub-chain) and feeds them into the model.
	 */
	private void createModel() {
		for (Entry<Chain, MutableInt> entry : chainOccurences.entrySet()) {
			Chain subChain = entry.getKey().getSubChain();
			// Every full chain's prefix was also counted in processSentence(),
			// so this lookup never returns null.
			int subChainOccurances = subChainOccurences.get(subChain).intValue();
			float probability = entry.getValue().floatValue() / subChainOccurances;
			Token token = entry.getKey().getLastToken();
			token.setProbability(probability);
			token.setTokenInProperCase(makeProperCase(token));
			model.addChain(subChain, token);
		}
	}

	/**
	 * Returns the word spelled in its statistically dominant case
	 * (lowercase, Capitalized or UPPERCASE); ties prefer the less marked
	 * variant (lower over Capitalized over UPPER). Non-word tokens are
	 * returned unchanged.
	 */
	private String makeProperCase(Token token) {
		if (token.getTokenType() != TokenType.WORD) {
			return token.getToken();
		}
		// Never null: calcCaseStats registers every WORD token it sees.
		CaseStats caseStat = caseStats.get(token.getLower());
		// Derive all variants from the lowercase form: the original spelling
		// may itself be capitalized (e.g. taken from a sentence start) or
		// all-uppercase, which would leak through unnormalized otherwise.
		if (caseStat.getLower() >= caseStat.getUpper() && caseStat.getLower() >= caseStat.getCapitalized()) {
			return token.getLower();
		}
		if (caseStat.getCapitalized() >= caseStat.getUpper()) {
			return StringUtils.capitalize(token.getLower());
		}
		return token.getLower().toUpperCase();
	}

	/** Increments the counter for {@code chain}, creating it on first sight. */
	private void incOccurence(Map<Chain, MutableInt> occurences, Chain chain) {
		MutableInt current = occurences.get(chain);
		if (current == null) {
			current = new MutableInt(1);
			occurences.put(chain, current);
		} else {
			current.increment();
		}
	}

	/**
	 * Generates one sentence by repeatedly sampling the next token from the
	 * model, starting from a context of empty tokens. The first word is
	 * capitalized; generation stops at an end-of-sentence token or when the
	 * model has no continuation.
	 *
	 * <p>NOTE(review): if the model never yields an END_OF_SENTENCE token for
	 * a reachable context cycle, this loop does not terminate — consider a
	 * length cap if that can occur in practice.
	 */
	public String generateSentence() {
		LinkedList<Token> chain = new LinkedList<Token>();

		// Seed the context with empty tokens, matching the negative-offset
		// padding used during training.
		for (int i = 0; i < CHAIN_LENGTH - 1; i++) {
			chain.add(Token.EMPTY_TOKEN);
		}

		StringBuilder sentence = new StringBuilder();
		Token prevToken = null;
		for (;;) {
			Token token = model.getNextToken(new Chain(chain.toArray(new Token[chain.size()]), 0, chain.size()));
			if (token == null) {
				break;
			}

			if (isAppendSpace(prevToken, token)) {
				sentence.append(" ");
			}

			if (sentence.length() != 0) {
				sentence.append(token.getTokenInProperCase());
			} else {
				// First visible token of the sentence: force capitalization.
				sentence.append(StringUtils.capitalize(token.getTokenInProperCase()));
			}

			if (token.getTokenType() == TokenType.END_OF_SENTENCE) {
				break;
			}

			// Slide the context window forward by one token.
			chain.removeFirst();
			chain.add(token);

			prevToken = token;
		}
		return sentence.toString();
	}

	/**
	 * Decides whether a space belongs before {@code token}: words get one,
	 * sentence terminators never do, and dash-like punctuation gets one when
	 * not immediately following other punctuation.
	 */
	private boolean isAppendSpace(Token prevToken, Token token) {
		if (prevToken == null) {
			return false;
		}

		if (token.getTokenType() == TokenType.WORD) {
			return true;
		}

		if (token.getTokenType() == TokenType.END_OF_SENTENCE) {
			return false;
		}

		if (prevToken.getTokenType() != TokenType.PUNKT && token.getTokenType() == TokenType.PUNKT && isDoubleSpacePunkt(token.getToken())) {
			return true;
		}

		return false;
	}

	/** True for punctuation that is surrounded by spaces (hyphen, em dash). */
	private boolean isDoubleSpacePunkt(String token) {
		return "-".equals(token) || "—".equals(token);
	}
}
