package br.mikhas.lab.scanner.factory;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;

import br.mikhas.lab.scanner.AcceptCharTransition;
import br.mikhas.lab.scanner.Lexer;
import br.mikhas.lab.scanner.State;
import br.mikhas.util.ExceptionUtils;
import br.mikhas.util.IOUtils;

/**
 * Creates a {@link Lexer} from a text definition.
 * <p>
 * A definition is a sequence of entries of the form {@code NAME 'chars';}:
 * a token name, whitespace, a single-quoted character sequence and a
 * terminating {@code ';'}. A {@code ';'} inside single quotes does not end
 * an entry.
 * 
 * @author Mikhail Domanoski
 */
@SuppressWarnings("unused")
public class LexerFactory {
	/**
	 * Separates each token definition in the file
	 */
	private static final char TOKEN_SEPARATOR = ';';

	/**
	 * Single quote encloses the token definition
	 */
	private static final char SINGLE_QUOTE = '\'';

	/**
	 * Used by {@link #isSingleChar(char[], int, int)} to rule out a
	 * quoted char that is followed by a {@code '.'} (range-style syntax)
	 */
	private static final char DOT = '.';

	// NOTE(review): STAR, EXCLAMATION and PIPE are never referenced --
	// presumably reserved for future repetition/negation/alternation
	// syntax; confirm before deleting them.
	private static final char STAR = '*';

	private static final char EXCLAMATION = '!';

	private static final char PIPE = '|';

	/**
	 * Possible whitespace chars.
	 * NOTE(review): '\t' is not included -- confirm whether tab-indented
	 * definition files must be supported.
	 */
	private static final char[] WHITE_SPACE = new char[] { ' ', '\n', '\r' };

	/**
	 * Number of whitespace chars
	 */
	private static final int WHITE_SPACE_COUNT = WHITE_SPACE.length;

	public LexerFactory() {
		// No state to initialize
	}

	/**
	 * Creates a lexer from the definition file
	 * 
	 * @param file
	 *            File which contains the lexer definition
	 * @return The lexer for the definition
	 * @throws RuntimeException
	 *             wrapping the {@link FileNotFoundException} when the file
	 *             cannot be opened
	 */
	public Lexer createLexer(File file) {
		try {
			// NOTE(review): FileReader uses the platform default charset,
			// and the reader is never visibly closed -- confirm that
			// IOUtils.read closes it, otherwise this leaks a file handle.
			return createLexer(new FileReader(file));
		} catch (FileNotFoundException e) {
			throw ExceptionUtils.wrapChecked(e);
		}
	}

	/**
	 * Creates a lexer from the stream
	 * 
	 * @param stream
	 *            Stream which contains the lexer definition
	 * @return The lexer for the definition
	 */
	public Lexer createLexer(InputStream stream) {
		return createLexer(new InputStreamReader(stream));
	}

	/**
	 * Creates a lexer from the reader
	 * 
	 * @param reader
	 *            Reader which contains the lexer definition
	 * @return The lexer for the definition
	 */
	public Lexer createLexer(Reader reader) {
		State initialState = new State();

		Lexer lexer = new Lexer(initialState);

		createStateTree(reader, initialState);

		return lexer;
	}

	/**
	 * Creates the lexer state machine tree by reading token definitions
	 * one by one from the definition text.
	 * 
	 * @param reader
	 *            The reader which contains the definition
	 * @param initialState
	 *            The lexer initial state
	 */
	private void createStateTree(Reader reader, State initialState) {
		// Read the whole file up front since definitions are expected to
		// be small; work on the char array for memory and performance.
		char[] file = IOUtils.read(reader).toCharArray();
		int start = 0, end = 0, len, fileLen = file.length;

		end = nextTokenEnd(file, start);

		// nextTokenEnd returns -1 when no further separator exists.
		while (end > 0 && end < fileLen) {
			len = end - start;

			parseToken(initialState, file, start, len);

			// Resume right after the separator.
			start = end + 1;
			end = nextTokenEnd(file, end + 1);
		}

	}

	/**
	 * Gets the end of the next token definition, i.e. the index of the
	 * next unquoted {@link #TOKEN_SEPARATOR}.
	 * <p>
	 * Note: scanning deliberately starts at {@code start + 1}; callers
	 * always pass either 0 or the index right after the previous
	 * separator, so the char at {@code start} never needs to be checked.
	 * 
	 * @param file
	 *            Char array which contains the lexer definition
	 * @param start
	 *            The start index to read the next token definition
	 * @return The end index of the next token definition, or -1 if there
	 *         is none
	 */
	private int nextTokenEnd(char[] file, int start) {
		char c;
		boolean onQuotes = false;
		int len = file.length - 1;

		if (start >= len)
			return -1;

		// Separators inside single quotes do not terminate a definition.
		while ((c = file[++start]) != TOKEN_SEPARATOR || onQuotes) {
			if (c == SINGLE_QUOTE)
				onQuotes = !onQuotes;

			if (start >= len)
				return -1;
		}

		return start;
	}

	/**
	 * Parses a token definition and adds it to the tree
	 * 
	 * @param initialState
	 *            The tree initial state
	 * @param def
	 *            The file char array to be analysed
	 * @param start
	 *            The start index of the current token
	 * @param len
	 *            The length of the token definition in the char array
	 */
	private void parseToken(State initialState, char[] def, int start, int len) {
		// Trim leading whitespace before the token name.
		while (isWhiteSpace(def[start])) {
			start++;
			len--;
		}

		String tokenName = getTokenName(def, start, len);

		int nameLength = tokenName.length();

		// Skip past the name, then trim the whitespace between the name
		// and the quoted definition.
		start += nameLength;
		len -= nameLength;
		while (isWhiteSpace(def[start])) {
			start++;
			len--;
		}

		createTokenTree(initialState, tokenName, def, start, len);
	}

	/**
	 * Creates the token tree from the initial state. Definitions that are
	 * neither a single quoted char nor a quoted char sequence are
	 * silently ignored.
	 * 
	 * @param initialState
	 *            The token initial state
	 * @param tokenType
	 *            The name of the token being defined
	 * @param def
	 *            The char array with the token definition
	 * @param start
	 *            The start index of the token definition
	 * @param len
	 *            The length of the token definition
	 */
	private void createTokenTree(State initialState, String tokenType,
			char[] def, int start, int len) {
		// A single char is just a sequence of length one; both shapes are
		// handled by the same tree builder.
		if (isSingleChar(def, start, len) || isSequence(def, start, len)) {
			createSequenceTree(initialState, def, start, len).setTokenType(
					tokenType);
		}
	}

	/**
	 * Checks if the next token definition is a single quoted char
	 * ({@code 'x'}) that is not followed by a {@code '.'}
	 * 
	 * @param def
	 *            The char array
	 * @param start
	 *            The start index
	 * @param len
	 *            The length
	 * @return <code>true</code> if the next token is a single char
	 */
	private boolean isSingleChar(char[] def, int start, int len) {
		return def[start] == SINGLE_QUOTE && len > 2
				&& def[start + 2] == SINGLE_QUOTE && def[start + 3] != DOT;
	}

	/**
	 * Builds a chain of states accepting each char between the quotes,
	 * reusing existing transitions so definitions with a common prefix
	 * share states.
	 * 
	 * @param initialState
	 *            The state the chain starts from
	 * @param def
	 *            The char array with the token definition
	 * @param start
	 *            The index of the opening quote
	 * @param len
	 *            The length of the token definition
	 * @return The final (accepting) state of the sequence
	 * @throws SyntaxException
	 *             if no closing quote is found
	 */
	private State createSequenceTree(State initialState, char[] def, int start,
			int len) {
		// Step past the opening quote.
		start++;
		// Index of the last char before the closing quote.
		int end = indexOf(def, SINGLE_QUOTE, start, len) - 1;

		if (end < 0)
			throw new SyntaxException("Invalid char sequence syntax: "
					+ new String(def, start - 1, len));
		State state, prevState = initialState;

		for (int i = start; i <= end; i++) {
			char c = def[i];

			// Follow an existing transition if one accepts this char ...
			state = prevState.nextState(c);

			// ... otherwise grow the tree with a new state.
			if (state == null) {
				state = new State();
				prevState.addTransition(new AcceptCharTransition(state, c));
			}

			prevState = state;
		}

		return prevState;

	}

	/**
	 * Checks if the next token definition is a sequence of chars
	 * 
	 * @param def
	 *            The char array
	 * @param start
	 *            The start index
	 * @param len
	 *            The length
	 * @return <code>true</code> if the next token is a sequence of chars
	 */
	private boolean isSequence(char[] def, int start, int len) {
		return def[start] == SINGLE_QUOTE && len > 2
				&& def[start + 2] != SINGLE_QUOTE;
	}

	/**
	 * Gets the token name from the definition: the chars from
	 * <code>start</code> up to (and excluding) the first whitespace char.
	 * 
	 * @param file
	 *            The char array with the token definition
	 * @param start
	 *            The start index of the token definition in the array
	 * @param len
	 *            The length of the token definition in the char array
	 * @return the token's name, without any trailing whitespace
	 */
	private String getTokenName(char[] file, int start, int len) {
		int limit = start + len;
		int end = start;

		while (end < limit && !isWhiteSpace(file[end]))
			end++;

		// FIX: the previous version used a post-incremented index and so
		// included the terminating whitespace char in the returned name
		// (producing token types with a trailing blank); it could also
		// scan past the end of the definition when no whitespace
		// followed the name. Both are corrected by bounding the scan and
		// excluding the whitespace char.
		return new String(file, start, end - start);
	}

	/**
	 * Checks if the provided char is a whitespace char
	 * 
	 * @param c
	 *            The char to check
	 * @return <tt>true</tt> if the <code>char</code> is a whitespace
	 */
	private static boolean isWhiteSpace(char c) {
		for (int i = 0; i < WHITE_SPACE_COUNT; i++) {
			if (c == WHITE_SPACE[i])
				return true;
		}

		return false;
	}

	/**
	 * Finds the first occurrence of <code>c</code> in the range
	 * <code>[start, start + len)</code> of the array
	 * 
	 * @param cs
	 *            The char array to search
	 * @param c
	 *            The char to find
	 * @param start
	 *            The first index to check
	 * @param len
	 *            The number of chars to check
	 * @return The index of the first occurrence, or -1 if not found
	 */
	private static int indexOf(char[] cs, char c, int start, int len) {
		for (int i = start; i < start + len; i++)
			if (cs[i] == c)
				return i;
		return -1;
	}
}
