package hustlas.ball.preparse;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.ArrayList;
import java.util.Stack;
import java.util.regex.Matcher;
/**
 * This class is responsible for taking the input file and outputting it as a
 * tokenized string. It also takes into account which tokens are allowed or
 * disallowed. Whitespace is stripped before tokenizing, so this lexer will
 * not work for whitespace-significant (Python-style) languages.
 * 
 * @author Steven
 *
 */
public class Lexer {
	
	private HashMap<String,Tokens> lexTable;
	private EXTRule[] ruleArray; 
	private EnumMap<Tokens,Boolean> validMap;
	private int maxKeySize;
	
	/** The maximum number of characters in the buffer is 80; we will not
	 *  handle identifiers or anything else over 80 characters, since a
	 *  lexeme that long is unreasonable.
	 */
	public static final int MAX_TOKEN_SIZE = 80;
	private static final int TINY_MAX_KEY_SIZE = 5;
	
	/**
	 * Fully parameterized constructor: wires in a keyword lookup table, the
	 * regex-backed token rules, the set of tokens this lexer may emit, and
	 * the length of the longest key in the lookup table.
	 *
	 * @param lexTable   maps literal lexemes (keywords/symbols) to tokens
	 * @param ruleArray  regex rules for tokens such as identifiers/numbers
	 * @param validMap   which tokens are enabled for this language subset
	 * @param maxKeySize length of the longest key in {@code lexTable}
	 */
	public Lexer(HashMap<String,Tokens> lexTable, EXTRule[] ruleArray, EnumMap<Tokens,Boolean> validMap, int maxKeySize) {
		this.maxKeySize = maxKeySize;
		this.validMap = validMap;
		this.ruleArray = ruleArray;
		this.lexTable = lexTable;
	}
	/* TODO: consider constructor chaining. Note that this(...) cannot call
	 * the instance method tinyMapGen(), so chaining would require turning
	 * the default table into a static factory. This second constructor is
	 * effectively a built-in default configuration anyway.
	 */
	/**
	 * Convenience constructor for the built-in "tiny" language: installs
	 * the default keyword table and two regex rules (identifiers and
	 * integer literals).
	 *
	 * @param validMap which tokens are enabled for this language subset
	 */
	public Lexer(EnumMap<Tokens,Boolean> validMap) {
		// The two token classes that cannot be matched by a literal
		// table lookup, so they are handled by regular expressions.
		EXTRule[] rules = {
				new EXTRule("_?[a-zA-Z]\\w*", Tokens.ID),
				new EXTRule("0|[1-9]\\d*", Tokens.INTNUM)
		};

		this.validMap = validMap;
		this.ruleArray = rules;
		this.lexTable = tinyMapGen();
		this.maxKeySize = TINY_MAX_KEY_SIZE;
	}
	/**
	 * Writes the formatted token output to a file named {@code Lexer.txt}
	 * in the working directory. Note that {@code tokens} is drained by
	 * {@link #formatTokenOutput(Stack)} as a side effect.
	 *
	 * @param tokens the token stack to write out; it is consumed
	 */
	public static void toFile(Stack<Tokens> tokens){
	    // try-with-resources guarantees the writer is closed even when
	    // write() throws — the original leaked the writer in that case
	    // because close() was not in a finally block.
	    try (BufferedWriter out = new BufferedWriter(new FileWriter("Lexer.txt"))) {
            out.write(formatTokenOutput(tokens));
        } catch (IOException e) {
            // Best-effort: report and continue, matching prior behavior.
            e.printStackTrace();
        }
	}
	/**
	 * Drains the token stack and renders it as a single string of token
	 * names, in original (bottom-of-stack first) order.
	 *
	 * <p>Side effect: {@code tokens} is empty when this method returns.
	 *
	 * @param tokens the token stack to format; it is consumed
	 * @return the concatenated token names with '[', ']', ',' and '|' removed
	 */
	public static String formatTokenOutput(Stack<Tokens> tokens) {
	    // Pop everything (top first), then append in reverse so the result
	    // reads bottom-to-top. StringBuilder replaces the O(n^2) repeated
	    // string prepending of the original implementation.
	    ArrayList<Tokens> popped = new ArrayList<Tokens>();
	    while(!tokens.isEmpty()){
	        popped.add(tokens.pop());
	    }
	    StringBuilder sb = new StringBuilder();
	    for(int i = popped.size() - 1; i >= 0; i--){
	        sb.append(popped.get(i));
	    }
	    // The original character class [\[|,|\]] used '|' as if it were
	    // alternation, but inside a class it matches a literal '|'. The
	    // class below is equivalent (same four characters) and explicit.
	    return sb.toString().replaceAll("[\\[\\],|]", "");
	}
	
	/**
	 * Builds the keyword/symbol lookup table for the built-in tiny
	 * language. Word keywords are accepted in both all-lowercase and
	 * all-uppercase form. ID and INTNUM are intentionally absent — they
	 * are matched by regex rules instead of table lookup.
	 *
	 * @return a lexeme-to-token map for the tiny language
	 */
	private HashMap<String,Tokens> tinyMapGen() {
		HashMap<String,Tokens> table = new HashMap<String,Tokens>();

		// Word keywords, each registered in both cases.
		table.put("begin", Tokens.BEGIN);
		table.put("BEGIN", Tokens.BEGIN);
		table.put("end", Tokens.END);
		table.put("END", Tokens.END);
		table.put("print", Tokens.PRINT);
		table.put("PRINT", Tokens.PRINT);
		table.put("read", Tokens.READ);
		table.put("READ", Tokens.READ);

		// Punctuation and operators.
		table.put("(", Tokens.LEFTPAR);
		table.put(")", Tokens.RIGHTPAR);
		table.put(";", Tokens.SEMICOLON);
		table.put(":=", Tokens.ASSIGN);
		table.put(",", Tokens.COMMA);
		table.put("+", Tokens.PLUS);
		table.put("*", Tokens.MULTIPLY);
		table.put("%", Tokens.MODULO);

		return table;
	}
	
	/**
	 * Looks up a lexeme in the keyword table, honoring the valid-token map.
	 *
	 * <p>Fix: the original returned the token whenever it merely had an
	 * entry in {@code validMap}, even when that entry was {@code false}.
	 * Every other validity check in this class requires
	 * {@code validMap.get(...) == true}, so a token explicitly disabled in
	 * the map should not be returned here either.
	 *
	 * @param check the candidate lexeme
	 * @return the matching token if it exists and is enabled, else null
	 */
	private Tokens getTableToken(String check){
		Tokens token = lexTable.get(check);
		// Boolean.TRUE.equals is null-safe: it covers "no such lexeme"
		// (token == null), "token not in validMap", and "token disabled".
		if(Boolean.TRUE.equals(validMap.get(token))){
			return token;
		}
		return null;
	}
	
	// Converts an input string into its token stream.
	/**
	 * Tokenizes an input string into a stack of Tokens, with the first
	 * token of the input on top of the returned stack.
	 *
	 * <p>All whitespace is stripped up front, so token boundaries are found
	 * purely by table lookup and regex matching: a regex match is extended
	 * greedily until adding one more character breaks it, at which point the
	 * longest matched prefix is emitted and the extra character is replayed.
	 *
	 * @param inputString the raw source text to tokenize
	 * @return a stack of tokens with the first token of the input on top
	 * @throws ArrayIndexOutOfBoundsException if the input cannot be
	 *         tokenized: no rule or table entry matched, nothing was
	 *         produced at all, or a lexeme grew past MAX_TOKEN_SIZE
	 *         (overflowing the buffer)
	 */
	public Stack<Tokens> tokenize(String inputString) {
		Stack<Tokens> toker = new Stack<Tokens>();
		ArrayList<Tokens> tokenizedString = new ArrayList<Tokens>();
		// Working buffer for the lexeme currently being assembled; indexing
		// past MAX_TOKEN_SIZE raises ArrayIndexOutOfBoundsException, which
		// doubles as the "lexeme too long" error path.
		char[] buffer = new char[MAX_TOKEN_SIZE];
		int buffIndex = -1;        // index of the last char written to buffer
		int regexMatch = 0;        // how many regex rules match the buffer contents
		boolean prevMatch = false; // did some regex rule match on the previous char?
		boolean intGroup = false;  // currently inside a run of digits?
		Tokens addToken;
		Matcher m;
		// Whitespace is insignificant in this language; drop all of it.
		inputString = inputString.replaceAll("\\s", "");
		
		//Remember to remove the try-catch when you're not testing
		//try {
			
			for(int i = 0; i < inputString.length(); i++) {
				// first put the character in the buffer
				buffer[++buffIndex] = inputString.charAt(i);
				
				// Digit-run handling: 48..57 are ASCII '0'..'9'. If the
				// previous buffered char was a digit (and we are not at the
				// final input char), either keep consuming digits, or — when
				// the run ends — un-read the non-digit character (clear it
				// from the buffer and step i back) so the completed number
				// is emitted by the normal prevMatch path below.
				if(buffIndex != 0 && buffer[buffIndex - 1] >= 48 && buffer[buffIndex - 1] <= 57
						&& i != inputString.length() - 1) {
					if(buffer[buffIndex] >= 48 && buffer[buffIndex] <= 57) {
						prevMatch = false;
						intGroup = true;
						continue;
					}
					else if(intGroup){
						prevMatch = false;
						intGroup = false;
						buffer[buffIndex] = 0;
						buffIndex--;
						i--;
					}
				}
				
				regexMatch = 0;
				// first check if a regex is triggered 
				// this allows for a way to find when longer regex things
				// like identifiers and > 1 char keywords safely 
				for(EXTRule r : ruleArray) {
					m = r.getTokenRegex().matcher(new String(buffer).substring(0,buffIndex + 1));
					if(m.matches() && validMap.get(r.getRuleToken()) == true) { regexMatch++; }
				}
				
				if(regexMatch == 0) {
					
					if(prevMatch) {
						// since a regex wasn't triggered but last time it was
						// we should first check if it's a keyword, and then
						// check if it matches any other regex
						// also we'll be using the substring that doesn't include
						// the recently added character for obvious reasons
						addToken = getTableToken(new String(buffer).substring(0,buffIndex));
						if(addToken != null) {
							tokenizedString.add(addToken);
						}
						else {
							for(EXTRule r : ruleArray) {
								m = r.getTokenRegex().matcher(new String(buffer).substring(0,buffIndex));
								if(m.matches() && validMap.get(r.getRuleToken()) == true) { 
									tokenizedString.add(r.getRuleToken()); 
								}
							}
						}
						// Reset the buffer and replay the character that
						// broke the match (i--) so it starts the next lexeme.
						Arrays.fill(buffer, (char)0);
						buffIndex = -1;
						i--;
						prevMatch = false;
					}
					else{
						// No regex matched now or previously: try the buffer
						// as a literal keyword/symbol lexeme.
						addToken = getTableToken(new String(buffer).substring(0, buffIndex + 1));
						if(addToken != null) {
							tokenizedString.add(addToken);
							Arrays.fill(buffer, (char)0);
							buffIndex = -1;
						}
						else {
							//if its the last character or the buffer has the max
							//key size then this can't be tokenized
							if(i == inputString.length() - 1 || buffIndex == maxKeySize - 1) {
								throw new ArrayIndexOutOfBoundsException();
							}
						}

						prevMatch = false;
					}
				}
				else if(regexMatch > 0) {
					if(i == inputString.length() - 1) {
						//last char (do or die)
						// End of input: emit whatever the full buffer is,
						// preferring a table keyword over a regex token.
						addToken = getTableToken(new String(buffer).substring(0, buffIndex + 1));
						if(addToken != null) {
							tokenizedString.add(addToken);
						}
						else {
							for(EXTRule r : ruleArray) {
								m = r.getTokenRegex().matcher(new String(buffer).substring(0, buffIndex + 1));
								if(m.matches() && validMap.get(r.getRuleToken()) == true) { 
									tokenizedString.add(r.getRuleToken()); 
								}
							}
						}
						Arrays.fill(buffer, (char)0);
						buffIndex = -1;
						prevMatch = false;
					}
					else {
						// a regex was triggered so just have prevMatch = true
						prevMatch = true;
					}
				}
				else {
					// NOTE(review): regexMatch is a count and can never be
					// negative, so this branch looks unreachable — confirm
					// before removing.
					throw new ArrayIndexOutOfBoundsException(); 
				}
			}
		/*} catch(ArrayIndexOutOfBoundsException e){
			System.err.println("Inputted string could not be tokenized due" +
					" to a buffer overflow, Parse failed");
			System.err.println("Dumping Buffer to console:");
			System.err.println(buffer);
			System.err.println("Dumping partially tokenized string " +
					"to console:");
			System.err.println(tokenizedString);
			System.err.println("Dumping offending string to console:");
			System.err.println(inputString);
			System.exit(1);
		}*/
		// Producing zero tokens is treated the same as a tokenize failure.
		if(tokenizedString.size() == 0){
			throw new ArrayIndexOutOfBoundsException(); 
		}
		
		// Push in reverse so the FIRST token of the input ends up on top.
		for(int i = tokenizedString.size() - 1; i > -1; i--){
			toker.push(tokenizedString.get(i));
		}
		return toker;
	}
		
		//TODO implement compareTo and toString
}
