package edu.towson.cosc.cosc455.cannon.compiler;

import edu.towson.cosc.cosc455.interfaces.LexicalAnalyzer;

public class MyLexicalAnalyzer implements LexicalAnalyzer {
	
	//variable initializations
	private char[] lexeme = new char[1000];
	private char nextChar;
	private int lexLength;
	private int position;
	private String token;
	
	//default constructor that sets the position to 0
	public MyLexicalAnalyzer() {
		position = 0;
	}
	
	//gets the first character that isn't white space and adds it
	//if it's a hashtag it processes the tag
	//if it's legal char it processes text
	@Override
	public void getNextToken() {
		if (position >= MyCompiler.document.length()-1) MyCompiler.token = null;
		else {
			lexLength = 0;
			
			//needs to initially get character only if it's at the beginning of the doc
			if (position == 0)
				getCharacter();
		
			getNonBlank();
			addCharacter();
			if (isHashtag(nextChar)) {
				token = processTag();
				if (lookupToken()) {
					MyCompiler.token = token;
				}
				else {
					System.err.println("Lexical error! \"" + token + "\" is not a legal tag.");
					System.exit(0);
				}
			}
			else if (isLegal(nextChar)) {
				token = processText();
				MyCompiler.token = token;
			}
			else {
				System.err.println("Lexical error! \"" + nextChar + "\" is not a legal character.");
				System.exit(0);
			}
		}
	}
	
	//Gets the next char in the document after checking to see if there is a char to get
	//If there isn't a next char it prints an error
	@Override
	public void getCharacter() {
		if (position < MyCompiler.document.length()) {
			nextChar = MyCompiler.document.charAt(position++);
		}
		else {
			System.err.println("Error! MyLexicalAnalyzer is trying to get a character when there are no characters left to get!");
			System.exit(0);
		}
	}

	//adds a char to the list of chars
	@Override
	public void addCharacter() {
		if (isLegal(nextChar)) {
			if (lexLength <= 998)
				lexeme[lexLength++] = nextChar;
			else {
				System.err.println("Error! The lexeme is too long! This compiler only allows for a maximum of 1000 characters in a lexeme.");
				System.exit(0);
			}
		}
		else {
			System.err.println("Lexical error! \"" + nextChar + "\" is not a legal character.");
			System.exit(0);
		}
	}

	@Override
	public boolean isSpace(String c) {
		return isSpace(c.charAt(0));
	}

	//returns whether or not token is a legal tag
	@Override
	public boolean lookupToken() {
		if (	(token.equalsIgnoreCase("#HAI")) || (token.equalsIgnoreCase("#KTHXBYE")) || (token.equalsIgnoreCase("#OBTW")) || 
				(token.equalsIgnoreCase("#TLDR")) || (token.equalsIgnoreCase("#MAEK HEAD")) || (token.equalsIgnoreCase("#OIC")) || 
				(token.equalsIgnoreCase("#GIMMEH TITLE")) || (token.equalsIgnoreCase("#MKAY")) || (token.equalsIgnoreCase("#MAEK PARAGRAF")) || 
				(token.equalsIgnoreCase("#GIMMEH BOLD")) || (token.equalsIgnoreCase("#GIMMEH ITALICS")) || (token.equalsIgnoreCase("#MAEK LIST")) || 
				(token.equalsIgnoreCase("#GIMMEH ITEM")) || (token.equalsIgnoreCase("#GIMMEH NEWLINE")) || (token.equalsIgnoreCase("#GIMMEH SOUNDZ")) || 
				(token.equalsIgnoreCase("#GIMMEH VIDZ")) || (token.equalsIgnoreCase("#I HAS A")) || (token.equalsIgnoreCase("#ITZ")) || 
				(token.equalsIgnoreCase("#VISIBLE")) )
			return true;
		else return false;
	}
	
	//disregard white spaces, acquire chars
	private void getNonBlank() {
		while (isWhiteSpace(nextChar)) getCharacter();
	}
	
	private boolean isSpace(char c) {
		return (c == ' ');
	}
	
	private boolean isWhiteSpace(char c) {
		return (isSpace(c) || c == '\t' || c == '\n');
	}
	
	private boolean isHashtag(char c) {
		return (c == '#');
	}
	
	private boolean isLegal(char c) {
		return ((c != '<') && (c != '>'));
	}
	
	private String processTag() {
		getCharacter();
		while (!isWhiteSpace(nextChar) && position < MyCompiler.document.length()) {
			addCharacter();
			getCharacter();
		}
		//This line is necessary because if it reaches the end of the MyCompiler.document it will not add the last char to the lexeme
		if (position == MyCompiler.document.length() && !isWhiteSpace(nextChar)) addCharacter();
		
		token = new String(lexeme);
		token = token.substring(0, lexLength);
		if (lookupToken()) return token;
		else if(position < MyCompiler.document.length()) {
			addCharacter();
			getCharacter();
			while (!isWhiteSpace(nextChar) && position < MyCompiler.document.length()) {
				addCharacter();
				getCharacter();
			}
			token = new String(lexeme);
			token = token.substring(0, lexLength);
			if (lookupToken()) return token;
			else if(position < MyCompiler.document.length()) {
				addCharacter();
				getCharacter();
				while (!isWhiteSpace(nextChar) && position < MyCompiler.document.length()) {
					addCharacter();
					getCharacter();
				}
				token = new String(lexeme);
				token = token.substring(0, lexLength);
				return token;
			}
		}
		return token;
	}
	
	private String processText() {
		getCharacter();
		while (!isHashtag(nextChar) && position < MyCompiler.document.length()) {
			addCharacter();
			getCharacter();
		}
		if (position == MyCompiler.document.length()) addCharacter();
		token = new String(lexeme);
		return token.substring(0, lexLength-whiteSpaceCounter(token, lexLength-1));
	}
	
	//method to figure out how much white space there is at the end of a token
	//the returned number is subtracted from lexLength to send the token without white space at the end
	private int whiteSpaceCounter(String s, int i) {
		if (!isWhiteSpace(s.charAt(i)))
			return 0;
		else
			return 1 + whiteSpaceCounter(s, i-1);
	}
}
