superstrict

import BRL.LinkedList

import "../charutil/charutil.bmx"
import "SourceFile.bmx"
import "Token.bmx"
import "CharDefs.bmx"
import "Error.bmx"
'import "Symbol.bmx"
'import "SymbolTable.bmx"

type DBLexer

	'// -------------------------------------------------------------------------------------------------------------
	'// Lexer states.
	'// NOTE(review): the gap in the numbering (4-6) suggests states that were removed at some point — confirm
	'// before reusing those values.
	'// -------------------------------------------------------------------------------------------------------------
	global STATE_START:short = 1; '// Start looking for characters.
	global STATE_INT_LITERAL:short = 2; '// Building an integer literal.
	global STATE_REAL_LITERAL:short = 3; '// Building a real (floating-point) literal.
	global STATE_CHAR_LITERAL:short = 7; '// Building a character literal (between apostrophes).
	global STATE_STR_LITERAL:short = 8; '// Building a string literal (between double quotes).
	global STATE_IDENT:short = 9; '// Assume it's an identifier.
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Create, set up and return a new lexer bound to 'source'. Returns null if no source file is supplied.
	'// -------------------------------------------------------------------------------------------------------------
	function create:DBLexer(source:SourceFile)
	
		'// A lexer is meaningless without a translation unit to analyse; refuse to build one.
		if (source = null) then
		
			return null;
		
		end if
		
		local lexer:DBLexer = new DBLexer;
		lexer.m_source = source;
		lexer.initKeywords();
		
		return lexer;
	
	end function
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Perform lexical analysis on the translation unit associated with this lexer, and create a token stream for
	'// the parser to use.
	'//
	'// 'errors' collects any Error objects raised while lexing. Tokens are appended to 'm_tokenStream', which is
	'// terminated by a single TYPE_EOF token; bracket-depth mismatches are reported once the whole unit is lexed.
	'// -------------------------------------------------------------------------------------------------------------
	method analyse(errors:TList)
	
		m_errorList = errors;
		
		local curLine:SourceLine;
		local nextToken:Token;
		
		'// Loop through the translation unit, one line at a time, to perform lexical analysis on each line.
		for curLine = eachin m_source.m_lines
		
			nextToken = null;
			
			'// Start at -1: lexNextToken pre-increments m_pos before examining each character.
			m_pos = -1;
			
			repeat
			
				nextToken = lexNextToken(curLine);
				
				if (nextToken) ..
					m_lastToken = nextToken;
				
				'// A TYPE_NONE token means the line was exhausted without forming a token; it is not emitted and
				'// terminates the per-line loop below.
				if (nextToken.m_type <> Token.TYPE_NONE) ..
					m_tokenStream.addLast(nextToken);
			
			until (nextToken.m_type = Token.TYPE_NONE)
		
		next
		
		'// Add the end-of-file token.
		'// NOTE(review): if the source has no lines at all, 'curLine' is still null here — confirm Token.create
		'// and reportBracketMismatches tolerate a null line.
		m_tokenStream.addLast(Token.create(curLine, Token.TYPE_EOF, "<eof>"));
		
		'// Report an error if there are any bracket mismatches.
		reportBracketMismatches(curLine);
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Report an error if there are any bracket mismatches.
	'//
	'// Examines the running depth counters accumulated during lexing: a positive depth means an opening bracket
	'// was never closed; a negative depth means a closing bracket appeared without a matching opener. 'curLine'
	'// is used as the reported error location (the last line that was lexed).
	'// -------------------------------------------------------------------------------------------------------------
	method reportBracketMismatches(curLine:SourceLine)
	
		reportDepthMismatch(m_braceDepth, curLine, "{", "{}");
		reportDepthMismatch(m_squareDepth, curLine, "[", "[]");
		reportDepthMismatch(m_parenDepth, curLine, "(", "()");
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Helper: report one bracket family's depth mismatch (factored out of the three copy-pasted checks).
	'// 'openLexeme' is reported for an unterminated opening bracket (depth > 0); 'pairLexeme' for a closing
	'// bracket with no opener (depth < 0). A depth of zero is balanced and reports nothing.
	'// -------------------------------------------------------------------------------------------------------------
	method reportDepthMismatch(depth:int, curLine:SourceLine, openLexeme:string, pairLexeme:string)
	
		if (depth > 0) then
		
			m_errorList.addLast(Error.createError( ..
				Error.UNTERMINATED_BRACKET, ..
				curLine, ..
				openLexeme));
		
		else if (depth < 0) then
		
			m_errorList.addLast(Error.createError( ..
				Error.OPENING_BRACKET_MISSING, ..
				curLine, ..
				pairLexeme));
		
		end if
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Find the next token in the translation unit, and return that token.
	'//
	'// Drives the state machine one character at a time from the current position 'm_pos'. A token of type
	'// TYPE_NONE is returned when the line is exhausted before a token forms. lex_state_start's return value is
	'// a three-way code: 0 = character handled, stay in the start state; 1 = a multi-character token was begun
	'// (state switched); 2 = a complete single-character token was formed. The other state handlers return true
	'// when the token is complete.
	'// -------------------------------------------------------------------------------------------------------------
	method lexNextToken:Token(curLine:SourceLine)
	
		local nextToken:Token = Token.create(curLine, Token.TYPE_NONE, "");
		local lineSize:int = curLine.m_data.length;
		
		m_state = STATE_START;
		
		while (true)
		
			'// Move along.
			m_pos :+ 1;
			
			'// End of line: the token (possibly TYPE_NONE) is returned as-is.
			if (m_pos >= lineSize) ..
				exit;
			
			'// Peek at the next four characters.
			lexPeek(curLine);
			
			'// Check the state of the lexical anaylsis, and perform the appropriate action(s).
			select (m_state)
			
				case STATE_START
					select (lex_state_start(curLine, nextToken))
					
						case 0
						case 1
							continue;
						case 2
							exit;
					
					end select
				case STATE_INT_LITERAL
					if (lex_state_int_literal(curLine, nextToken)) ..
						exit;
				case STATE_REAL_LITERAL
					if (lex_state_real_literal(curLine, nextToken)) ..
						exit;
				case STATE_CHAR_LITERAL
					if (lex_state_char_literal(curLine, nextToken)) ..
						exit;
				case STATE_STR_LITERAL
					if (lex_state_str_literal(curLine, nextToken)) ..
						exit;
				case STATE_IDENT
					if (lex_state_ident(curLine, nextToken)) ..
						exit;
				default
					'// NOTE(review): m_state is only ever assigned the states handled above, so this branch looks
					'// defensive — confirm it is reachable before relying on it.
					if (not charutil.isWhitespace(m_peek[0])) then
					
						m_errorList.addLast(Error.createError( ..
							Error.INVALID_TOKEN, ..
							nextToken.m_line, ..
							nextToken.m_lexeme));
					
					end if
			
			end select
		
		end while
		
		return nextToken;
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// State: Start forming a token.
	'//
	'// Classifies the character at m_pos (via m_peek) and either completes a single-character token, or switches
	'// the lexer into a multi-character state. Return codes (consumed by lexNextToken): 0 = character handled,
	'// stay in the start state; 1 = state switched, keep lexing; 2 = a complete token was formed. Whitespace and
	'// any character matching no branch fall through and implicitly return 0.
	'// -------------------------------------------------------------------------------------------------------------
	method lex_state_start:byte(curLine:SourceLine, nextToken:Token)
	
		'// Numerical literals.
		if (charutil.isDigit(m_peek[0])) then
		
			'// A leading zero is special-cased: only "0" and "0.<digits>" are legal; "0<digit>" is rejected.
			if (m_peek[0] = CHAR_ZERO) then
			
				if (m_peek[1] = CHAR_DOT) then
				
					if (charutil.isDigit(m_peek[2])) then
					
						'// "0.<digit>": begin a real literal, consuming both "0" and ".".
						nextToken.m_type = Token.TYPE_REAL;
						nextToken.poke(m_peek[0]);
						nextToken.poke(m_peek[1]);
						m_state = STATE_REAL_LITERAL;
						m_pos :+ 1;
						return 1;
					
					else
					
						'// "0." with no digit after it: malformed literal.
						nextToken.poke(m_peek[0]);
						nextToken.poke(m_peek[1]);
						m_errorList.addLast(Error.createError( ..
							Error.INVALID_TOKEN, ..
							nextToken.m_line, ..
							nextToken.m_lexeme));
						m_pos :+ 1;
						return 0;
					
					end if
				
				else if (charutil.isDigit(m_peek[1])) then
				
					'// Leading zero followed by another digit (e.g. "07") is rejected.
					m_errorList.addLast(Error.createError( ..
						Error.INVALID_TOKEN, ..
						curLine, ..
						chr(m_peek[0])));
					return 0;
				
				else '// In case it's a single 0.
				
					nextToken.m_type = Token.TYPE_INTEGER;
					nextToken.poke(m_peek[0]);
					return 2;
				
				end if
			
			else
			
				'// Non-zero digit: begin an integer literal.
				nextToken.m_type = Token.TYPE_INTEGER;
				m_state = STATE_INT_LITERAL;
				nextToken.poke(m_peek[0]);
				return 1;
			
			end if
		
		end if
		
		'// Character literal. The opening apostrophe itself is not stored in the lexeme.
		if (m_peek[0] = CHAR_APOS) then
		
			nextToken.is_literal = true;
			nextToken.m_type = Token.TYPE_CHAR;
			m_state = STATE_CHAR_LITERAL;
			return 1;
		
		end if
		
		'// String literal. The opening quote itself is not stored in the lexeme.
		if (m_peek[0] = CHAR_QUOTE) then
		
			nextToken.is_literal = true;
			nextToken.m_type = Token.TYPE_STRING;
			m_state = STATE_STR_LITERAL;
			return 1;
		
		end if
		
		'// Identifier. The first character is processed immediately via lex_state_ident (which also handles
		'// single-character identifiers at the end of a line).
		if (charutil.isLetter(m_peek[0]) or m_peek[0] = CHAR_UNDERSCR) then
		
			nextToken.is_identifier = true;
			nextToken.m_type = Token.TYPE_IDENT;
			m_state = STATE_IDENT;
			lex_state_ident(curLine, nextToken);
			return 1;
		
		end if
		
		'// Punctuator. All punctuators are single-character tokens; the three bracket kinds also maintain their
		'// depth counters for the mismatch report at the end of analysis.
		if (isPunctuatorChar(m_peek[0])) then
		
			nextToken.is_punctuator = true;
			
			select (m_peek[0])
			
				case CHAR_PLUS
					nextToken.m_type = Token.TYPE_PLUS;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_MINUS
					nextToken.m_type = Token.TYPE_MINUS;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_ASTERISK
					nextToken.m_type = Token.TYPE_ASTERISK;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_SLASH
					nextToken.m_type = Token.TYPE_DIVIDE;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_PERCENT
					nextToken.m_type = Token.TYPE_PERCENT;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_CARET
					nextToken.m_type = Token.TYPE_CARET;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_LESS
					nextToken.m_type = Token.TYPE_OPEN_ANGEL;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_GREATER
					nextToken.m_type = Token.TYPE_CLOSE_ANGEL;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_EQUAL
					nextToken.m_type = Token.TYPE_ASSIGN;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_AMPERSAND
					nextToken.m_type = Token.TYPE_AMPERSAND;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_VBAR
					nextToken.m_type = Token.TYPE_VBAR;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_DOT
					nextToken.m_type = Token.TYPE_DOT;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_QUESTION
					nextToken.m_type = Token.TYPE_QUESTION;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_COMMA
					nextToken.m_type = Token.TYPE_COMMA;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_COLON
					nextToken.m_type = Token.TYPE_COLON;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_SEMICOLON
					nextToken.m_type = Token.TYPE_SEMICOLON;
					nextToken.poke(m_peek[0]);
					return 2;
				case CHAR_OPEN_BRACE
					nextToken.m_type = Token.TYPE_OPEN_BRACE;
					nextToken.poke(m_peek[0]);
					m_braceDepth :+ 1;
					return 2;
				case CHAR_CLOSE_BRACE
					nextToken.m_type = Token.TYPE_CLOSE_BRACE;
					nextToken.poke(m_peek[0]);
					m_braceDepth :- 1;
					return 2;
				case CHAR_OPEN_SQUARE
					nextToken.m_type = Token.TYPE_OPEN_SQUARE;
					nextToken.poke(m_peek[0]);
					m_squareDepth :+ 1;
					return 2;
				case CHAR_CLOSE_SQUARE
					nextToken.m_type = Token.TYPE_CLOSE_SQUARE;
					nextToken.poke(m_peek[0]);
					m_squareDepth :- 1;
					return 2;
				case CHAR_OPEN_PAREN
					nextToken.m_type = Token.TYPE_OPEN_PAREN;
					nextToken.poke(m_peek[0]);
					m_parenDepth :+ 1;
					return 2;
				case CHAR_CLOSE_PAREN
					nextToken.m_type = Token.TYPE_CLOSE_PAREN;
					nextToken.poke(m_peek[0]);
					m_parenDepth :- 1;
					return 2;
			
			end select
		
		end if
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Get the next four characters from the current position. We always look ahead three additional characters
	'// (even when there aren't any characters left on the current line); positions outside the line are padded
	'// with 0 so the state handlers can safely inspect the lookahead.
	'//
	'// BUGFIX: the old implementation relied on catching TArrayBoundsException, but BlitzMax only raises bounds
	'// exceptions in debug builds — in release builds the out-of-range read went unchecked. An explicit bounds
	'// test gives identical padding behaviour in every build mode, without exception-driven control flow.
	'// -------------------------------------------------------------------------------------------------------------
	method lexPeek(curLine:SourceLine)
	
		local lineLength:int = curLine.m_data.length;
		
		for local i:int = m_pos until m_pos+4
		
			if (i >= 0 and i < lineLength) then
			
				m_peek[i - m_pos] = curLine.m_data[i];
			
			else
			
				m_peek[i - m_pos] = 0;
			
			end if
		
		next
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// State: Integer literal.
	'//
	'// Accumulates digits, promotes the token to a real literal when a decimal point followed by a digit appears,
	'// and reports a malformed literal for a trailing decimal point. Returns true when the literal is complete.
	'// -------------------------------------------------------------------------------------------------------------
	method lex_state_int_literal:byte(curLine:SourceLine, nextToken:Token)
	
		'// Still inside the number: accumulate the digit and keep lexing.
		if (charutil.isDigit(m_peek[0])) then
		
			nextToken.poke(m_peek[0]);
			return false;
		
		end if
		
		'// Anything other than a decimal point ends the integer; step back so the terminator is lexed again.
		if (m_peek[0] <> CHAR_DOT) then
		
			m_pos :- 1;
			return true;
		
		end if
		
		'// Decimal point followed by a digit: promote the token to a real literal and switch states.
		if (charutil.isDigit(m_peek[1])) then
		
			nextToken.m_type = Token.TYPE_REAL;
			nextToken.poke(m_peek[0]);
			m_state = STATE_REAL_LITERAL;
			return false;
		
		end if
		
		'// Decimal point with no digit after it: malformed literal.
		nextToken.poke(m_peek[0]);
		nextToken.poke(m_peek[1]);
		m_errorList.addLast(Error.createError( ..
			Error.INVALID_TOKEN, ..
			curLine, ..
			nextToken.m_lexeme));
		m_pos :+ 1;
		
		return false;
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// State: Real literal token.
	'//
	'// Accumulates the digits after the decimal point. Returns true when a non-digit ends the literal.
	'// -------------------------------------------------------------------------------------------------------------
	method lex_state_real_literal:byte(curLine:SourceLine, nextToken:Token)
	
		'// A non-digit terminates the literal; step back so the terminating character is lexed again.
		if (not charutil.isDigit(m_peek[0])) then
		
			m_pos :- 1;
			return true;
		
		end if
		
		nextToken.poke(m_peek[0]);
		
		return false;
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// State: Character literal token.
	'//
	'// Accumulates characters between the apostrophes. Recognised backslash escapes are stored verbatim
	'// (backslash plus letter); unknown escapes raise UNKNOWN_ESCAPE_SEQUENCE. Returns true when the closing
	'// apostrophe is reached, at which point the lexeme is validated: it must hold exactly one character, or a
	'// single two-character escape sequence.
	'// -------------------------------------------------------------------------------------------------------------
	method lex_state_char_literal:byte(curLine:SourceLine, nextToken:Token)
	
		if (m_peek[0] <> CHAR_APOS) then
		
			if (m_peek[0] = CHAR_BACKSLASH) then
			
				select (m_peek[1])
				
					case CHAR_BACKSLASH, ..
					     CHAR_LC_N, ..
					     CHAR_UC_N, ..
					     CHAR_LC_R, ..
					     CHAR_UC_R, ..
					     CHAR_QUOTE, ..
					     CHAR_APOS, ..
					     CHAR_ZERO, ..
					     CHAR_LC_A, ..
					     CHAR_UC_A, ..
					     CHAR_LC_F, ..
					     CHAR_UC_F, ..
					     CHAR_LC_T, ..
					     CHAR_UC_T, ..
					     CHAR_LC_V
							'// Store the escape verbatim and skip past its second character.
							nextToken.poke(m_peek[0]);
							nextToken.poke(m_peek[1]);
							m_pos :+ 1;
					default
						m_errorList.addLast(Error.createError( ..
							Error.UNKNOWN_ESCAPE_SEQUENCE, ..
							curLine, ..
							chr(m_peek[0]) + chr(m_peek[1])));
						m_pos :+ 1;
				
				end select
			
			else
			
				nextToken.poke(m_peek[0]);
			
			end if
		
		else
		
			'// Closing apostrophe: validate the accumulated lexeme.
			'// BUGFIX: the old check compared m_peek[0] — which is always the closing apostrophe in this branch —
			'// against CHAR_BACKSLASH, and used lengths 3/2 where 2/1 were meant, so the escape exemption never
			'// applied and two-character literals such as 'ab' were never reported. Test the lexeme's first
			'// character instead: valid lexemes are one plain character, or a two-character escape.
			if (nextToken.m_lexeme.length > 1) then
			
				if (not (nextToken.m_lexeme[0] = CHAR_BACKSLASH and nextToken.m_lexeme.length = 2)) then
				
					m_errorList.addLast(Error.createError( ..
						Error.INVALID_TOKEN, ..
						curLine, ..
						nextToken.m_lexeme));
				
				end if
			
			end if
			
			return true;
		
		end if
		
		return false;
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// State: String literal token.
	'//
	'// Accumulates characters between the quotes. Recognised backslash escapes are stored verbatim (backslash
	'// plus letter); unknown escapes raise UNKNOWN_ESCAPE_SEQUENCE. Returns true on the closing quote.
	'// -------------------------------------------------------------------------------------------------------------
	method lex_state_str_literal:byte(curLine:SourceLine, nextToken:Token)
	
		'// A quote (outside an escape sequence) closes the string literal.
		if (m_peek[0] = CHAR_QUOTE) ..
			return true;
		
		'// Ordinary character: accumulate it.
		if (m_peek[0] <> CHAR_BACKSLASH) then
		
			nextToken.poke(m_peek[0]);
			return false;
		
		end if
		
		'// Backslash: the following character must form a recognised escape sequence.
		select (m_peek[1])
		
			case CHAR_BACKSLASH, ..
			     CHAR_LC_N, ..
			     CHAR_UC_N, ..
			     CHAR_LC_R, ..
			     CHAR_UC_R, ..
			     CHAR_QUOTE, ..
			     CHAR_APOS, ..
			     CHAR_ZERO, ..
			     CHAR_LC_A, ..
			     CHAR_UC_A, ..
			     CHAR_LC_F, ..
			     CHAR_UC_F, ..
			     CHAR_LC_T, ..
			     CHAR_UC_T, ..
			     CHAR_LC_V
					'// Store the escape verbatim and skip past its second character.
					nextToken.poke(m_peek[0]);
					nextToken.poke(m_peek[1]);
					m_pos :+ 1;
			default
				m_errorList.addLast(Error.createError( ..
					Error.UNKNOWN_ESCAPE_SEQUENCE, ..
					curLine, ..
					chr(m_peek[0]) + chr(m_peek[1])));
				m_pos :+ 1;
		
		end select
		
		return false;
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// State: Identifier token.
	'//
	'// Accumulates letters, digits and underscores. When the lookahead shows the identifier is complete, the
	'// lexeme is checked against the keyword table and the token re-typed accordingly. Returns true when the
	'// identifier is complete.
	'// -------------------------------------------------------------------------------------------------------------
	method lex_state_ident:byte(curLine:SourceLine, nextToken:Token)
	
		if (charutil.isLetterOrDigit(m_peek[0]) or m_peek[0] = CHAR_UNDERSCR) then
		
			nextToken.poke(m_peek[0]);
		
		else
		
			'// Not an identifier character: step back and re-peek so the lookahead test below examines the
			'// correct characters.
			m_pos :- 1;
			lexPeek(curLine);
		
		end if
		
		'// The identifier ends when the next character is not an identifier character, or the line runs out.
		if ((not charutil.isLetterOrDigit(m_peek[1]) and m_peek[1] <> CHAR_UNDERSCR) ..
		or m_pos >= curLine.m_data.length-1) then
		
			'// If the lexeme matches a reserved word, re-type the token as that keyword.
			if (m_keywords.contains(nextToken.m_lexeme)) then
			
				local kword:Token = Token(m_keywords.valueForKey(nextToken.m_lexeme));
				
				nextToken.is_keyword = true;
				nextToken.m_type = kword.m_type;
				
				'// 'true' and 'false' are keywords that double as literals.
				select (kword.m_type)
				
					case Token.RID_TRUE, ..
					     Token.RID_FALSE
						nextToken.is_literal = true;
				
				end select
			
			end if
			
			return true;
		
		end if
	
		return false;
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Initialize the keyword lookup table.
	'// -------------------------------------------------------------------------------------------------------------
	method initKeywords()
	
		'// Reserved words, paired index-for-index with their token types below:
		'// primitive type names, boolean literals, then the database-structure keywords.
		local names:string[] = [ ..
			"bool", "char", "int", "real", "string", ..
			"true", "false", ..
			"database", "table", "row"];
		
		local rids:short[] = [ ..
			Token.RID_BOOL, Token.RID_CHAR, Token.RID_INT, Token.RID_REAL, Token.RID_STRING, ..
			Token.RID_TRUE, Token.RID_FALSE, ..
			Token.RID_DATABASE, Token.RID_TABLE, Token.RID_ROW];
		
		for local i:int = 0 until names.length
		
			addKeyword(names[i], rids[i]);
		
		next
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Add a keyword to the keyword lookup table.
	'//
	'// Keyword tokens carry no source line; they act purely as lookup templates for lex_state_ident.
	'// -------------------------------------------------------------------------------------------------------------
	method addKeyword(name:string, tokenType:short)
	
		m_keywords.insert(name, Token.create(null, tokenType, name));
		
		'// Legacy symbol-table registration, kept for reference:
		'local symb:Symbol = new Symbol;
		'symb.m_lexeme = name;
		'symb.m_type = tokenType;
		'SymbolTable.m_ridTable.m_declarationTable.insert(name, symb);
		'SymbolTable.m_ridTable.m_declarationList.addLast(symb);
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Return true if 'char' is a valid punctuator character.
	'// -------------------------------------------------------------------------------------------------------------
	method isPunctuatorChar:byte(char:byte)
	
		select (char)
		
			case CHAR_PLUS, CHAR_MINUS, CHAR_ASTERISK, CHAR_SLASH, CHAR_PERCENT, CHAR_CARET, ..
			     CHAR_LESS, CHAR_GREATER, CHAR_EQUAL, CHAR_AMPERSAND, CHAR_VBAR, ..
			     CHAR_DOT, CHAR_COLON, CHAR_QUESTION, CHAR_COMMA, CHAR_SEMICOLON, ..
			     CHAR_OPEN_PAREN, CHAR_CLOSE_PAREN, ..
			     CHAR_OPEN_SQUARE, CHAR_CLOSE_SQUARE, ..
			     CHAR_OPEN_BRACE, CHAR_CLOSE_BRACE
				return true;
			default
				return false;
		
		end select
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Dump the token stream to disc.
	'//
	'// Writes one line per token (via Token.tokenInfo) to "<source path>_token_stream.dump". Silently returns if
	'// a stale dump cannot be deleted or the new file cannot be opened for writing.
	'// -------------------------------------------------------------------------------------------------------------
	method dumpToDisc()
	
		local url:string = m_source.m_path + "_token_stream.dump";
		
		'// Remove any stale dump; bail out if the old file cannot be deleted.
		if (fileType(url) = FILETYPE_FILE) then
		
			deleteFile(url);
			if (fileType(url) = FILETYPE_FILE) ..
				return;
		
		end if
		
		local fileStream:TStream = writeStream(url);
		
		'// BUGFIX: writeStream returns null on failure (e.g. unwritable location); the old code would then crash
		'// on fileStream.writeLine. Bail out instead, consistent with the delete-failure path above.
		if (not fileStream) ..
			return;
		
		for local t:Token = eachin m_tokenStream
		
			fileStream.writeLine(t.tokenInfo());
		
		next
		
		fileStream.close();
	
	end method
	
	'// -------------------------------------------------------------------------------------------------------------
	'// The state of the lexer (one of the STATE_* constants above; reset to STATE_START per token).
	'// -------------------------------------------------------------------------------------------------------------
	field m_state:short;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// The character position in the current line. Reset to -1 at the start of each line; lexNextToken
	'// pre-increments it before each character is examined.
	'// -------------------------------------------------------------------------------------------------------------
	field m_pos:int;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// The next four characters (starting from 'm_pos'). Positions past the end of the line are padded with 0 —
	'// see lexPeek.
	'// -------------------------------------------------------------------------------------------------------------
	field m_peek:short[4];
	
	'// -------------------------------------------------------------------------------------------------------------
	'// The token stream that contains all the tokens that the lexer tokenized (terminated by a TYPE_EOF token).
	'// -------------------------------------------------------------------------------------------------------------
	field m_tokenStream:TList = new TList;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// A keyword lookup table (keys are strings representing the keyword names and values are 'Token' objects).
	'// Populated by initKeywords/addKeyword.
	'// -------------------------------------------------------------------------------------------------------------
	field m_keywords:TMap = new TMap;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// Bracket depths: incremented on opening and decremented on closing brackets during lexing, then checked by
	'// reportBracketMismatches (non-zero means a mismatch).
	'// -------------------------------------------------------------------------------------------------------------
	field m_braceDepth:int = 0;
	field m_squareDepth:int = 0;
	field m_parenDepth:int = 0;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// The source file to perform a lexical analysis on.
	'// -------------------------------------------------------------------------------------------------------------
	field m_source:SourceFile;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// A pointer to the errorlist (supplied by the caller of 'analyse').
	'// -------------------------------------------------------------------------------------------------------------
	field m_errorList:TList;
	
	'// -------------------------------------------------------------------------------------------------------------
	'// A pointer to the last token we tokenized.
	'// -------------------------------------------------------------------------------------------------------------
	field m_lastToken:Token;

end type
