#include "StdAfx.h"
#include "JackTokenizer.h"


// The 21 reserved keywords of the Jack language (used by tokenType()).
std::string kw[21] = { "class", "method", "function", "constructor", "int", "boolean", "char", "void", 
	"var", "static", "field", "let", "do", "if", "else", "while", "return", "true", 
	"false", "null", "this"};               
// The 19 symbols of the Jack grammar.
// BUG FIX: the last entry was '_', which is NOT a Jack symbol — '_' is an
// identifier character (and is treated as one by tokenize()). The Jack
// grammar's 19th symbol is '~' (unary not), restored here.
char Symbol[19] = {'{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', 
	'>', '=', '~'}; 

// Open the given .jack file for reading and prepare the tokenizer state.
// On failure, reports the error and terminates the program.
// BUG FIX: errors now go to cerr and exit with a non-zero status
// (the original wrote to cout and called exit(0), which signals success).
JackTokenizer::JackTokenizer(char* filename)
{
	// open the jack file as input file
	infile.open(filename, ios::in);
	// check if it succeeded and let the user know
	if(infile.fail()){
		cerr <<"Error opening file: "<< filename << "\n"
			<< "Check your input\n";
		getchar();
		exit(1);
	}else
	{
		cout << filename << " opened successfully... \n";
	}
	// do NOT skip whitespace automatically: tokenize() must see the
	// spaces/newlines itself to delimit tokens (the original comment
	// claimed the opposite of what unsetf does)
	infile.unsetf(ios::skipws);
	// index into tokenizedFile marking the current token position
	tokenptr=0;
}


// Close the input file when the tokenizer is destroyed.
JackTokenizer::~JackTokenizer()
{
	infile.close();
}

// Read the whole input file and build `tokenizedFile`: a pre-processed
// stream where tokens are space-separated, integer constants are tagged
// with a trailing '@', comments are stripped, and string constants are
// copied verbatim between their quotes.
// BUG FIX: every inner read loop now also stops on infile.fail(); the
// original spun forever at EOF (a failed `>>` leaves ch unchanged) on an
// unterminated line comment, block comment, or string constant.
void JackTokenizer::tokenize()
{
	// the current char we handle
	char ch;
	// true while we are mid-word, so we know to append a separating
	// space once the word ends
	bool bProcessingWord=0;
	// true when the current char was read ahead and must be reused
	bool bDontProcessNextChar=0;
	// go over all chars in the file until eof/failure
	while(!infile.fail()){
		// fetch a new char unless one is already pending
		if(!bDontProcessNextChar)
		{
			infile >> ch;
		}
		// the pending char (if any) is now being handled
		bDontProcessNextChar=false;
		// a word just ended: append the separating space
		if(bProcessingWord==true&&(!(isalnum(ch)||ch=='_'))){
			tokenizedFile+=' ';
			// we are done processing the word
			bProcessingWord=false;
		}
		// skip whitespace if any
		while(ch==' '||ch=='\t'||ch=='\n'||ch=='\r'){
			infile >> ch;
			// if eof we are done!
			if(infile.fail()) 
			{
				return;
			}
		}
		// handle decimal numbers
		while(isdigit(ch)){
			tokenizedFile+=ch;
			infile >> ch;
			// if we got to the end of the number
			if(!isdigit(ch))
			{
				// tag it as a number so the compiler can recognize it;
				// any sentinel works as long as the compiler agrees on it
				tokenizedFile+='@';
				// if it is part of a word, do not forget the space
				if(bProcessingWord==true)
				{
					tokenizedFile+=' '; 
					bProcessingWord=0;
				}
			}
		}
		// add the current char to the tokenized file
		tokenizedFile+=ch;
		/* build suffixes of several lengths and match them against the
		   keywords that must be followed by an identifier */
		string s1=tokenizedFile;
		string s2=tokenizedFile; 
		string s3=tokenizedFile;
		string s4=tokenizedFile;
		string s5=tokenizedFile;
		if(tokenizedFile.size()>=6) // length 6 for "static" and "method"
			s1.erase(0,s1.size()-6);
		if(tokenizedFile.size()>=5) // length 5 for "field"
			s2.erase(0,s2.size()-5);
		if(tokenizedFile.size()>=3) // length 3 for "var"
			s3.erase(0,s3.size()-3);
		if(tokenizedFile.size()>=8)  // length 8 for "function"
			s4.erase(0,s4.size()-8);
		if(tokenizedFile.size()>=11) // length 11 for "constructor"
			s5.erase(0,s5.size()-11);
		// compare against the known keywords, or detect a position where a
		// word must follow ('(' or ',')
		if(s1=="static"||s1=="method"||s2=="field"||s3=="var"||s4=="function"||s5=="constructor"||
			tokenizedFile[tokenizedFile.size()-1]=='('||tokenizedFile[tokenizedFile.size()-1]==',')
		{
			// seed with ' ' so the loop consumes all following whitespace
			ch=' ';
			// BUG FIX: also stop on eof (the original looped forever here)
			while((ch==' '||ch=='\t'||ch=='\n'||ch=='\r')&&!infile.fail())
				infile >> ch;
			// we are now handling a word
			bProcessingWord=true;
			// we read a char we did not handle yet, so do not skip it
			bDontProcessNextChar=true;		
		}

		// handle comments ///////////////
		// line comment: discard "//" and everything up to the newline
		if((tokenizedFile.size()>1)&&tokenizedFile[tokenizedFile.size()-1]=='/'&&tokenizedFile[tokenizedFile.size()-2]=='/')
		{
			tokenizedFile.erase(tokenizedFile.size()-2, 2);
			infile >> ch;
			// BUG FIX: stop at eof too, or a comment on the last line
			// (no trailing newline) would loop forever
			while(ch!='\n'&&!infile.fail()) infile >> ch;
		}
		// region comment: discard "/*" and everything up to "*/"
		else if((tokenizedFile.size()>1)&&tokenizedFile[tokenizedFile.size()-2]=='/'&&tokenizedFile[tokenizedFile.size()-1]=='*')
		{
			char ch1,ch2;
			tokenizedFile.erase(tokenizedFile.size()-2, 2);
			infile >> ch1 >> ch2;
			// BUG FIX: stop at eof too, or an unterminated block comment
			// would loop forever
			while(!(ch1=='*'&&ch2=='/')&&!infile.fail())
			{
				ch1=ch2;
				infile >> ch2;                              
			}
		}
		// string constant: copy everything verbatim up to the closing quote
		else if(tokenizedFile[tokenizedFile.size()-1]=='"')
		{
			infile >> ch;
			// BUG FIX: stop at eof too, or an unterminated string constant
			// would loop forever
			while(ch!='"'&&!infile.fail()){tokenizedFile+=ch; infile >> ch;}
			tokenizedFile+=ch;
		}
	}
}

// Classify currentToken as KEYWORD, SYMBOL, INT_CONST, STRING_CONST,
// IDENTIFIER, or OTHER (multi-token phrases like "x,y,z").
tokenType JackTokenizer::tokenType()
{
	// keyword: exact match against the reserved-word table
	for(int i=0;i<21;i++)
		if(currentToken==kw[i]) return KEYWORD;

	// an empty token matches nothing — bail out before indexing [0]
	// (the original fell through and misreported it as INT_CONST)
	if(currentToken.size()==0) return OTHER;

	// symbol: a single character drawn from the symbol table
	// BUG FIX: the size test was ==0 (not ==1), so SYMBOL was never
	// returned and currentToken[0] indexed an empty string
	if(currentToken.size()==1){
		for(int i=0;i<19;i++)
			if(currentToken[0]==Symbol[i]) return SYMBOL;
	}

	bool numericOnly = true;
	bool alphanumeric = true;

	// integer constant: digits only
	for(unsigned int i=0;i<currentToken.size();i++)
	{
		if(currentToken[i]<'0'||currentToken[i]>'9') 
		{
			numericOnly=false;
			break;
		}
	}
	if(numericOnly) return INT_CONST;
	// string constant: still wrapped in its double quotes
	if(currentToken[0]=='"'&&currentToken[currentToken.size()-1]=='"') return STRING_CONST;
	// identifier: letter or '_' first, then letters/digits/'_'
	if(currentToken[0]=='_'||isalpha(currentToken[0]))
	{
		for(unsigned int i=1;i<currentToken.size();i++)
		{
				if((!isalnum(currentToken[i]))&&(currentToken[i]!='_'))
				{
					alphanumeric=false;
					break;
				}
		}
		if(alphanumeric) return IDENTIFIER;
	}
	// the token type is not known (probably a phrase like "x,y,z")
	return OTHER;
}

// Convert currentToken (assumed to be a digit string) to its int value.
// Jack integer constants must lie in 0..32767; anything larger is fatal.
// BUG FIX: the original kept accumulating on long digit runs, which is
// signed-overflow UB; we now stop as soon as the limit is exceeded.
// Also exits with a non-zero status on error (was exit(0) = success).
int JackTokenizer::intVal()
{
	int num=0;
	for(unsigned int i=0;i<currentToken.size();i++)
	{
		num=(num*10)+(currentToken[i]-'0');
		// stop before another *10 can overflow int
		if(num>32767) break;
	}
	if(num>=0&&num<=32767) return num;
	cout << "Integer constant is not in the range 0-32767\n";
	exit(1);
}

// Accessor: return a copy of the pre-processed stream built by tokenize().
std::string JackTokenizer::getTokenizedFile()
{
	return tokenizedFile;
}

// Erase from tokenizedFile the first space inside `current` (offset from
// tokenptr). The caller must pass the actual current token; this is used
// to remove spaces between "(" and a word.
// BUG FIX: find() returns string::npos when there is no space; the
// original stored it in an int (truncating to -1) and erased the wrong
// character. We now check for npos and do nothing in that case.
void JackTokenizer::eraseNextSpace(string current)
{
	string::size_type ptr=current.find(" ");
	// no space in the token: nothing to erase
	if(ptr==string::npos) return;
	tokenizedFile.erase(tokenptr+ptr,1);
}
