#include "Interpreter.hpp"

Token::Token()
{
	// BUGFIX: `type` was left uninitialized, so a default-constructed
	// Token that was printed or compared before assignment read
	// indeterminate memory. 0 is the same value yylex() uses for
	// end-of-input, i.e. a harmless "no token" type.
	this->type = 0;
	this->quoted = false;
}

// Construct a token that carries no text (operators, separators, etc.)
Token::Token(int type)
	: type(type), quoted(false)
{
}

// Construct a token carrying text (words, variable names, substitutions);
// `quoted` records whether the text came from inside quotes.
Token::Token(int type, const char *text, bool quoted)
	: type(type), text(text), quoted(quoted)
{
}

/////////////////////////////////////////////////////////////////////////////

// A Tokenization accumulates the token stream for one piece of input.
// `shellState` (may be NULL) is only consulted for alias expansion.
Tokenization::Tokenization(ShellState *shellState)
{
	this->shellState = shellState;
	this->truncated = false;
	// BUGFIX: `end` was left uninitialized, but tokenizationToString()
	// and ParserContext::tokenizeMore() dereference it. Point it at an
	// empty string until tokenize() records the real resume position.
	this->end = "";
}

void Tokenization::addToken(int type)
{
	tokens.push_back(Token(type));
}

void Tokenization::addToken(int type, const char *text, bool quoted)
{
	tokens.push_back(Token(type, text, quoted));
}

/////////////////////////////////////////////////////////////////////////////

// Takes a private copy of `text` (released in the destructor) and primes
// the tokenization so the first tokenizeMore() starts at the beginning.
ParserContext::ParserContext(const char *text, ShellState *state)
	:tokenization(state)
{
	// NOTE(review): strdup can return NULL on allocation failure, which
	// would later crash in tokenizeMore() — confirm whether OOM handling
	// is needed in this codebase.
	this->text = strdup(text);
	this->currentToken = 0;
	tokenization.end = this->text;
}

// Releases the strdup'd copy of the command text.
// NOTE(review): ParserContext owns a raw pointer but has no copy
// ctor/assignment; copying one would double-free `text`. Confirm
// instances are never copied (or delete the copy operations).
ParserContext::~ParserContext()
{
	free(text);
}

// Bison-style lexer entry point: hands the parser the next token,
// tokenizing more input on demand. Returns the token's type, or 0
// (end-of-input to the parser) when no more tokens can be produced.
// `val->tokIndex` receives the index usable with getToken().
int ParserContext::yylex(YYSTYPE *val, YYLTYPE *loc)
{
	int tokIndex = currentToken++;
	
	// BUGFIX: tokens.size() is unsigned; the original compared a signed
	// int against it directly. Also loop (with a progress guard) instead
	// of making a single refill attempt, so one yylex call cannot run
	// ahead of the token stream.
	while((size_t)tokIndex >= tokenization.tokens.size())
	{
		size_t before = tokenization.tokens.size();
		tokenizeMore();
		if(tokenization.tokens.size() == before)
			return 0; // no progress: input exhausted
	}
	
	val->tokIndex = tokIndex;
	//val->loc = //TODO
	
	return tokenization.tokens[tokIndex].type;
}

// Return (a copy of) the token at `index`. No bounds checking is done;
// callers must pass an index previously handed out via yylex().
Token ParserContext::getToken(int index) const
{
	return tokenization.tokens[index];
}

// Expose the accumulated tokenization. Non-owning pointer; valid only
// for the lifetime of this ParserContext.
Tokenization *ParserContext::getTokenization()
{
	return &tokenization;
}

// Tokenize the next top-level line of the remaining input, appending its
// tokens, plus a final TOK_EOF once the whole input has been consumed.
void ParserContext::tokenizeMore()
{
	const char *remaining = tokenization.end;
	if(*remaining == 0)
		return; // everything already tokenized
	
	tokenize(remaining, &tokenization, CONTEXT_ROOT);
	
	// tokenize() advanced tokenization.end past what it consumed
	if(*tokenization.end == 0)
		tokenization.tokens.push_back(Token(TOK_EOF));
}

/////////////////////////////////////////////////////////////////////////////

static void separateIfNeeded(const char *pos, Tokenization *state);
static void finishToken(Tokenization *state, bool quoted);
static void handleFirstToken(Tokenization *state);
static const char *tokenizeVar(const char *pos, Tokenization *state, bool quoted);
static const char *tokenizeArithmeticExpansion(const char *pos, Tokenization *state);
static const char *tokenizeCommandExpansion(const char *pos, Tokenization *state, int context, bool quoted);
static const char *tokenizeSingleQuoted(const char *pos, Tokenization *state);
static const char *tokenizeDoubleQuoted(const char *pos, Tokenization *state);
static const char *tokenizeCString(const char *pos, Tokenization *state);

static int lookupKeyword(const char *word);
static bool isSingleCharVarname(char ch);
static bool isHexDigit(char ch);
static int hexDigitToInt(char ch);

/////////////////////////////////////////////////////////////////////////////

// Tokenize `cmd`, appending tokens to `state`.
//
// `context` selects the termination condition:
//   CONTEXT_ROOT             — stop after one top-level line (the first
//                              newline outside parentheses)
//   CONTEXT_BACKQUOTED       — stop at the closing '`'
//   CONTEXT_COMMANDEXPANSION — stop at the unmatched closing ')'
//
// On return, state->end points just past the last character consumed and
// state->truncated is set when input ended before a construct completed.
// Note: this resets state->pendingToken and state->truncated on entry.
void tokenize(const char *cmd, Tokenization *state, int context)
{
	int parenDepth = 0;
	state->pendingToken = "";
	state->truncated = false;
	
	const char *pos;
	for(pos=cmd; ; pos++)
	switch(*pos)
	{
		case 0:
			// End of input: flush any pending word; truncated tells the
			// caller more input may be expected (e.g. continuation line).
			finishToken(state, false);
			state->end = pos;
			state->truncated = true;
			return;
			
		case '\n':
			finishToken(state, false);
			state->addToken(TOK_NEWLINE);
			
			// At top level, a newline outside parentheses ends the line
			if(context == CONTEXT_ROOT && parenDepth == 0) {
				state->end = pos+1;
				return;
			}
			break;
			
		case '#': {
			// Comment: skip up to (not past) the end of the line.
			// NOTE(review): '#' starts a comment even mid-word (foo#bar);
			// POSIX shells only treat it as a comment at the start of a
			// word — confirm this is intended.
			finishToken(state, false);
			while(*(pos+1) && *(pos+1)!='\n')
				pos++;
			break;
		}
			
		case '$':
			finishToken(state, false);
			separateIfNeeded(pos, state);
			pos = tokenizeVar(pos, state, false);
			break;
			
		case '`':
			finishToken(state, false);
			separateIfNeeded(pos, state);
			
			// Inside backquotes this is the CLOSING quote; otherwise it
			// opens a nested command substitution.
			if(context == CONTEXT_BACKQUOTED) {
				state->end = pos+1;
				return;
			} else {
				pos = tokenizeCommandExpansion(pos, state, CONTEXT_BACKQUOTED, false);
			}
			break;
			
		case '\'':
			finishToken(state, false);
			separateIfNeeded(pos, state);
			pos = tokenizeSingleQuoted(pos, state);
			break;
			
		case '\"':
			finishToken(state, false);
			separateIfNeeded(pos, state);
			pos = tokenizeDoubleQuoted(pos, state);
			break;
			
		case '\\':
			// Backslash escapes the next character into its own quoted
			// text token; a backslash at end of input means "continues".
			finishToken(state, false);
			separateIfNeeded(pos, state);
			
			pos++;
			if(*pos == 0) {
				state->end = pos;
				state->truncated = true;
				return;
			} else {
				char toktext[2] = {*pos,0};
				state->addToken(TOK_TEXT, toktext, true);
			}
			break;
			
		case '(':
			parenDepth++;
			finishToken(state, false);
			state->addToken(TOK_OPENPAREN);
			break;
			
		case ')':
			finishToken(state, false);
			// An unmatched ')' terminates a $(...) expansion; otherwise
			// it's an ordinary close-paren token.
			// NOTE(review): in other contexts parenDepth can go negative
			// here, which then defeats the parenDepth==0 newline check
			// above — confirm whether unbalanced ')' should be an error.
			if(parenDepth==0 && context == CONTEXT_COMMANDEXPANSION) {
				state->end = pos+1;
				return;
			} else {
				parenDepth--;
				state->addToken(TOK_CLOSEPAREN);
			}
			break;
			
		case '|':
			finishToken(state, false);
			if(*(pos+1) == '|') {
				state->addToken(TOK_PIPEPIPE);
				pos++;
			} else {
				state->addToken(TOK_PIPE);
			}
			break;
			
		case '&':
			finishToken(state, false);
			if(*(pos+1) == '&') {
				state->addToken(TOK_ANDAND);
				pos++;
			} else {
				state->addToken(TOK_AND);
			}
			break;
			
		case ';':
			finishToken(state, false);
			state->addToken(TOK_SEMICOLON);
			break;
			
		case '!':
			finishToken(state, false);
			state->addToken(TOK_BANG);
			break;
			
		case '>':
			// TODO: '>>' (append) is not recognized; it comes out as two
			// TOK_GREATER tokens
			finishToken(state, false);
			state->addToken(TOK_GREATER);
			break;
			
		case '<':
			// TODO: '<<' (heredoc) is not recognized; it comes out as two
			// TOK_LESS tokens
			finishToken(state, false);
			state->addToken(TOK_LESS);
			break;
			
		default:
			// Whitespace ends the pending word; anything else extends it.
			// NOTE(review): separateIfNeeded reads pos[-1]; if tokenize is
			// re-entered at the very start of a buffer while `state`
			// already holds tokens (see interactiveTokenizerTest), that
			// reads one byte before the buffer — confirm callers.
			if(isSeparator(*pos)) {
				finishToken(state, false);
			} else {
				separateIfNeeded(pos, state);
				state->pendingToken += *pos;
			}
			break;
	}
}

// Insert a TOK_SEPARATOR between two adjacent word-like tokens when the
// source had whitespace between them, so the interpreter can tell
// `$a $b` apart from `$a$b` (which concatenates).
// NOTE: reads pos[-1]; callers must not pass the first byte of a buffer
// when tokens have already been emitted.
void separateIfNeeded(const char *pos, Tokenization *state)
{
	// Nothing to separate before the very first token
	if(state->tokens.empty())
		return;
	
	// Only separate when the preceding source character was whitespace
	if(!isSeparator(pos[-1]))
		return;
	
	int prevType = state->tokens.back().type;
	bool wordLike = prevType == TOK_TEXT
		|| prevType == TOK_VAR
		|| prevType == TOK_LOCALIZED_TEXT
		|| prevType == TOK_COMMAND_SUBSTITUTE
		|| prevType == TOK_ARITH_SUBSTITUTE;
	
	if(wordLike)
		state->addToken(TOK_SEPARATOR);
}

// Flush state->pendingToken as a TOK_TEXT token (if any). Unquoted
// empty words are dropped; only quoted strings may yield empty text.
// An unquoted word in command position is routed through
// handleFirstToken() for keyword/alias treatment instead.
void finishToken(Tokenization *state, bool quoted)
{
	if(state->pendingToken.empty() && !quoted)
		return;
	
	if(!quoted)
	{
		int prevTok = state->tokens.empty() ? -1 : state->tokens.back().type;
		
		// "Command position" = start of input or right after a command
		// boundary (newline, '(', ';', '&&', '||')
		bool commandPosition =
			prevTok == -1
			|| prevTok == TOK_NEWLINE
			|| prevTok == TOK_OPENPAREN
			|| prevTok == TOK_SEMICOLON
			|| prevTok == TOK_ANDAND
			|| prevTok == TOK_PIPEPIPE;
		
		if(commandPosition)
		{
			handleFirstToken(state);
			return;
		}
	}
	
	state->addToken(TOK_TEXT, state->pendingToken.c_str(), quoted);
	state->pendingToken = "";
}

// The pending word is in command position: try alias expansion first,
// then keyword recognition, before emitting it as plain text.
void handleFirstToken(Tokenization *state)
{
	ShellState *shellState = state->shellState;
	if(shellState)
	{
		if(shellState->aliases.find(state->pendingToken) != shellState->aliases.end())
		{
			string aliasExpansion = shellState->aliases[state->pendingToken];
			
			// Recursively tokenize the alias body in place of the word.
			// BUGFIX: tokenize() resets state->truncated and then
			// unconditionally sets it when it reaches the end of the
			// alias string, so a completed alias falsely marked the whole
			// tokenization as truncated. An alias body is a complete
			// string by construction, so restore the caller's flag.
			// (state->end is also left pointing into the temporary
			// aliasExpansion buffer, but the enclosing tokenize() call
			// overwrites it before returning to external callers.)
			bool outerTruncated = state->truncated;
			tokenize(aliasExpansion.c_str(), state, CONTEXT_ROOT);
			state->truncated = outerTruncated;
			
			// TODO: a self-referential alias (e.g. ls -> "ls -l") recurses
			// forever; track in-progress aliases like POSIX shells do.
			state->pendingToken = "";
			return;
		}
	}
	
	int keyword = lookupKeyword(state->pendingToken.c_str());
	if(keyword)
		state->addToken(keyword);
	else
		state->addToken(TOK_TEXT, state->pendingToken.c_str(), false);
	
	state->pendingToken = "";
}

// Tokenize a '$...' construct; `pos` points at the '$'. Returns the last
// position consumed (the caller's loop then advances past it). `quoted`
// is propagated so the interpreter knows the expansion occurred inside
// double quotes.
const char *tokenizeVar(const char *pos, Tokenization *state, bool quoted)
{
	pos++; // Skip over the leading '$' sign
	
	if(isSingleCharVarname(*pos))
	{
		// Special single-character parameters ($#, $$, $!, ...)
		char varname[2] = {*pos, 0};
		state->addToken(TOK_VAR, varname, quoted);
		return pos;
	}
	else if(isalpha(*pos) || *pos=='_')
	{
		// Ordinary variable name: [A-Za-z_][A-Za-z0-9_]*
		const char *start = pos;
		do {
			pos++;
		} while(isalnum(*pos) || *pos=='_');
		string varname = string(start, pos-start);
		
		state->addToken(TOK_VAR, varname.c_str(), quoted);
		return pos-1; // reprocess the first non-name character
	}
	else if(isdigit(*pos))
	{
		// Positional parameter: $0, $1, ...
		const char *start = pos;
		do {
			pos++;
		} while(isdigit(*pos));
		string varname = string(start, pos-start);
		
		state->addToken(TOK_VAR, varname.c_str(), quoted);
		return pos-1;
	}
	else switch(*pos)
	{
		case 0:
			// Lone '$' at end of input: treat as literal text
			state->pendingToken += '$';
			return pos-1;
			
		case '{': {
			// ${...}: the token text keeps the leading '{' (and no
			// closing '}') — presumably so the interpreter can tell
			// braced expansions apart; confirm downstream expectations.
			const char *start = pos;
			do {
				pos++;
			} while(*pos && *pos!='}');
			string varname = string(start, pos-start);
			
			state->addToken(TOK_VAR, varname.c_str(), quoted);
			// BUGFIX: previously returned pos-1 even when the closing '}'
			// was found, so the '}' was re-scanned and emitted as literal
			// text right after the variable. Consume it; only back up
			// when the input ended without one (the caller's loop then
			// lands on the NUL and flags truncation).
			return *pos=='}' ? pos : pos-1;
		}
			
		case '(':
			// $((...)) is arithmetic, $(...) is command substitution
			if(pos[1] == '(')
				return tokenizeArithmeticExpansion(pos, state);
			else
				return tokenizeCommandExpansion(pos, state, CONTEXT_COMMANDEXPANSION, quoted);
			
		case '\'': // ANSI C string
			return tokenizeCString(pos, state);
			
		case '\"': // Localized string
			// TODO: Localize
			return tokenizeDoubleQuoted(pos, state);
		
		default:
			// '$' followed by anything else is literal text
			state->pendingToken += '$';
			return pos-1;
	}
}

// Tokenize a $'...' ANSI-C quoted string; `pos` points at the opening
// quote. Escape sequences are decoded into pendingToken, flushed as a
// quoted TOK_TEXT at the closing quote (or at end of input, which also
// sets state->truncated). Returns the last position consumed.
const char *tokenizeCString(const char *pos, Tokenization *state)
{
	pos++; // Skip the opening quote
	
	for(; ; pos++)
	switch(*pos)
	{
		case 0:
			finishToken(state, true);
			state->truncated = true;
			return pos-1;
			
		case '\'':
			finishToken(state, true);
			return pos;
			
		case '\\':
			pos++;
			switch(*pos) {
				case 0:
					// Backslash at end of input
					finishToken(state, true);
					state->truncated = true;
					return pos-1;
				case 'a': state->pendingToken += '\a'; break;
				case 'b': state->pendingToken += '\b'; break;
				case 'e': state->pendingToken += '\033'; break; // ESC ('\e' is a GNU extension)
				case 'f': state->pendingToken += '\f'; break;
				case 'n': state->pendingToken += '\n'; break;
				case 'r': state->pendingToken += '\r'; break;
				case 't': state->pendingToken += '\t'; break;
				case 'v': state->pendingToken += '\v'; break;
				case '\\': state->pendingToken += '\\'; break;
				case '\'': state->pendingToken += '\''; break;
				case '\"': state->pendingToken += '\"'; break;
				case '0': case '1': case '2': case '3':
				case '4': case '5': case '6': case '7': {
					// Octal escape: up to three octal digits
					int ch = (*pos-'0');
					if(*(pos+1)>='0' && *(pos+1)<='7') {
						pos++;
						ch = (ch<<3) + (*pos-'0');
						if(*(pos+1)>='0' && *(pos+1)<='7') {
							pos++;
							ch = (ch<<3) + (*pos-'0');
						}
					}
					state->pendingToken += (char)ch;
					break;
				}
				case 'x': {
					// Hex escape: up to two hex digits; a bare "\x" stays literal
					if(isHexDigit(*(pos+1))) {
						pos++;
						int ch = hexDigitToInt(*pos);
						if(isHexDigit(*(pos+1))) {
							pos++;
							ch = (ch<<4) + hexDigitToInt(*pos);
						}
						state->pendingToken += (char)ch;
					} else {
						state->pendingToken += "\\x";
					}
					break;
				}
				/*case 'c': {
					//TODO
					break;
				}*/
				default:
					// BUGFIX: unknown escapes (e.g. "\q") used to be
					// silently dropped (both characters). Keep them
					// literally, backslash included, matching bash's
					// $'...' behavior.
					state->pendingToken += '\\';
					state->pendingToken += *pos;
					break;
			}
			break;
			
		default:
			state->pendingToken += *pos;
			break;
	}
}

const char *tokenizeArithmeticExpansion(const char *pos, Tokenization *state)
{
	pos+=2; // Skip the two opening parens
	// TODO
}

// Tokenize a `...` or $(...) command substitution; `pos` points at the
// opening backquote/paren. A throwaway sub-tokenization is run only to
// find where the construct ends — just the raw text is kept, as a
// TOK_COMMAND_SUBSTITUTE token (re-tokenized when executed).
const char *tokenizeCommandExpansion(const char *pos, Tokenization *state, int context, bool quoted)
{
	pos++; // Skip the opening backquote or paren
	
	Tokenization subtokenization(state->shellState);
	tokenize(pos, &subtokenization, context);
	
	if(subtokenization.truncated)
		state->truncated = true;
	
	// subtokenization.end points one past the closing delimiter — except
	// when input ran out, in which case it points at the NUL and there
	// is no delimiter to trim.
	// BUGFIX: the original always subtracted 1, dropping the last real
	// character of an unterminated substitution.
	size_t length = subtokenization.end - pos;
	if(!subtokenization.truncated && length > 0)
		length--; // drop the closing '`' or ')'
	
	string substText = string(pos, length);
	state->addToken(TOK_COMMAND_SUBSTITUTE, substText.c_str(), quoted);
	
	return subtokenization.end-1;
}

// Tokenize a '...' single-quoted string; `pos` points at the opening
// quote. Everything up to the closing quote is taken literally and
// flushed as a quoted TOK_TEXT. Returns the last position consumed;
// sets state->truncated if input ended before the closing quote.
const char *tokenizeSingleQuoted(const char *pos, Tokenization *state)
{
	pos++; // skip the opening quote
	
	while(*pos != 0 && *pos != '\'')
	{
		state->pendingToken += *pos;
		pos++;
	}
	
	finishToken(state, true);
	
	if(*pos == 0)
	{
		// Unterminated string: back up so the caller's loop lands on
		// the terminator
		state->truncated = true;
		return pos-1;
	}
	return pos; // at the closing quote
}

// Tokenize a "..." double-quoted string; `pos` points at the opening
// quote. Variable and command substitutions are still recognized inside;
// text runs are flushed as quoted TOK_TEXT tokens. Returns the last
// position consumed; sets state->truncated if input ended early.
const char *tokenizeDoubleQuoted(const char *pos, Tokenization *state)
{
	pos++; // Skip the opening quote
	
	for(; ; pos++)
	switch(*pos)
	{
		case 0:
			// Ran out of input before the closing quote
			finishToken(state, true);
			state->truncated = true;
			return pos-1;
			
		case '\"':
			finishToken(state, true);
			return pos;
			
		case '$':
			finishToken(state, true);
			pos = tokenizeVar(pos, state, true);
			break;
			
		case '`':
			finishToken(state, true);
			pos = tokenizeCommandExpansion(pos, state, CONTEXT_BACKQUOTED, true);
			break;
			
		case '!':
			// NOTE(review): emits TOK_BANG even inside double quotes —
			// presumably for history expansion; confirm this is intended.
			finishToken(state, true);
			state->addToken(TOK_BANG);
			break;
			
		case '\\':
			// BUGFIX: this inner switch used to inspect *(pos) — i.e. the
			// backslash itself — so its '\\' case always matched: EVERY
			// following character got escaped, and a backslash at end of
			// string appended the NUL terminator and ran past the buffer.
			// Inspect the NEXT character instead: only $, `, ", \ and
			// newline are special inside double quotes; any other
			// backslash is kept literally.
			switch(*(pos+1))
			{
				case '$':
				case '`':
				case '\"':
				case '\\':
					state->pendingToken += *(++pos);
					break;
				case '\n':
					pos++; // line continuation: drop backslash-newline
					break;
				default:
					// Includes NUL: keep the backslash; the next loop
					// iteration handles the following character normally
					state->pendingToken += '\\';
					break;
			}
			break;
			
		default:
			state->pendingToken += *pos;
			break;
	}
}

/////////////////////////////////////////////////////////////////////////////

// Maps a word in command position to its keyword token (consulted by
// handleFirstToken via lookupKeyword; keywords are only recognized as
// the first word of a command).
struct KeywordTableEntry
{
	const char *text;  // keyword spelling
	int tok;           // corresponding TOK_* value
};

// NOTE(review): only the if/then/else/elif/fi family is listed; loop
// keywords (while, do, done, for, case, ...) are absent — presumably the
// grammar doesn't support them yet. Confirm before relying on this table.
KeywordTableEntry keywordsTable[] = {
	{ "if", TOK_IF },
	{ "then", TOK_THEN },
	{ "else", TOK_ELSE },
	{ "elif", TOK_ELIF },
	{ "fi", TOK_FI },
};
// Number of entries in keywordsTable
const int numKeywords = sizeof(keywordsTable) / sizeof(KeywordTableEntry);

int lookupKeyword(const char *word)
{
	for(int ii=0; ii<numKeywords; ii++)
	{
		if(!strcmp(keywordsTable[ii].text, word))
			return keywordsTable[ii].tok;
	}
	
	return 0;
}

// True when `ch` is whitespace that separates words (space, tab, newline).
bool isSeparator(char ch)
{
	return ch == ' ' || ch == '\t' || ch == '\n';
}

// True when `ch` names a single-character special shell parameter
// (so "$" followed by it is a variable reference, e.g. $#, $$, $?).
// BUGFIX: the POSIX special parameters '?', '*', '@' and '-' were
// missing, so common references like $? tokenized as literal text.
// (Digits — $0, $1, ... — are handled separately in tokenizeVar.)
bool isSingleCharVarname(char ch)
{
	switch(ch)
	{
		case '#':
		case '$':
		case '!':
		case '?':
		case '*':
		case '@':
		case '-':
			return true;
			
		default:
			return false;
	}
}

// True when `ch` is a hexadecimal digit: 0-9, a-f or A-F.
// BUGFIX: the original accepted the whole a-z/A-Z alphabet, so escapes
// like $'\xgg' were decoded as (bogus) hex instead of left literal.
bool isHexDigit(char ch)
{
	if(ch>='0' && ch<='9')
		return true;
	else if(ch>='a' && ch<='f')
		return true;
	else if(ch>='A' && ch<='F')
		return true;
	else
		return false;
}

// Convert a hex digit to its value (0-15), or -1 if `ch` is not a hex
// digit. BUGFIX: the original mapped the whole a-z/A-Z alphabet, so e.g.
// 'g' returned 16 instead of -1 — keep consistent with isHexDigit().
int hexDigitToInt(char ch)
{
	if(ch>='0' && ch<='9')
		return ch-'0';
	else if(ch>='a' && ch<='f')
		return ch+10-'a';
	else if(ch>='A' && ch<='F')
		return ch+10-'A';
	else
		return -1;
}

/////////////////////////////////////////////////////////////////////////////

// Return a human-readable name for a TOK_* value ("???" if unknown);
// used by Token::toString() for debug output.
const char *tokenTypeName(int tokenType)
{
	struct NameEntry { int tok; const char *name; };
	static const NameEntry names[] = {
		{ TOK_SEPARATOR, "separator" },
		{ TOK_TEXT, "text" },
		{ TOK_LOCALIZED_TEXT, "localized_text" },
		{ TOK_COMMAND_SUBSTITUTE, "command_substitute" },
		{ TOK_ARITH_SUBSTITUTE, "arith_substitute" },
		{ TOK_VAR, "var" },
		{ TOK_PIPE, "|" },
		{ TOK_PIPEPIPE, "||" },
		{ TOK_AND, "&" },
		{ TOK_ANDAND, "&&" },
		{ TOK_SEMICOLON, ";" },
		{ TOK_NEWLINE, "newline" },
		{ TOK_EOF, "eof" },
		{ TOK_OPENPAREN, "(" },
		{ TOK_CLOSEPAREN, ")" },
		{ TOK_BANG, "!" },
		{ TOK_GREATER, ">" },
		{ TOK_LESS, "<" },
		{ TOK_IF, "if" },
		{ TOK_THEN, "then" },
		{ TOK_ELSE, "else" },
		{ TOK_ELIF, "elif" },
		{ TOK_FI, "fi" },
	};
	const int numNames = sizeof(names) / sizeof(names[0]);
	
	for(int ii=0; ii<numNames; ii++)
	{
		if(names[ii].tok == tokenType)
			return names[ii].name;
	}
	return "???";
}

// Debug representation: type name, then the (quote-protected) text if
// any, then a "(quoted)" marker when the token came from inside quotes.
string Token::toString() const
{
	string result(tokenTypeName(type));
	
	if(!text.empty())
	{
		result += " ";
		result += quoteProtectString(text);
	}
	if(quoted)
		result += " (quoted)";
	
	return result;
}

// Debug dump: one tab-indented line per token, plus TRUNCATED and any
// not-yet-tokenized leftover text.
string tokenizationToString(const Tokenization *tokenization)
{
	string out;
	size_t count = tokenization->tokens.size();
	
	for(size_t ii=0; ii<count; ii++)
	{
		out += "\t";
		out += tokenization->tokens[ii].toString();
		out += "\n";
	}
	
	if(tokenization->truncated)
		out += "\tTRUNCATED\n";
	if(*tokenization->end)
		out += "\tLeftover text: " + string(tokenization->end);
	
	return out;
}

/////////////////////////////////////////////////////////////////////////////

#ifdef UNIT_TEST

void interactiveTokenizerTest()
{
	char buf[512];
	while(!feof(stdin))
	{
		bool first = true;
		string line = "";
		
		Tokenization tokenization(NULL);
		do {
			if(first) {
				printf("> ");
				first = false;
			} else {
				printf("  ");
			}
			buf[0] = 0;
			fgets(buf, 512, stdin);
			line += buf;
			tokenize(line.c_str(), &tokenization, CONTEXT_ROOT);
		} while(!feof(stdin) && tokenization.truncated);
		
		string output = tokenizationToString(&tokenization);
		puts(output.c_str());
	}
}

#endif

