'''
Created on 29/09/2012

@author: Barrios - Correa - Felippone
'''

from Exceptions import *

class Context:
	"""Shared mutable state for one parser-generation pass.

	Accumulates the generated source fragments (lexer rules, grammar
	rules, AST classes) and the lookup tables that map grammar symbols
	to their generated names.
	"""

	def __init__(self):
		self.ast = ""           # generated AST class definitions
		self.tokens = ""        # simple t_NAME = r'...' lexer rules
		self.tokens_def = ""    # function-style lexer rules (def t_NAME)
		self.tokens_list = []   # quoted token names for the `tokens` list
		self.rules = ""         # generated p_RULE grammar functions
		self.assoc = ""         # accumulated precedence tuple entries
		self.start = ""         # the %start nonterminal
		self.d_reserved = {}    # reserved word literal -> token name
		self.d_ast = {}         # AST class name -> Category
		self.d_categories = {}  # category ident -> True once emitted
		self.d_tokens = {}      # lexeme text -> generated token name
		self.d_ignore = {}      # ignored lexeme -> generated token name
		self.last_token_id = 0
		self.last_rule_id = 0

	def get_next_token(self):
		"""Return a fresh token name of the form TOKENnnn."""
		self.last_token_id += 1
		return "TOKEN%03d" % self.last_token_id

	def get_next_rule(self):
		"""Return a fresh rule name of the form RULEnnn."""
		self.last_rule_id += 1
		return "RULE%03d" % self.last_rule_id

class Expressions:
	"""Top-level list of grammar expressions; renders the full PLY module.

	``build_parser`` runs every expression's build pass against a fresh
	Context and then assembles the generated parser source as one string.
	"""

	def __init__(self, expr):
		self.expressions = [expr]

	def add(self, expr):
		self.expressions.append(expr)

	def build_parser(self):
		"""Return the generated PLY parser module source as a string."""
		context = Context()
		for exp in self.expressions:
			exp.build_parser(context)
		concat_function = ("def __concat__(x,y):\n" +
			"\treturn (x if type( x ) is list else [x]) + (y if type( y ) is list else [y])\n\n")
		code = "from collections import namedtuple\n\n"
		# .items() instead of the Python-2-only .iteritems(): same pairs,
		# works on both Python 2 and 3.
		code += ("### RESERVED WORDS ###\n" +
			"reserved = {\n\t" + ",\n\t".join([a + ": '" + b + "'" for a, b in context.d_reserved.items()]) + "\n\t}\n\n")
		code += ("### TOKENS ###\n" +
				"tokens = list(reserved.values())+ [" + ",\n\t".join([a for a in context.tokens_list]) + "\t]\n\n" +
				context.tokens + "\n" +
				context.tokens_def +
				"def t_error(t):\n" +
				"\tprint(\"Illegal character '%s'\" % t.value[0])\n" +
				"\tt.lexer.skip(1)\n" +
				"\n### PRECEDENCE ###\n")
		# context.assoc ends with ", \n" when nonempty; strip that trailer
		# before wrapping the entries in the precedence tuple.
		if len(context.assoc[0:-2]) > 2:
			code += "precedence = (\n" + context.assoc[0:-2] + "\n    )\n"
		else:
			code += "precedence = ()\n"
		code += ("\nstart = \'" + context.start + "\'\n" +
				"\n### RULES ###\n" +
				concat_function +
				context.rules +
				"def p_error(t):\n" +
				"\tif hasattr(t, 'value'):\n" +
				"\t\tprint(\"Syntax error at '%s'\" % t.value)\n" +
				"\telse:\n" +
				# Fixed typo in the generated message ("Syntex" -> "Syntax").
				"\t\tprint (\"Syntax error\")\n" +
				"\n### AST ###\n" +
				context.ast +
				"\n#Build the parser\n" +
				"import ply.lex as lex\n" +
				"lex.lex()\n" +
				"import ply.yacc as yacc\n" +
				"yacc.yacc()\n")
		return code

class Category:
	"""Syntactic category of a nonterminal: its identifier and list-ness."""

	def __init__(self, ident, islist):
		self.islist = islist
		self.ident = ident

class Expression:
	"""One grammar nonterminal: identifier, category and its productions."""

	def __init__(self, start, iden, category, productions):
		self.start = start          # True if flagged as the %start symbol
		self.iden = iden
		self.category = category
		self.productions = productions

	def build_parser(self, context):
		"""Record %start, emit the category's AST base class once, and
		build every production's rule into *context*."""
		if self.start == True:
			if context.start == "":
				context.start = self.iden
			else:
				# Keep the first %start; later ones only warn.
				print("Warning: Hay dos no terminales %start: \'" + context.start + "\' y \'" + self.iden + "\'. Se usa el primero como %start.")
		if self.category.ident not in context.d_categories:
			context.ast += "class %s:\n\tpass\n\n" % self.category.ident
			context.d_categories[self.category.ident] = True
		for prod in self.productions.productions:
			prod.build_parser(context, self.iden, self.category)

class Productions:
	"""Ordered collection of the Production objects of one nonterminal."""

	def __init__(self, expr):
		self.productions = []
		self.productions.append(expr)

	def add(self, expr):
		self.productions.append(expr)

class Production:
	"""A single production: its right-hand-side vocabulary plus an action."""

	def __init__(self, vocabulary, action):
		self.vocabulary = vocabulary
		self.action = action

	def build_parser(self, context, iden, category):
		"""Register the RHS lexemes first, then emit the action's rule.

		The vocabulary pass must run before the action pass so that the
		token tables the action reads are already populated.
		"""
		self.vocabulary.build_parser(context, iden, self.action)
		self.action.build_parser(context, iden, category, self.vocabulary)

class Vocabulary:
	"""Right-hand-side symbol sequence of one production."""

	def __init__(self, lexem):
		self.lexems = [lexem]

	def add(self, lexem):
		self.lexems.append(lexem)

	def to_print(self, context):
		"""Human-readable form: the raw lexemes, space-separated."""
		parts = [item.to_print(context) for item in self.lexems]
		return " ".join(parts)

	def to_print_tokens(self, context):
		"""Generated-token form: the token names, space-separated."""
		parts = [item.to_print_tokens(context) for item in self.lexems]
		return " ".join(parts)

	def build_parser(self, context, iden, action):
		"""Register every lexeme of this RHS with the context."""
		for item in self.lexems:
			item.build_parser(context, iden, action)

class ER:
	"""A regular-expression terminal appearing in the grammar."""

	def __init__(self, er):
		self.er = er  # quoted regex text as written in the grammar

	def cleanstr(self, string):
		"""Strip the surrounding quote characters."""
		return string[1:-1]

	def build_parser(self, context, iden, action):
		"""Register this regex as a function-style lexer token (once).

		A function rule (rather than a plain t_NAME string) is emitted so
		the generated lexer can remap matches to reserved words.
		"""
		if self.er in context.d_tokens:
			lexname = context.d_tokens[self.er]
		else:
			lexname = context.get_next_token()
			context.d_tokens[self.er] = lexname
			context.tokens_list += ["\'" + lexname + "\'"]
			context.tokens_def += (
				"def t_" + lexname + "(t):\n" +
				"\tr'" + self.cleanstr(self.er) + "'\n" +
				"\tt.type = reserved.get(t.value, '" + lexname + "')\n" +
				"\treturn t\n\n")

	def to_print_clean(self, context):
		return self.cleanstr(self.er)

	def to_print(self, context):
		return self.er

	def to_print_tokens(self, context):
		return context.d_tokens[self.er]

class Token:
	"""A literal terminal (keyword/operator) appearing in the grammar."""

	def __init__(self, token):
		self.token = token  # quoted literal text as written in the grammar

	def cleanstr(self, string):
		"""Strip quotes and backslash-escape regex metacharacters."""
		inner = string[1:-1]
		special = set("()[]{}*+.$^?|")
		return "".join("\\" + ch if ch in special else ch for ch in inner)

	def build_parser(self, context, iden, action):
		"""Register this literal as a simple lexer rule and reserved word."""
		if self.token in context.d_tokens:
			lexname = context.d_tokens[self.token]
		else:
			lexname = context.get_next_token()
			context.d_tokens[self.token] = lexname
			context.tokens += "t_" + lexname + " = r'" + self.cleanstr(self.token) + "'\n"
		if self.token not in context.d_reserved:
			context.d_reserved[self.token] = lexname

	def to_print(self, context):
		return self.token

	def to_print_clean(self, context):
		return self.cleanstr(self.token)

	def to_print_tokens(self, context):
		return context.d_tokens[self.token]

class Ident:
	"""A nonterminal reference on a production's right-hand side."""

	def __init__(self, ident):
		self.ident = ident

	def build_parser(self, context, iden, action):
		"""Nonterminals declare no lexer tokens; nothing to register."""
		pass

	def to_print(self, context):
		return self.ident

	def to_print_tokens(self, context):
		# A nonterminal prints as its own name in the grammar docstring.
		return self.ident

class ActionEmptyList:
	"""Semantic action that renders to nothing (empty list placeholder)."""

	def to_print(self, context, category, vocabulary):
		return ""

	def to_print_comment(self, context, category):
		return ""
		
class ActionList:
	"""Semantic action that wraps an inner action's result in a list."""

	def __init__(self, action):
		self.action = action

	def to_print(self, context, category, vocabulary):
		inner = self.action.to_print(context, category, vocabulary)
		return "[%s]" % inner

	def to_print_comment(self, context, category):
		return "[%s]" % self.action.to_print_comment(context, category)

	def build_parser(self, context, iden, category, vocabulary):
		"""Emit one PLY p_ function whose p[0] is the wrapped list."""
		rulename = context.get_next_rule()
		header = ("def p_" + rulename + "(p): # " +
			iden + " : " + vocabulary.to_print(context) +
			" > " + self.to_print_comment(context, category) + "\n")
		grammar_doc = "\t\"" + iden + " : " + vocabulary.to_print_tokens(context) + "\"\n"
		assignment = "\tp[0] = " + self.to_print(context, category, vocabulary) + "\n\n"
		context.rules += header + grammar_doc + assignment

class Ref:
	"""Semantic action referencing a RHS symbol by position ($k)."""

	def __init__(self, ref):
		self.ref = ref  # e.g. "$1"

	def to_print(self, context, category, vocabulary):
		"""Render as a PLY p[k] access, validating k against the RHS length."""
		index_text = self.ref[1:]
		if int(index_text) > len(vocabulary.lexems):
			raise MlgMsgExc("Referencia invalida", "No existe el lexema al que se hace referencia")
		return "p[" + index_text + "]"

	def to_print_comment(self, context, category):
		return self.ref

	def build_parser(self, context, iden, category, vocabulary):
		"""Emit one PLY p_ function whose p[0] is the referenced symbol."""
		rulename = context.get_next_rule()
		header = ("def p_" + rulename + "(p): # " +
			iden + " : " + vocabulary.to_print(context) +
			" > " + self.to_print_comment(context, category) + "\n")
		grammar_doc = "\t\"" + iden + " : " + vocabulary.to_print_tokens(context) + "\"\n"
		assignment = "\tp[0] = " + self.to_print(context, category, vocabulary) + "\n\n"
		context.rules += header + grammar_doc + assignment

class Action:
	"""Semantic action that constructs a named AST node."""

	def __init__(self, action_name, attributes):
		self.action_name = action_name
		self.attributes = attributes  # Attributes instance, or falsy for none

	def to_print(self, context, category, vocabulary):
		"""Return the constructor call for p[0], emitting the class first."""
		self.add_to_ast(context, category)
		if self.attributes:
			return self.action_name + "(" + self.attributes.build_call_params(context, category, vocabulary) + ")"
		else:
			return self.action_name + "()"

	def to_print_comment(self, context, category):
		if self.attributes:
			return "(" + self.action_name + " " + self.attributes.to_print(context, category) + ")"
		else:
			return "(" + self.action_name + ")"

	def add_to_ast(self, context, category):
		"""Emit the namedtuple-backed AST class once per action name.

		Raises MlgMsgExc if the same class name is later used under a
		different syntactic category.
		"""
		if self.action_name not in context.d_ast:
			context.d_ast[self.action_name] = category
			# No attributes means an empty namedtuple field list.
			fields = self.attributes.build_parser(context) if self.attributes else ""
			context.ast += "class %s(namedtuple('%s', '%s'), %s):\n\tpass\n\n" % (
				self.action_name,
				self.action_name,
				fields,
				category.ident)
		elif context.d_ast[self.action_name].ident != category.ident:
			raise MlgMsgExc("Categoria sintactica redefinida", "Cada clase puede pertenecer a una sola categoria sintactica")

	def build_parser(self, context, iden, category, vocabulary):
		"""Emit one PLY p_ function whose p[0] constructs this AST node."""
		rulename = context.get_next_rule()

		self.add_to_ast(context, category)

		context.rules += ("def p_" + rulename + "(p): # " +
			iden + " : " + vocabulary.to_print(context) +
			" > " + self.to_print_comment(context, category) + "\n")
		context.rules += ("\t\"" + iden + " : " + vocabulary.to_print_tokens(context) +
			"\"\n")
		context.rules += "\tp[0] = " + self.to_print(context, category, vocabulary) + "\n\n"

class Concat:
	"""Semantic action joining two actions' results via __concat__."""

	def __init__(self, action1, action2):
		self.action1 = action1
		self.action2 = action2

	def to_print(self, context, category, vocabulary):
		left = self.action1.to_print(context, category, vocabulary)
		right = self.action2.to_print(context, category, vocabulary)
		return "__concat__(" + left + ", " + right + ")"

	def to_print_comment(self, context, category):
		left = self.action1.to_print_comment(context, category)
		right = self.action2.to_print_comment(context, category)
		return left + ":" + right

	def build_parser(self, context, iden, category, vocabulary):
		"""Emit one PLY p_ function whose p[0] is the concatenation."""
		rulename = context.get_next_rule()
		header = ("def p_" + rulename + "(p): # " +
			iden + " : " + vocabulary.to_print(context) +
			" > " + self.to_print_comment(context, category) + "\n")
		grammar_doc = "\t\"" + iden + " : " + vocabulary.to_print_tokens(context) + "\"\n"
		assignment = "\tp[0] = " + self.to_print(context, category, vocabulary) + "\n\n"
		context.rules += header + grammar_doc + assignment
				
class Attributes:
	"""Ordered list of Attribute objects for one AST constructor."""

	def __init__(self, attribute):
		self.attributes = [attribute]

	def add(self, attribute):
		self.attributes.append(attribute)

	def to_print(self, context, category):
		"""Space-separated human-readable form of all attributes."""
		rendered = [attr.to_print(context, category) for attr in self.attributes]
		return " ".join(rendered)

	def build_parser(self, context):
		"""Space-separated field names for the namedtuple declaration."""
		names = [attr.build_parser(context) for attr in self.attributes]
		return " ".join(names)

	def build_call_params(self, context, category, vocabulary):
		"""Comma-separated keyword arguments for the constructor call."""
		params = [attr.build_call_param(context, category, vocabulary) for attr in self.attributes]
		return ", ".join(params)

class Attribute:
	"""One keyword attribute: a field identifier plus its value reference."""

	def __init__(self, identifier, reference):
		self.identifier = identifier
		self.reference = reference

	def to_print(self, context, category):
		return "%s=%s" % (self.identifier, self.reference.to_print_comment(context, category))

	def build_call_param(self, context, category, vocabulary):
		return "%s=%s" % (self.identifier, self.reference.to_print(context, category, vocabulary))

	def build_parser(self, context):
		# The namedtuple field name is just the identifier itself.
		return self.identifier
	
class Assoc:
	"""A precedence/associativity declaration for a set of tokens."""

	def __init__(self, type, assoc):
		self.type = type    # 'left' / 'right' / 'nonassoc'
		self.assoc = assoc  # lexeme texts, resolved via context.d_tokens

	def build_parser(self, context):
		"""Prepend this declaration's entry to context.assoc.

		Prepending (rather than appending) preserves the original
		declaration order in the generated precedence tuple.
		"""
		names = ["\'" + context.d_tokens[a] + "\'" for a in self.assoc]
		tokens = ", ".join(names)
		context.assoc = "    (\'" + self.type + "\', " + tokens + "), \n" + context.assoc
		
				
class Ignore:
	"""%ignore directive: lexemes the generated lexer should skip."""

	def __init__(self, igns):
		self.igns = igns  # list of lexeme objects (ER/Token-like) to ignore

	def cleanstr(self, string):
		"""Strip quotes and backslash-escape regex metacharacters.

		NOTE(review): unlike Token.cleanstr, '{' and '}' are absent from
		the special set here — presumably deliberate; confirm before
		unifying the two.
		"""
		inner = string[1:-1]
		special = set("()[]*+.$^?|")
		return "".join("\\" + ch if ch in special else ch for ch in inner)

	def build_parser(self, context):
		"""Emit a t_ignore_ lexer rule for each ignored lexeme."""
		for ign in self.igns:
			ignsymbol = ign.to_print(context)
			if ignsymbol in context.d_tokens:
				print("Warning: Token " + ignsymbol + " definido y luego ignorado")
			if ignsymbol in context.d_ignore:
				lexname = context.d_ignore[ignsymbol]
			else:
				lexname = context.get_next_token()
				context.d_ignore[ignsymbol] = lexname
			context.tokens += "t_ignore_" + lexname + " = r'" + ign.to_print_clean(context) + "'\n"

	def to_print(self, context):
		# NOTE(review): self.token is never assigned on Ignore (only
		# self.igns is set in __init__) — looks copy-pasted from Token,
		# so calling this raises AttributeError. Preserved as-is pending
		# confirmation of the intended behavior.
		return self.token

	def to_print_tokens(self, context):
		# NOTE(review): same issue — self.token does not exist here.
		return context.d_ignore[self.token]
	
	