import re
import shlex
import string

from lexer     import *
from writer    import Writer
from tokentype import TokenType
from code      import Code

from node.message  import Message
from node.integer  import Integer
from node.color    import Color
from node.var      import Var
from node.binexpr  import BinExpr
from node.assign   import Assign
from node.minus    import Minus
from node.bool     import Bool
from node.ifstmt   import If, Elif
from node.block    import Block
from node.select   import Select
from node.goto     import Goto
from node.label    import Label
from node.function import Function
from node.define   import Define

from node.primitive.draw_bg  import DrawBg
from node.primitive.clear_bg import ClearBg

class Parser:
	""" Recursive-descent parser for the scenario script language.

	Reads tokens from a Lexer, builds AST nodes (the node.* classes) and
	runs them against a Writer to emit the compiled output.

	NOTE: the local variable name ``type`` (shadowing the builtin) is kept
	throughout to stay close to the original code layout.
	"""
	def __init__(self, lexer):
		self.lexer = lexer
		# Built-in primitive commands callable from scripts.
		self.globals = {
			"draw_bg": DrawBg(),
			"clear_bg": ClearBg()
		}
		# Named constants (effect flags) exposed to scripts, mapped to
		# their Code values.
		self.defines = {
			"EF_NORMAL"    : Code.EF_NORMAL,
			"EF_SCROLL_L"  : Code.EF_SCROLL_L,
			"EF_SCROLL_R"  : Code.EF_SCROLL_R,
			"EF_SCROLL_U"  : Code.EF_SCROLL_U,
			"EF_SCROLL_D"  : Code.EF_SCROLL_D,
			"EF_SHUTTER_L" : Code.EF_SHUTTER_L,
			"EF_SHUTTER_R" : Code.EF_SHUTTER_R,
			"EF_SHUTTER_U" : Code.EF_SHUTTER_U,
			"EF_SHUTTER_D" : Code.EF_SHUTTER_D,
			"EF_ALPHA"     : Code.EF_ALPHA,
			"EF_GRAY"      : Code.EF_GRAY,
			"EF_SEPIA"     : Code.EF_SEPIA,
			"EF_NEGA"      : Code.EF_NEGA
		}
	def hasGlobals(self, name):
		""" Return True if a built-in symbol named `name` exists. """
		# dict.has_key() is deprecated and removed in Python 3;
		# `in` is the portable equivalent.
		return name in self.globals
	def getGlobals(self, name):
		""" Look up a built-in symbol and return a fresh instance of it. """
		glob = self.globals[name]
		return glob.getInstance()
	def hasDefines(self, name):
		""" Return True if a named constant `name` exists. """
		return name in self.defines
	def getDefines(self, name):
		""" Return the value bound to the named constant. """
		define = self.defines[name]
		return define
	def getTokenType(self):
		""" Return the type of the current token. """
		return self.lexer.type
	def lookAhead(self):
		""" Advance to the next token and return its type (None at EOF). """
		if self.lexer.advance():
			return self.lexer.type
		else:
			return None
	def unread(self, token):
		""" Push one token back onto the input. """
		self.lexer.unread(token)
	def warning(self, msg):
		""" Log a warning message. """
		message = "Warning :%s"%msg
		self.writeLog(message)
	def fatal(self, msg):
		""" Log a fatal error message and abort parsing.

		Raises:
			Exception: always, to stop the parse.
		"""
		message = "Fatal :%s"%msg
		self.writeLog(message)
		# Raising a plain string (`raise "Fatal Error"`) is invalid since
		# Python 2.6 -- it raises TypeError instead of the intended error.
		# Raise a real exception object.
		raise Exception("Fatal Error")
	def writeLog(self, msg):
		""" Print a log line prefixed with "file(line, column)". """
		message = "%s(%d, %d) %s"%(
			# NOTE(review): looks like a typo for getFilename, but it must
			# match the lexer's API -- confirm before renaming there.
			self.lexer.getFinename(),
			self.lexer.getLineno(),
			self.lexer.getColumnno(),
			msg)
		print(message)
	def skipCrlf(self):
		""" Skip consecutive newline tokens.

		Returns the first non-CRLF token type, or None at EOF
		(lookAhead() returns None, which also terminates the loop).
		"""
		type = self.getTokenType()
		while type == TokenType.CRLF:
			type = self.lookAhead()
		return type
	def parse(self, filepath):
		""" Parse the whole token stream and write the result to `filepath`. """
		writer = Writer(filepath, self)
		self.writer = writer
		while self.lookAhead() is not None:
			node = self.parseProgram()
			if node is not None:
				node.run(writer)
		writer.finalize()
		print("convert done.")
	def parseProgram(self):
		""" Parse one program line.
		# program <- statement '\n'
		"""
		node = self.parseStatement()
		if node is not None:
			if self.getTokenType() == TokenType.CRLF:
				pass
			else:
				# Emit what was parsed so far, then report the stray token.
				node.run(self.writer)
				self.fatal("Illigal TokenType'%s', Token'%s'"%(self.getTokenType(), self.lexer.token))
		return node
	def parseStatement(self):
		""" Parse a statement.
		# statement <- if-statement
		#            | select-statement
		#            | label-block
		#            | fun-block
		#            | expression
		#            | block-statement
		"""
		node = None
		type = self.getTokenType()
		if type == TokenType.IF:
			node = self.parseStatementIf()
		elif type == TokenType.SELECT:
			node = self.parseStatementSelect()
		elif type == TokenType.DEF:
			node = self.parseStatementLabel()
		elif type == TokenType.FUN:
			node = self.parseStatementFunction()
		elif type == '{':
			node = self.parseStatementBlock()
		else:
			node = self.parseExpression()
		return node
	def parseStatementIf(self):
		""" Parse an if statement.
		# if-stmt <- 'if' '(' expr ')' stmt ['elif' '(' expr ')' stmt]* ['else' stmt]?
		"""
		type = self.lookAhead()
		if type != '(':
			self.fatal("Illigal grammar 'if'statement need '('")
		self.lookAhead() # skip '('
		cond = self.parseExpression()
		if self.getTokenType() != ')':
			self.fatal("Illigal grammar 'if'statement need ')'")
		self.lookAhead() # skip ')'
		type = self.skipCrlf()
		bodyThen = self.parseStatement()
		type = self.skipCrlf()
		
		# Collect any number of elif clauses. A None token type (EOF)
		# ends the loop via the while condition.
		bodyElifList = []
		type = self.getTokenType()
		while type == TokenType.ELIF:
			type = self.lookAhead()
			if type != '(':
				self.fatal("Illigal grammar 'elif'statement need '('")
			self.lookAhead() # skip '('
			condElif = self.parseExpression()
			if self.getTokenType() != ')':
				self.fatal("Illigal grammar 'elif'statement need ')'")
			self.lookAhead() # skip ')'
			type = self.skipCrlf()
			bodyElif = self.parseStatement()
			bodyElifList.append(Elif(condElif, bodyElif))
			type = self.getTokenType()
		type = self.skipCrlf()

		# Optional else clause.
		bodyElse = None
		if type == TokenType.ELSE:
			self.lookAhead() # skip 'else'
			type = self.skipCrlf()
			bodyElse = self.parseStatement()
		
		# Re-insert a newline so parseProgram sees a statement terminator.
		self.unread("\n")
		self.lookAhead()
		return If(cond, bodyThen, bodyElifList, bodyElse)
	def parseStatementSelect(self):
		""" Parse a select (choice menu) statement.
		# select-stmt <- 'select' '(' "question" ')' ['{' "choice" '}']+
		"""
		type = self.lookAhead()
		if type != '(':
			self.fatal("Illigal grammar 'select'statement need '('")
		
		questionList = self.parseSelectQuestion()
		selectList = []
		bodyList   = []
		type = self.skipCrlf()
		if type != '{':
			self.fatal("Illigal grammar 'select'statement need '{'")
		while type == '{':
			self.lookAhead()
			type = self.skipCrlf()
			if type != TokenType.QUOTES:
				self.fatal("Illigal grammar 'select'statement not found 'chioses'")
			selectList.append(self.lexer.token.strip('"'))
			body = self.parseStatementBlock()
			bodyList.append(body)
			self.lookAhead()
			type = self.skipCrlf()
		
		# Push back the last token plus a newline so parseProgram sees a
		# statement terminator.
		self.unread(self.lexer.token)
		self.unread("\n")
		self.lookAhead()
		return Select(questionList, selectList, bodyList)

	def parseSelectQuestion(self):
		""" Parse the question part of a select statement.
		# select-question <- '(' (string)? ['\n' (string)]* ')'
		"""
		type = self.getTokenType()
		if type != '(':
			self.fatal("Illigal grammar parameter need '('")
		type = self.lookAhead() # skip '('
		type = self.skipCrlf()
		
		strList = []
		while type != ')':
			if type is None:
				# EOF before the closing ')'.
				break
			if type != TokenType.QUOTES:
				self.fatal("Illigal grammar parameter need \"message\" ")
			
			strList.append(self.lexer.token.strip('"'))
			
			self.lookAhead()
			type = self.skipCrlf()
		
		self.lookAhead()
		return strList
		
	def parseStatementLabel(self):
		""" Parse a label block.
		# label-block <- 'def' label-name block-statement
		"""
		name = self.lexer.token
		type = self.lookAhead()
		type = self.skipCrlf()
		if type != '{':
			self.fatal("Illigal grammar  'define label' need '{'")
		body = self.parseStatementBlock()
		return Label(name, body)
	def parseStatementFunction(self):
		""" Parse a function block.
		# fun-block <- 'fun' function-name block-statement
		"""
		name = self.lexer.token
		type = self.lookAhead()
		type = self.skipCrlf()
		if type != '{':
			self.fatal("Illigal grammar 'define funtion' need '{'")
		body = self.parseStatementBlock()
		return Function(name, body)
	def parseStatementBlock(self):
		""" Parse a block statement.
		# block-stmt <- '{' [stmt '\n']* '}'
		"""
		stmtList = None
		type = self.lookAhead() # skip '{'
		while self.isNextStatementBlock():
			stmt = self.parseStatement()
			if self.lexer.lexReader.isEof():
				self.fatal("Illigal grammar file EOF before '\\n' or '}' ")
			
			# Each statement must end with a newline or the closing brace.
			if self.getTokenType() == TokenType.CRLF:
				type = self.lookAhead()
			elif self.getTokenType() == '}':
				type = self.getTokenType()
			else:
				self.fatal("Illigal grammar need '\\n' or '}' ->TokenType '%s'"%self.getTokenType())
			if stmtList is None:
				stmtList = []
			stmtList.append(stmt)
		self.lookAhead() # skip '}'
		return Block(stmtList)
	def isNextStatementBlock(self):
		""" Return True while there are more statements inside the block. """
		type = self.getTokenType()
		if type == '}':
			return False
		if type is None:
			return False
		return True
	def isExpression(self):
		""" Return True if the current token is a comparison operator. """
		type = self.getTokenType()
		return (type == TokenType.EQ
			or type == TokenType.NE
			or type == '<'
			or type == TokenType.LE
			or type == '>'
			or type == TokenType.GE)
	def parseExpression(self):
		""" Parse an expression.
		# expr <- simple-expr [('==' | '!=' | '<' | '<=' | '>' | '>=') simple-expr]*
		"""
		node = self.parseSimpleExpression()
		if self.isExpression():
			node = self.createExpression(node)
		return node
	def createExpression(self, nodeL):
		""" Build the (left-associative) syntax tree for a comparison chain. """
		result = None
		while self.isExpression():
			op = self.getTokenType()
			self.lookAhead()
			nodeR = self.parseSimpleExpression()
			if result is None:
				result = BinExpr(op, nodeL, nodeR)
			else:
				result = BinExpr(op, result, nodeR)
		return result
	def isSimpleExpression(self):
		""" Return True if the current token is an additive operator. """
		type = self.getTokenType()
		return type == '+' or type == '-' or type == TokenType.OR
	def parseSimpleExpression(self):
		""" Parse a simple expression.
		# simple-expr <- term [('+' | '-' | '||') term]*
		"""
		node = self.parseTerm()
		if self.isSimpleExpression():
			node = self.createSimpleExpression(node)
		return node
	def createSimpleExpression(self, nodeL):
		""" Build the (left-associative) syntax tree for an additive chain. """
		result = None
		while self.isSimpleExpression():
			op = self.getTokenType()
			self.lookAhead()
			nodeR = self.parseTerm()
			if result is None:
				result = BinExpr(op, nodeL, nodeR)
			else:
				result = BinExpr(op, result, nodeR)
		return result
	def isTerm(self):
		""" Return True if the current token is a multiplicative operator. """
		type = self.getTokenType()
		return type == '*' or type == '/' or type == TokenType.AND
	def parseTerm(self):
		""" Parse a term.
		# term <- factor [('*' | '/' | '&&') factor]*
		"""
		node = self.parseFactor()
		if self.isTerm():
			node = self.createTerm(node)
		return node
	def createTerm(self, nodeL):
		""" Build the (left-associative) syntax tree for a multiplicative chain. """
		result = None
		while self.isTerm():
			op = self.getTokenType()
			self.lookAhead()
			nodeR = self.parseFactor()
			if result is None:
				result = BinExpr(op, nodeL, nodeR)
			else:
				result = BinExpr(op, result, nodeR)
		return result
	def parseFactor(self):
		""" Parse a factor.
		# factor <- integer
		#         | color (#ffffff)
		#         | true
		#         | false
		#         | '-' factor
		#         | '!' factor
		#         | '(' expr ')'
		#         | variable
		#         | variable '=' expr
		#         | message
		#         | goto label
		#         | function call
		"""
		type = self.getTokenType()
		if type == TokenType.INT:
			node = Integer(self.lexer.token)
			self.lookAhead() # skip INT
		elif type == TokenType.COLOR:
			node = Color(self.lexer.token)
			self.lookAhead() # skip COLOR
		elif type == TokenType.TRUE:
			node = Bool(True)
			self.lookAhead() # skip 'true'
		elif type == TokenType.FALSE:
			node = Bool(False)
			self.lookAhead() # skip 'false'
		elif type == '!':
			self.lookAhead() # skip '!'
			# NOTE(review): `Not` is never imported in this file, so this
			# branch raises NameError at runtime -- the logical-not node
			# import appears to be missing; confirm and add it.
			node = Not(self.parseFactor())
		elif type == '-':
			self.lookAhead() # skip '-'
			node = Minus(self.parseFactor())
		elif type == '(':
			self.lookAhead() # skip '('
			node = self.parseExpression()
			if self.getTokenType() != ')':
				self.fatal("Illigal grammar not pair ')'")
			self.lookAhead() # skip ')'
		elif type == TokenType.VAR:
			sym = self.parseVar()
			type = self.lookAhead()
			if type == '=':
				self.lookAhead() # skip '='
				node = Assign('=', sym, self.parseExpression())
			elif type == TokenType.IADD:
				self.lookAhead() # skip '+='
				node = Assign(TokenType.IADD, sym, self.parseExpression())
			elif type == TokenType.ISUB:
				self.lookAhead() # skip '-='
				node = Assign(TokenType.ISUB, sym, self.parseExpression())
			elif type == TokenType.IMUL:
				self.lookAhead() # skip '*='
				node = Assign(TokenType.IMUL, sym, self.parseExpression())
			elif type == TokenType.IDIV:
				self.lookAhead() # skip '/='
				node = Assign(TokenType.IDIV, sym, self.parseExpression())
			else:
				# Plain variable reference.
				node = sym
		elif type == TokenType.QUOTES:
			node = self.parseMessage()
			self.lookAhead()
		elif type == TokenType.GOTO:
			node = Goto(self.lexer.token)
			self.lookAhead()
		elif type == TokenType.SYMBOL:
			token = self.lexer.token
			type = self.lookAhead()
			if type == '(':
				# Function call.
				node = self.parseMethodCall(token)
			elif self.hasDefines(token):
				# Named constant.
				node = Define(self.getDefines(token))
			else:
				self.fatal("Illigal grammar define symbol :TokenType '%s'"%type)
		elif type == TokenType.CRLF:
			# Empty line.
			node = None
		elif type == '}':
			# End of a block.
			node = None
		else:
			self.fatal("Illigal TokenType '%s'"%type)
		return node
	def parseMessage(self):
		""" Parse a quoted message, joining backslash-continued lines. """
		token = self.lexer.token.strip('"')
		# Slicing with [-1:] instead of [len-1] avoids an IndexError on an
		# empty message ("").
		suffix = token[-1:]
		if suffix == "\\":
			# Trailing backslash: the message continues on the next line.
			message = token[:len(token)-1]
			while True:
				type = self.lookAhead()
				if type != TokenType.CRLF:
					self.fatal("Message in Illigal TokenType '%s'"%type)
				type = self.lookAhead()
				if type != TokenType.QUOTES:
					self.fatal("Message in Illigal TokenType '%s'"%type)
				token = self.lexer.token.strip('"')
				suffix = token[-1:]
				if suffix == "\\":
					message += token[:len(token)-1]
				else:
					message += token
					break
		else:
			message = token
		return Message(message)
	def parseVar(self):
		""" Parse a variable reference ($<number>). """
		no = self.lexer.token.lstrip("$")
		return Var(no)
	def parseMethodCall(self, token):
		""" Parse a call to a built-in function named `token`. """
		self.lookAhead() # skip '('
		args = self.parseArgs()
		if self.getTokenType() != ')':
			self.fatal("Illigal TokenType '%s'"%self.getTokenType())
		self.lookAhead()
		
		if self.hasGlobals(token):
			result = self.getGlobals(token)
			result.initialize(args)
		else:
			self.fatal("Not define symbol '%s'"%token)
		return result
	def parseArgs(self):
		""" Parse a function-call argument list.
		# args <- [ expr [',' expr]* ]
		"""
		result = []
		if self.getTokenType() != ')':
			result.append(self.parseExpression())
			type = self.getTokenType()
			while type != ')':
				if type != ',':
					self.fatal("Illigal args statement '%s'"%type)
				type = self.lookAhead() # skip ','
				result.append(self.parseExpression())
				type = self.getTokenType()
		return result
