#  This file is part of codeorge
#  Copyright (C) 2009 Johan Jordaan (www.johanjordaan.co.za)
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

from tokeniser import Token
import re
import string

class SyntaxTree:
  '''Thin wrapper holding the root SyntaxNode of a fully reduced input.'''

  def __init__(self, root):
    # Keep a reference to the single node left over after reduction.
    self.root = root
    
class SyntaxNode:
  '''A node in the syntax tree: wraps a Token and links parent/children.'''

  def __init__(self, token=None, parent=None, children=None):
    '''
    token    : the Token this node represents (None for synthetic nodes).
    parent   : the parent SyntaxNode, if already known at construction.
    children : optional list of child SyntaxNodes; each child's parent
               is re-pointed at this node.  Defaults to an empty list.
    '''
    self.token = token
    self.parent = parent
    # BUGFIX: the default used to be a shared mutable list (children=[]),
    # so every default-constructed node aliased ONE list and appends to
    # one node's children showed up on all of them.  Use None as the
    # sentinel and allocate a fresh list per instance.
    self.children = [] if children is None else children
    for child in self.children:
      child.parent = self

    
def apply_syntax_rule(syntax_nodes, syntax_rule):
  '''
  Repeatedly apply one syntax rule to a list of SyntaxNodes until it no
  longer matches anywhere.

  syntax_nodes : list of SyntaxNode; their token names, space-joined,
                 form the string the rule's regex is matched against.
  syntax_rule  : [reduction_name, compiled_regex, ...] — the regex is
                 matched (anchored at the start) against the token-name
                 string beginning at each scan position.

  Returns the (possibly reduced) list of syntax nodes.

  BUGFIX: removed the leftover debug `print` statements and the
  `raw_input()` call that blocked on every iteration, making the
  function unusable outside an interactive console.  Also replaced the
  deprecated `string.join` with `' '.join` and `!= None` with
  `is not None`.
  '''
  pos = 0
  while pos < len(syntax_nodes):
    # Build the name string for the suffix starting at pos and try the rule.
    syntax_str = ' '.join([node.token.name for node in syntax_nodes[pos:]])
    match = syntax_rule[1].match(syntax_str)
    if match is not None:
      # Number of nodes consumed = number of names in the matched text.
      offset = len(match.group().split())
      # Collapse the matched span into one parent node carrying the
      # rule's reduction name; SyntaxNode re-parents the children.
      syntax_nodes[pos:pos + offset] = [
        SyntaxNode(token=Token(syntax_rule[0], ''),
                   children=syntax_nodes[pos:pos + offset])
      ]
      # Restart the scan: the reduction may enable an earlier match.
      pos = 0
    else:
      pos += 1

  return syntax_nodes
    
    
def reduce(syntax_nodes, syntax):
  '''
  Run one full pass over the grammar: every rule is applied, in the
  order given, to the current node list, and the resulting list is
  returned.

  NOTE(review): this shadows the built-in reduce(); the name is kept
  because it is part of this module's public interface.
  '''
  nodes = syntax_nodes
  for rule in syntax:
    nodes = apply_syntax_rule(nodes, rule)
  return nodes
    
def parse_syntax(tokens, syntax):
  '''
  Parse a token stream into a SyntaxTree by repeated regex reduction.

  tokens : A list of Tokens created by the tokeniser.
  syntax : A syntax. In the form of [ ['REDUCTION','REDUCTION REGULAR EXPRESSION'],...]

  Returns a SyntaxTree whose root is the single remaining node.
  Raises ValueError if a full pass over all rules reduces nothing,
  i.e. the input does not conform to the grammar (the original code
  looped forever in that case — see its own TODO).
  '''

  # Create SyntaxNode for each Token from the input
  #
  syntax_nodes = [SyntaxNode(token=token) for token in tokens]

  # Compile the rule patterns into a PRIVATE copy of the rule list.
  # BUGFIX: the original inserted the compiled regex into the caller's
  # own lists (syntax_item.insert(1, ...)), so a second call with the
  # same syntax corrupted it: double-compiled rules and shifted indices.
  # Layout stays [name, compiled_regex, <original tail...>] as before.
  compiled_syntax = [[rule[0], re.compile(rule[1])] + list(rule[2:])
                     for rule in syntax]

  # Continue reducing the syntax nodes until only one remains.
  # Guard against grammars/input that cannot reduce to one node: if a
  # complete pass over every rule leaves the node count unchanged, no
  # further progress is possible.
  #
  while len(syntax_nodes) > 1:
    count_before = len(syntax_nodes)
    syntax_nodes = reduce(syntax_nodes, compiled_syntax)
    if len(syntax_nodes) == count_before:
      raise ValueError('Syntax error: token stream cannot be reduced to a single root node')

  # Return a SyntaxTree with the remaining node as the root node
  #
  ret_val = SyntaxTree(syntax_nodes[0])
  return ret_val
      
    
    



# NOTE(review): the triple-quoted block below is an EARLIER implementation
# kept as a module-level string literal (dead code).  It is evaluated as a
# harmless expression statement at import time and never executed as code.
# Kept verbatim for reference; consider deleting it in a future cleanup.
'''
syntax = [  ['EXPRESSION',re.compile('NUMBER OPERATOR NUMBER'),adder]
          , ['EXPRESSION',re.compile('EXPRESSION OPERATOR NUMBER')]
          , ['EXPRESSION',re.compile('EXPRESSION OPERATOR EXPRESSION')]
          , ['EXPRESSION',re.compile('NUMBER')]
          , ['EXPRESSION',re.compile('OPEN_BRACKET EXPRESSION CLOSE_BRACKET'),lambda a,b,c: b ]
          , ['ROOT',re.compile('^EXPRESSION$')]
         ]  


        
def _reduce(tokens,syntax):
  tokens_str = string.join([token.name for token in tokens],' ')

  for rule in syntax:
    reduction = rule[0]
    re = rule[1]
    function = None
    if len(rule)>2:
      function = rule[2]

    res = re.match(tokens_str)
    if res != None:
      m = res.group(0)
      c = len(m.split())
      children = tokens[:c]
      newToken = Token(reduction)
      newToken.function = function
      newToken.children = children  
      for child in children:
        child.parent = newToken
      tokens = tokens[c:]
      tokens.insert(0,newToken)
      return tokens

  if len(tokens)>1:    
    return [tokens[0]] + _reduce(tokens[1:],syntax)    
  else:
    return [tokens[0]]

          
  
def reduce(tokens,syntax):
  while len(tokens)>1:
    print string.join([token.name for token in tokens],' ')
    raw_input('')
    tokens = _reduce(tokens,syntax)
    print '-'*40
  print tokens[0].name
  print '-'*40
  return tokens[0]  

#----------------------------------------------------------------
'''
