#  This file is part of codeogre
#  Copyright (C) 2009 Johan Jordaan (www.johanjordaan.co.za)
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

import re
import string

class Token:
  """A single lexed token.

  Holds the token ``name`` (its kind, or a terminal string) and the
  raw ``text`` that was matched from the input.
  """
  def __init__(self, name, text=''):
    self.name, self.text = name, text
 
class TokenDefinition:
  """A named token pattern plus its handler functions.

  The regular expression string is compiled once up front with
  ``re.DOTALL`` so ``.`` also spans newlines; ``functions`` is the
  list of handlers the tokeniser runs when the pattern matches.
  """
  def __init__(self, name, regular_expression_str, functions):
    self.name = name
    self.regular_expression_str = regular_expression_str
    # Compile eagerly so matching in the tokeniser loop pays no
    # per-use compilation cost.
    self.regular_expression = re.compile(regular_expression_str, re.DOTALL)
    self.functions = functions

    
class Tokeniser:
  """Regex-driven, first-match tokeniser.

  Walks the input left to right.  At each position the token
  definitions are tried in list order; the first one whose regular
  expression matches consumes the matched text.  Characters that match
  no definition are accumulated into a "terminal" token whose name
  equals its text.  Handler functions attached to a definition may
  return True to let the default/ignore handler run afterwards
  ("bubbling"), or False to stop it.
  """

  def __init__(self, token_definitions):
    # Definitions are tried in list order on every match attempt, so
    # earlier entries take priority over later ones.
    self.token_definitions = token_definitions

  def default_handler(self):
    """Emit the accumulated match as a token named after the current
    definition, then reset the accumulator.  Returns False to stop
    further bubbling."""
    self.ret_val.append(Token(name=self.current_token_definition.name,
                              text=self.current_match))
    self.current_match = ''
    return False

  def ignore_handler(self):
    """Discard the accumulated match without emitting a token.
    Returns False to stop further bubbling."""
    self.current_match = ''
    return False

  def tokenise(self, input):
    """Tokenise *input* (a string) and return a list of Token objects."""
    self.ret_val = []
    self.current_terminal = ''
    self.input = input
    self.current_match = ''
    self.input_index = 0

    # Continue tokenising as long as we have input to consume.
    while self.input_index < len(self.input):
      tokenised = False

      # Try to match each token definition at the current position.
      for token_definition in self.token_definitions:
        self.current_token_definition = token_definition
        match = token_definition.regular_expression.match(
            self.input[self.input_index:])

        # Skip misses AND zero-length matches: accepting an empty
        # match would never advance input_index and the outer loop
        # would spin forever.
        if match is None or not match.group():
          continue

        self.current_match += match.group()

        # Flush any residual terminal data gathered before this match.
        if self.current_terminal != '':
          self.ret_val.append(Token(name=self.current_terminal,
                                    text=self.current_terminal))
          self.current_terminal = ''

        # Advance the input by the match length.
        self.input_index += len(match.group())

        # Run the definition's handler chain; a handler returning
        # False stops the bubbling, otherwise fall through to the
        # default (named token) or ignore (name is None) handler.
        continue_bubbling = True
        for function in token_definition.functions:
          if continue_bubbling:
            continue_bubbling = function(self)
        if continue_bubbling:
          if token_definition.name is not None:
            self.default_handler()
          else:
            self.ignore_handler()

        # We tokenised something at this position; no need to try the
        # remaining definitions.
        tokenised = True
        break

      # If the input didn't match any definition, one character joins
      # the pending "terminal"/"constant" token.
      if not tokenised:
        self.current_terminal += self.input[self.input_index]
        self.input_index += 1

    # Clean up any terminal characters left behind at end of input.
    if self.current_terminal != '':
      self.ret_val.append(Token(name=self.current_terminal,
                                text=self.current_terminal))
      self.current_terminal = ''

    return self.ret_val

    

def tokenise(input, token_definition_array):
  """Convenience wrapper around Tokeniser.

  Each row of *token_definition_array* is ``(name, regex_str)`` or
  ``(name, regex_str, handler)``; rows are turned into TokenDefinition
  objects and the resulting Tokeniser is run over *input*.
  """
  definitions = [
      TokenDefinition(row[0], row[1], [row[2]] if len(row) > 2 else [])
      for row in token_definition_array
  ]
  return Tokeniser(definitions).tokenise(input)
    
    
 
def tokenise_old(input,token_definitions):
  '''
  Legacy tokeniser kept for reference; superseded by Tokeniser/tokenise.

  *token_definitions* is a list of mutable rows ``[name, regex_str]``
  or ``[name, regex_str, handler]``.  NOTE(review): the rows are
  mutated in place below (a compiled regex is inserted at index 1), so
  a caller's list is changed and passing the same list twice would
  double-compile — confirm callers expect this.
  '''
  
  ret_val = []  
  terminal = ''
  
  # Compile the regular expressions and insert them in the token
  # definition rows, shifting the original fields right by one:
  # row becomes [name, compiled, regex_str, handler?].
  #
  for token_def in token_definitions:
    token_def.insert(1,re.compile(token_def[1],re.DOTALL))

  while len(input)>0:
    tokenised = False
    # Try to match each token definition at the start of the
    # remaining input; first match wins.
    #
    for token_def in token_definitions:
      match = token_def[1].match(input)
      if match != None:
        # A match: flush any pending terminal characters as their own
        # token first, then (unless the definition's name is None,
        # meaning "ignore") record the matched text.
        if terminal != '':
          ret_val.append(Token(name=terminal,text=terminal))
          terminal = ''
          # If there is a handler attached to the row (index 3 after
          # the insert above) then run it on the flushed terminal.
          #
          if len(token_def) > 3:
            token_def[3](ret_val[-1])
        if token_def[0] != None:
          # If the previous token is "open" then append the matched
          # text to it instead of creating a new token.
          # NOTE(review): Token defines no ``open`` attribute, so this
          # raises AttributeError unless a handler sets it first —
          # suspected latent bug; confirm before reuse.
          if len(ret_val) == 0 or not ret_val[-1].open:
            ret_val.append(Token(name=token_def[0],text=match.group()))
          else:
            ret_val[-1].text += match.group()
          
          # Advance the input by the match length MINUS ONE: the
          # unconditional ``input = input[1:]`` at the bottom of the
          # loop supplies the final character of the advance.
          # NOTE(review): when name is None (ignored match) this branch
          # is skipped, so the input advances only one character per
          # iteration instead of the match length — suspected bug.
          #
          input = input[len(match.group())-1:]
          
          # If there is a handler attached to the row then run it on
          # the token just produced/extended.
          #
          if len(token_def) > 3:
            token_def[3](ret_val[-1])
            
        tokenised = True
        break;
    # If the input stream didn't match any of the token definitions
    # then the character joins the pending "terminal"/"constant" token.
    #
    if not tokenised:
      terminal += input[0]

    # Advance the input stream by one character (pairs with the
    # length-minus-one slice above on the match path).
    #  
    input = input[1:]    


  # Clean up any terminal characters that might have been left behind.
  #
  if terminal != '':
    ret_val.append(Token(name=terminal,text=terminal))
    terminal = ''
  
  return ret_val
      
  
  