#!/usr/bin/env python

'''Tokenizes individual lines

TODO(encinarus): Add comments and a link to a hosted state diagram of the tokenizer.
'''

# Token type tags attached to each (type, value) pair produced by tokenize().
TEXT = 'TEXT'
COMMAND = 'COMMAND'
LITERAL = 'LITERAL'
STRING = 'STRING'
BLANK_LINE = 'BLANK_LINE'
SYMBOL = 'SYMBOL'
# Bug fix: has_error() references PARSE_ERROR, but it was never defined,
# so any call on a non-empty token list raised NameError.
PARSE_ERROR = 'PARSE_ERROR'

def add_token(type, characters, tokens):
  '''Appends a (type, text) pair to tokens, skipping empty tokens.

  Args:
    type: one of the token-type tag strings (TEXT, COMMAND, LITERAL, ...).
        NOTE(review): shadows the builtin `type`; name kept so existing
        callers are unaffected.
    characters: the characters making up the token -- either a list of
        single-character strings or a plain string (tokenize passes both).
    tokens: output list of (type, value) pairs; mutated in place.
  '''
  # An empty character sequence produces no token at all.
  if characters:
    tokens.append((type, ''.join(characters)))

def has_error(tokens):
  '''Returns True if any token in tokens is a parse-error token.

  Args:
    tokens: iterable of (token_type, value) pairs, as produced by tokenize().

  Returns:
    True if any token has the parse-error tag, False otherwise.
  '''
  # Bug fix: this previously compared against a module-level PARSE_ERROR
  # name that was never defined anywhere in the file, so any call with a
  # non-empty token list raised NameError.  Compare against the literal
  # tag string instead.
  for token_type, _ in tokens:
    if token_type == 'PARSE_ERROR':
      return True
  return False

def tokenize(line):
  '''Tokenizes a single line into a list of (token_type, value) pairs.

  A line that does not begin with '%' is returned as one TEXT token; a
  whitespace-only line as one BLANK_LINE token.  Otherwise the line is
  scanned with a character-at-a-time state machine:

    0  start of line
    1  saw the leading '%'; deciding comment vs. command
    2  comment: the rest of the line becomes one TEXT token
    3  reading the command word
    4  between tokens, skipping whitespace
    5  saw 'r'; might be a raw-string prefix or a plain literal
    6  inside a double quoted string
    7  inside a single quoted string
    8  just emitted a SYMBOL; the next token may start immediately
    9  reading a LITERAL
    10 escaped character inside a double quoted string
    11 escaped character inside a single quoted string

  Args:
    line: the line to tokenize (trailing newline not required).

  Returns:
    A list of (token_type, value) tuples.
  '''
  tokens = []
  current_token = []
  state = 0

  # Single-character symbol tokens; frozenset for O(1) membership tests.
  symbols = frozenset(['=', '<', '>', '-', ':', ',', '|'])

  if not line.strip():
    return [(BLANK_LINE, '')]

  # Remember the caller's line before appending the sentinel, so plain
  # text lines are not reported with a trailing '\n' they never had.
  raw_line = line

  # Add a sentinel end-of-line character so the final token is closed
  # inside the loop, without cleanup logic afterwards.
  line = line + '\n'

  for ch in line:
    if state == 0:
      # Start of line: '%' begins a command, anything else is plain text.
      if ch == '%':
        current_token.append(ch)
        state = 1
      else:
        # Bug fix: previously the sentinel '\n' leaked into this TEXT
        # token (inconsistent with state 2, which strips it).  Report
        # the original line instead.
        tokens = [(TEXT, raw_line)]
        break

    elif state == 1:
      # Just saw '%'.  Whitespace makes it a bare-'%' comment; anything
      # else starts a real command word.
      if ch.isspace():
        add_token(COMMAND, current_token, tokens)
        current_token = []
        state = 2
      else:
        current_token.append(ch)
        state = 3

    elif state == 2:
      # Comment: everything up to end of line is one TEXT token.
      if ch == '\n':
        add_token(TEXT, current_token, tokens)
        current_token = []
      else:
        current_token.append(ch)

    elif state == 3:
      # Reading the command word; whitespace ends it.
      if ch.isspace():
        add_token(COMMAND, current_token, tokens)
        current_token = []
        state = 4
      else:
        current_token.append(ch)

    elif state == 4:
      # Between tokens: skip whitespace, then dispatch on the first
      # character of the next token.
      if ch.isspace():
        continue

      current_token.append(ch)
      if ch == 'r':
        # Might be the prefix of a raw string -- decide in state 5.
        state = 5
      elif ch == '"':
        state = 6
      elif ch == '\'':
        state = 7
      elif ch in symbols:
        # Symbols are single-character tokens; emit immediately.
        add_token(SYMBOL, current_token, tokens)
        current_token = []
        state = 8
      else:
        state = 9

    elif state == 5:
      # Saw 'r': a following quote makes it a raw-string prefix (the
      # 'r' stays part of the STRING token); otherwise it was a literal.
      if ch == '"':
        current_token.append(ch)
        state = 6
      elif ch == '\'':
        current_token.append(ch)
        state = 7
      elif ch.isspace():
        # The 'r' was a complete literal on its own.
        add_token(LITERAL, current_token, tokens)
        current_token = []
        state = 4
      elif ch in symbols:
        # Close the literal and emit the symbol in one step.
        add_token(LITERAL, current_token, tokens)
        add_token(SYMBOL, ch, tokens)
        current_token = []
        state = 8
      else:
        current_token.append(ch)
        state = 9

    elif state == 6:
      # Inside a double quoted string (quotes kept in the token text).
      if ch == '\\':
        # Escape: take the next character verbatim via state 10.
        current_token.append(ch)
        state = 10
      elif ch == '"':
        current_token.append(ch)
        add_token(STRING, current_token, tokens)
        current_token = []
        state = 4
      elif ch == '\n':
        # End of line closes an unterminated string.
        add_token(STRING, current_token, tokens)
        current_token = []
      else:
        current_token.append(ch)

    elif state == 7:
      # Inside a single quoted string (quotes kept in the token text).
      if ch == '\\':
        # Escape: take the next character verbatim via state 11.
        current_token.append(ch)
        state = 11
      elif ch == '\'':
        current_token.append(ch)
        add_token(STRING, current_token, tokens)
        current_token = []
        state = 4
      elif ch == '\n':
        # End of line closes an unterminated string.
        add_token(STRING, current_token, tokens)
        current_token = []
      else:
        current_token.append(ch)

    elif state == 8:
      # Just emitted a symbol; the next token may begin with no
      # intervening whitespace.
      if ch.isspace():
        state = 4
      elif ch in symbols:
        # A run of symbols yields one SYMBOL token per character.
        add_token(SYMBOL, ch, tokens)
      elif ch == 'r':
        current_token.append(ch)
        state = 5
      elif ch == '"':
        current_token.append(ch)
        state = 6
      elif ch == '\'':
        current_token.append(ch)
        state = 7
      else:
        current_token.append(ch)
        state = 9

    elif state == 9:
      # Reading a literal; whitespace or a symbol ends it.
      if ch.isspace():
        add_token(LITERAL, current_token, tokens)
        current_token = []
        state = 4
      elif ch in symbols:
        add_token(LITERAL, current_token, tokens)
        add_token(SYMBOL, ch, tokens)
        current_token = []
        state = 8
      else:
        current_token.append(ch)

    elif state == 10:
      # Escaped character inside a double quoted string.
      current_token.append(ch)
      state = 6

    elif state == 11:
      # Escaped character inside a single quoted string.
      current_token.append(ch)
      state = 7

  return tokens

def main():
  '''Exercises tokenize() on representative inputs and prints each result.'''
  sample_lines = [
      'this should all be text',
      'this "too" should all be text',
      ' %as should this',
      '%but not this',
      '% this should all be a command and text',
      '%this should be a command with literals',
      '%this "should" r"be" "a\'" \' command\' r\'with\' "only \\" " ',
      '%this should=have<symbols>between!literals',
      '%this "should"=r"have"<\'symbols\' > r\'between\' , "" |>= r"\\"strings"',
      '%this should=r"have" a good Mix\t=of \'stuff in the string\'',
      '%this ends with a symbol =',
      '%this ends with a literal HI',
      '%this ends with a string "doncha kno"',
      '%this ends with a string "doncha kno',
      '%this ends with a string \'doncha kno\'',
      '%this ends with a string \'doncha kno',
      '',  # this should be a blank line
      '  ',  # this should be a blank line too
  ]
  for sample in sample_lines:
    # Single-argument print() is valid in both Python 2 and Python 3,
    # unlike the original `print expr` statements.
    print(tokenize(sample))

if __name__ == '__main__':
  main()