'''
    Copyright (C) 2007  Brian Will

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

Use http://code.google.com/p/pigeon-programming-language/issues/list for contacting the author.
'''

'''
    Externally used: parseFile()

    Call parseFile() with the name of a file to parse and then it will return the syntax
    tree, which is a hierarchy of lists, each representing an s-expression. Each s-expression
    list consists of tokens and sub-s-expressions. (Whitespace and parentheses tokens are not included.) 
'''

import re

from pigeonKeywords import *
TT = TokenTypes

NUM_SPACES_PER_TAB = 6

class Token (object):
    '''A single lexical token: its text, its source line number, and its type.'''
    def __init__(self, text, lineNum, tokenType):
        # plain value object; the rest of the parser reads these attributes
        # directly and may rewrite text/tokenType during categorization
        self.text, self.lineNum, self.tokenType = text, lineNum, tokenType
        
class TokenLine (object):
    '''
        The tokens of one source line, plus the line's 1-based number and an
        indentation score: each leading space counts 1, each leading tab
        counts NUM_SPACES_PER_TAB. (Other whitespace characters count 0.)
    '''
    def __init__(self, tokens, leadingWhitespace, lineNum):
        self.tokens = tokens
        self.lineNum = lineNum
        score = 0
        for ch in leadingWhitespace:
            if ch == '\t':
                score += NUM_SPACES_PER_TAB
            elif ch == ' ':
                score += 1
        self.indentation = score

# One compiled pattern per token type. At any given start character at most one
# of these patterns can match (letters/'=' -> alphanumeric, '"' -> string,
# digit or '-digit' -> number, '#' -> comment, '('/')' -> parens, blanks ->
# whitespace), so the dict's iteration order does not affect tokenization.
TOKEN_REGEXES = {             
    TT.WHITESPACE: re.compile(r'\s+'),
    TT.COMMENT: re.compile(r'#.*'),                    # runs to end of line
    TT.ALPHANUMERIC: re.compile(r'([a-zA-Z]\w*|=)'),   # identifiers/keywords, plus bare '='
    TT.STRING_LITERAL: re.compile(r'"(\\"|[^"])*"'),   # double-quoted; \" escapes allowed
    TT.NUMBER_LITERAL: re.compile(r'-?\d+(\.\d+)?'),   # optional sign, optional fraction
    TT.START_PAREN: re.compile(r'\('),
    TT.END_PAREN: re.compile(r'\)')
}

def buildTree(tokens):
    '''
        Build the syntax tree from a flat token list.

        tokens -- a list of Tokens forming balanced, parenthesized
        s-expressions (as produced by parenthesizeByIndentation and checked
        by verifyParenthesesBalance).

        Returns a list of top-level s-expressions; each s-expression is a
        list of Tokens and nested sub-lists. Parenthesis tokens themselves
        are not included in the result.

        Raises an Exception if an s-expression is opened but never closed.
    '''

    def buildSExpression(tokens, idx):
        '''
            idx is the index in tokens where the s-expression to build starts
            (must be a START_PAREN token).

            Returns (sExpression, idx), where idx is one past the closing
            parenthesis of the s-expression.
        '''
        # bug fix: lineNum is an int; the original concatenated it to a str,
        # which raised TypeError instead of the intended assertion message
        assert tokens[idx].tokenType == TT.START_PAREN, \
            'Unbalanced parentheses somewhere in or before line ' + str(tokens[idx].lineNum)

        startLineNum = tokens[idx].lineNum
        sExpression = []
        idx += 1
        while idx < len(tokens):
            token = tokens[idx]
            tokenType = token.tokenType
            if tokenType == TT.END_PAREN:  # end of the s-expression
                return (sExpression, idx + 1)
            elif tokenType == TT.START_PAREN:  # sub-expression: recurse
                subExpression, idx = buildSExpression(tokens, idx)
                sExpression.append(subExpression)
            else:  # ordinary token
                sExpression.append(token)
                idx += 1
        # bug fix: the original fell off the loop and returned None here,
        # which made the caller fail with an opaque unpacking TypeError
        raise Exception('S-expression starting on line ' + str(startLineNum) +
                        ' is never closed.')

    topLevel = []
    idx = 0
    while idx < len(tokens):
        sExpression, idx = buildSExpression(tokens, idx)
        topLevel.append(sExpression)
    # (the original asserted idx == len(tokens) here; that is trivially true
    # after the loop, so the dead assert was removed)
    return topLevel

def categorizeAlphanumericTokens(tokens):
    '''
        Refine the tokenType of every ALPHANUMERIC token, in place:

        - value literals (false/true/null) become VALUE_LITERAL tokens, with
          their text rewritten to the Python spelling (False/True/None)
        - statement keywords become STATEMENT_KEYWORD tokens
        - reserved words that are not Pigeon keywords are illegal: raises
        - everything else becomes an IDENTIFIER

        Raises an Exception when a reserved word is used as an identifier.
    '''
    # maps Pigeon literal spellings to their Python equivalents
    pythonSpellings = {'false': 'False', 'true': 'True', 'null': 'None'}
    for token in tokens:
        if token.tokenType != TT.ALPHANUMERIC:
            continue
        text = token.text
        if text in VALUE_LITERALS:
            token.tokenType = TT.VALUE_LITERAL
            token.text = pythonSpellings[text]
        elif text in STATEMENT_KEYWORDS:
            token.tokenType = TT.STATEMENT_KEYWORD
        elif text in RESERVED_WORDS and text not in PIGEON_KEYWORDS:
            # bug fix: raising a bare string is a TypeError in Python >= 2.6;
            # raise a real exception object instead
            raise Exception('"' + text + '" in line ' + str(token.lineNum) +
                            ' is illegal because it is a reserved word.')
        else:
            token.tokenType = TT.IDENTIFIER

def parenthesizeByIndentation(tokenLines):
    '''
        Insert parenthesis tokens into each TokenLine (in place) so that
        indentation structure becomes explicit s-expression nesting:

        - every line that does not already start with '(' gets one prepended
        - closing parentheses are appended according to how the NEXT line's
          indentation level compares with the current line's

        Raises an Exception when a line's indentation is not a whole
        multiple of NUM_SPACES_PER_TAB.
    '''
    # compute each line's indentation level, in tab-sized units
    indLevels = []
    for line in tokenLines:
        indScore = line.indentation
        if indScore % NUM_SPACES_PER_TAB != 0:
            # bug fix: the original raised a bare string (illegal in modern
            # Python) and concatenated an int lineNum onto a str
            raise Exception('Improper indentation of line ' + str(line.lineNum))
        # bug fix: '//' keeps the levels ints under Python 3 ('/' would
        # produce floats, which later break range()); identical in Python 2
        indLevels.append(indScore // NUM_SPACES_PER_TAB)
    indLevels.append(0)  # sentinel so the final line closes every open expression

    for i, line in enumerate(tokenLines):
        tokens = line.tokens

        numParensToAppend = 0
        # prepend '(' to any line not starting with '('
        if tokens[0].tokenType != TT.START_PAREN:
            tokens[0:0] = [ Token('(', line.lineNum, TT.START_PAREN) ]
            numParensToAppend = 1

        indLevel = indLevels[i]
        nextIndLevel = indLevels[i + 1]
        if indLevel == nextIndLevel and tokens[-1].tokenType != TT.END_PAREN:
            numParensToAppend = 1
        elif indLevel < nextIndLevel:
            # next line is nested inside this one: leave this expression open
            numParensToAppend = 0
        else:
            # dedent: close one additional expression per level dropped
            numParensToAppend += indLevel - nextIndLevel

        for _ in range(numParensToAppend):
            tokens.append( Token(')', line.lineNum, TT.END_PAREN) )

def parseFile(filename):
    '''
        Parse the Pigeon source file named by filename and return its syntax
        tree: a list of s-expressions, where each s-expression is a list of
        Tokens and nested sub-lists. (Whitespace and parenthesis tokens are
        stripped out of the result.)
    '''
    # 'with' guarantees the file is closed even if readlines() raises
    # (the original leaked the handle on error, and shadowed builtin 'file')
    with open(filename, 'r') as sourceFile:
        lines = sourceFile.readlines()

    tokenLines = tokenize(lines)
    parenthesizeByIndentation(tokenLines)    # make indentation into explicit parens
    tokens = tokenLinesToTokens(tokenLines)  # flatten into a single token stream
    categorizeAlphanumericTokens(tokens)     # keywords / literals / identifiers
    verifyWhitespaceBetweenTokens(tokens)
    tokens = stripWhitespace(tokens)
    verifyParenthesesBalance(tokens)
    syntaxTree = buildTree(tokens)
    return syntaxTree

def stripWhitespace(tokens):
    '''Return a new token list with every WHITESPACE token removed.'''
    kept = []
    for token in tokens:
        if token.tokenType != TT.WHITESPACE:
            kept.append(token)
    return kept

def tokenize(lines):
    '''
        Convert raw source lines into a list of TokenLines.

        lines -- the file's raw line strings (line endings included)

        Blank lines and lines that are entirely a comment are skipped.
        Each line's leading whitespace is kept on its TokenLine so that
        indentation can be scored later.
    '''
    tokensByLine = []
    lineNum = 1  # line numbers are 1-based for error messages
    for line in lines:
        leadingWhitespace = line[:len(line) - len(line.lstrip())]
        line = line.strip()
        # skip blank lines and whole-line comments
        # (idiom fix: 'is None', not '== None')
        if len(line) > 0 and TOKEN_REGEXES[TT.COMMENT].match(line, 0) is None:
            tokens = tokenizeLine(line, lineNum)
            tokensByLine.append( TokenLine(tokens, leadingWhitespace, lineNum) )
        lineNum += 1
    return tokensByLine

def tokenizeLine(line, lineNum):
    '''
        Tokenize a single stripped, non-empty line of source text.

        line -- the line's text, without leading/trailing whitespace
        lineNum -- 1-based line number, attached to each Token for errors

        Returns a list of Tokens. Comment text is discarded, and a trailing
        whitespace token (if any) is dropped.

        Raises an Exception when no token pattern matches.
    '''
    tokens = []
    i = 0
    while i < len(line):
        # the token patterns are mutually exclusive at any start character,
        # so the dict's iteration order does not matter here
        # (renamed loop variable: 'type' shadowed the builtin)
        for tokenType, regex in TOKEN_REGEXES.items():
            result = regex.match(line, i)
            if result is not None:
                if tokenType != TT.COMMENT:  # comments are discarded
                    tokens.append( Token(result.group(), lineNum, tokenType) )
                i = result.end()
                break
        else:
            # bug fix: the original raised a bare string, which is a
            # TypeError in Python >= 2.6; raise a real exception instead
            raise Exception('Bad token at character ' + str(i + 1) + ' in line ' + str(lineNum))
    # drop a trailing whitespace token; the 'tokens and' guard avoids an
    # IndexError when the line produced no tokens at all (e.g. a pure
    # comment line passed in directly)
    if tokens and tokens[-1].tokenType == TT.WHITESPACE:
        tokens = tokens[:-1]
    return tokens

def tokenLinesToTokens(tokenLines):
    '''Flatten a list of TokenLines into a single list of their tokens, in order.'''
    tokens = []
    for line in tokenLines:
        tokens.extend(line.tokens)
    return tokens

def verifyParenthesesBalance(tokens):
    '''
        raise error if parentheses are imbalanced, e.g. (()()) is balanced but ((()) is imbalanced
    '''
    depth = 0
    for token in tokens:
        if token.tokenType == TT.START_PAREN:
            depth += 1
        elif token.tokenType == TT.END_PAREN:
            # a closer with nothing open would drive the depth negative
            assert depth > 0, 'Line ' + str(token.lineNum) + ': closing parenthesis creates imbalance of parentheses.'
            depth -= 1
    assert depth == 0, 'End of file reached with parentheses left open: ' + str(depth)

def verifyWhitespaceBetweenTokens(tokens):
    '''
        raise error if whitespace is missing between certain adjacent tokens, for instance, 
        between an alphanumeric token and a number_literal token
    '''
    # examine each adjacent pair of tokens in the stream
    for current, following in zip(tokens, tokens[1:]):
        if current.tokenType in (TT.ALPHANUMERIC, TT.STRING_LITERAL, TT.NUMBER_LITERAL):
            # two word-like tokens touching means whitespace was omitted
            assert following.tokenType not in (TT.ALPHANUMERIC, TT.STRING_LITERAL, TT.NUMBER_LITERAL), \
                'Line ' + str(current.lineNum) + ': missing whitespace.'

if __name__ == '__main__':
    # smoke test: parses a sample Pigeon file and discards the resulting tree
    syntaxTree = parseFile('testPigeon.pig') # shouldn't see any exceptions if this goes well
