#!/usr/bin/env python

from grammar   import *
from generator import *
from lexer     import Mapper
from reader    import *

# --- Character-level symbols fed to the Mapper ------------------------------
# A SymbolType groups many input characters under one terminal; a
# SymbolInstance stands for exactly one literal character.
cDigit   = SymbolType("cDigit")        # decimal digits 0-9
cAlpha   = SymbolType("cAlpha")        # letters and '_' (lowercase 'n' excluded; see cN)
cSpace   = SymbolType("cSpace")        # blank, tab, carriage return
cComma   = SymbolInstance("c,")
cColon   = SymbolInstance("c:")
cNewLine = SymbolInstance("cNew Line")
cOpen    = SymbolInstance("c(")
cClose   = SymbolInstance("c)")
cAlt     = SymbolInstance("c|")
cEq      = SymbolInstance("c=")
cGt      = SymbolInstance("c>")
cMinus   = SymbolInstance("c-")
cQuote   = SymbolInstance("c\"")
cBSlash  = SymbolInstance("c\\")
cN       = SymbolInstance("cn")        # lowercase 'n' on its own, so "\n" escapes can be lexed
cOther   = SymbolType("cOther")        # fallback class for any character not listed in `classes`
cEnd     = SymbolType("cEnd")          # end-of-input marker

# Maps each character symbol to the literal characters it covers; the Mapper
# (see bottom of file) uses this table to classify raw input characters.
classes = {
    cDigit          : "0123456789",
    # NOTE: lowercase 'n' is deliberately absent from cAlpha -- it maps to the
    # separate cN symbol so the string rules can recognise the "\n" escape.
    cAlpha          : "_abcdefghijklmopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
    cSpace          : " \t\r",
    cComma          : ",",
    cColon          : ":",
    cNewLine        : "\n",
    cOpen           : "(",
    cClose          : ")",
    cMinus          : "-",
    cAlt            : "|",
    cEq             : "=",
    cGt             : ">",
    cQuote          : "\"",
    cBSlash         : "\\",
    cN              : "n",
    SymbolType.tEnd : ""               # end-of-input corresponds to no character
}

# --- Token-level symbols produced by the generated lexer --------------------
tId      = SymbolType("[Id]")          # identifier token
tStr     = SymbolType("[String]")      # quoted string token
tIs      = SymbolInstance("=>")        # rule-definition arrow
tComma   = SymbolInstance(",")
tAlt     = SymbolInstance("|")
tColon   = SymbolInstance(":")
tOpen    = SymbolInstance("(")
tClose   = SymbolInstance(")")
tNewLine = SymbolInstance("\\n")
tSpace   = SymbolType("[Space]")       # whitespace run (ignored by the parser, see I(tSpace))
tEnd     = SymbolType.tEnd             # shared end-of-input symbol
lElement = SymbolType("LexerElement")  # start symbol of the lexer grammar: one token
tIdcont  = SymbolType("[Id-cont]")     # continuation of an identifier
tStrcont = SymbolType("[Str-cont]")    # continuation of a string literal

# Character sets used by the lexer rules below.
clId     = SymbolClass("clId", cAlpha, cN, cMinus)                 # chars that may start an identifier
clIdCont = SymbolClass("clIdCont", cAlpha, cN, cMinus, cDigit)     # chars that may continue one
clEsc    = SymbolClass("clEsc", cQuote, cBSlash, cN)               # chars allowed after '\' in a string

# Nonterminal and terminal alphabets of the lexer grammar.  Currently unused:
# the corresponding Grammar(...) arguments below are commented out.
lexerN = N(tId,tStr,tIs,tColon,tComma,tAlt,tOpen,tClose,tNewLine,tSpace,tEnd,lElement,tIdcont,tStrcont)
lexerT = T(cDigit,cAlpha,cSpace,cComma,cColon,cNewLine,cOpen,cClose,cAlt,cEq,cGt,cQuote,cEnd,cMinus,cBSlash,cN,tEnd)

# Characters allowed unescaped inside a string literal (quote and backslash
# must be escaped via clEsc).  NOTE(review): cEnd is included here -- presumably
# harmless since it maps to no character; confirm against Mapper semantics.
clStr  = SymbolClass("clStr", cDigit,cAlpha,cSpace,cComma,cColon,cNewLine,cOpen,cClose,cAlt,cEq,cGt,cEnd,cMinus,cN)

# Grammar describing the lexer itself: each derivation of lElement recognises
# exactly one token.  Rules with a lambda action wrap the matched text in the
# token type they produce; rules without one just extend the current match.
lexerGrammar = Grammar(
    # lexerN, lexerT,
    P(
        Rule(lElement, [tId                      ], lambda value: tId.create(value)   ),
        Rule(lElement, [tStr                     ], lambda value: tStr.create(value)  ),
        Rule(lElement, [tIs                      ], lambda value: tIs.create(value)   ),
        Rule(lElement, [tColon                   ], lambda value: tColon.create(value)),
        # BUG FIX: this rule previously called tColon.create, so every comma
        # was emitted as a colon token.  It now creates tComma like its siblings.
        Rule(lElement, [tComma                   ], lambda value: tComma.create(value)),
        Rule(lElement, [tNewLine                 ], lambda value: tNewLine.create(value)),
        Rule(lElement, [tOpen                    ], lambda value: tOpen.create(value) ),
        Rule(lElement, [tClose                   ], lambda value: tClose.create(value)),
        Rule(lElement, [tSpace                   ], lambda value: tSpace.create(value)),
        Rule(lElement, [                         ], lambda value: tEnd.create(value)  ),  # empty input -> end token
        # Identifiers: a clId character followed by any number of clIdCont characters.
        Rule(tId,      [clId    , tIdcont        ]),
        Rule(tIdcont,  [                         ], lambda value: tId.create(value)   ),
        Rule(tIdcont,  [clIdCont, tIdcont        ]),
        # Strings: '"' ... '"' with backslash escapes limited to clEsc.
        Rule(tStr   ,  [cQuote, tStrcont         ]),
        Rule(tStrcont, [cQuote                   ], lambda value: tStr.create(value)  ),
        Rule(tStrcont, [cBSlash, clEsc, tStrcont ]),
        Rule(tStrcont, [clStr, tStrcont          ]),
        Rule(tIs,      [cEq, cGt                 ], lambda value: tIs.create(value)   ),  # "=>"
        Rule(tComma,   [cComma                   ], lambda value: tComma.create(value)),
        Rule(tColon,   [cColon                   ]),
        Rule(tNewLine, [cNewLine                 ]),
        Rule(tOpen,    [cOpen                    ]),
        Rule(tClose,   [cClose                   ]),
        # NOTE(review): tAlt is defined but lElement has no [tAlt] alternative,
        # so '|' can never be emitted as a token -- confirm whether a
        # Rule(lElement, [tAlt], ...) is missing.
        Rule(tAlt,     [cAlt                     ]),
        # Whitespace: runs of cSpace; a backslash-newline pair also counts as space
        # (line continuation).
        Rule(tSpace,   [cSpace                   ], lambda value: tSpace.create(value)),
        Rule(tSpace,   [cBSlash, cNewLine        ], lambda value: tSpace.create(value)),
        Rule(tSpace,   [cSpace , tSpace          ]),
        Rule(tSpace,   [cBSlash, cNewLine, tSpace])
    ),
    lElement
)

# --- Nonterminals of the grammar-description language itself ----------------
nDefinition = SymbolType("<Definition>")   # start symbol: a whole grammar file
nGrammar    = SymbolType("<Grammar>")      # newline-separated list of rules
nRule       = SymbolType("<Rule>")         # one "id => right-hand side" line
nRight      = SymbolType("<Right>")        # comma-separated list of atoms
nAtom       = SymbolType("<Atom>")         # identifier or string literal

# Grammar for the token stream produced by lexerGrammar.  tSpace tokens are
# skipped via I(tSpace).
# NOTE(review): several actions discard all but the first child (e.g. the
# nGrammar and nRight list rules return only r.value / s.value), so no list
# structure is built -- presumably placeholder semantics; confirm intent.
parserGrammar = Grammar(
    # N(nDefinition, nGrammar, nRule, nRight, nAtom),
    # T(tEnd, tId, tIs, tComma, tNewLine, tStr),
    P(
        Rule(nDefinition, [nGrammar                 ], lambda context, g: g.value),
        Rule(nGrammar   , [nRule                    ], lambda context, r: r.value),
        Rule(nGrammar   , [nRule, tNewLine, nGrammar], lambda context, r, n, g: r.value),
        Rule(nRule      , [                         ], lambda context: None),        # blank line
        Rule(nRule      , [tId, tIs, nRight         ], lambda context, l, i, r: l.value),
        Rule(nRight     , [nAtom                    ], lambda context, s: s.value),
        Rule(nRight     , [nAtom, tComma, nRight    ], lambda context, s, c, r: s.value),
        Rule(nAtom      , [tId                      ], lambda context, s: s.value),
        Rule(nAtom      , [tStr                     ], lambda context, s: s.value)
    ),
    nDefinition,
    I(tSpace)                              # ignore whitespace tokens between symbols
)

# --- Driver: build the lexer and parser, then run them on a sample input ----
lexerGenerator = LexerGenerator(lexerGrammar)      # tables for the character-level lexer
parserGenerator = ParserGenerator(parserGrammar)   # tables for the token-level parser
mapper = Mapper(classes, cOther)                   # raw chars -> character symbols (cOther as fallback)
# print "\n\n",  repr(lexerGenerator.states)
# print "\n\n",  repr(parserGenerator.states)

# Pipeline: mapper classifies characters, the generated lexer tokenises, the
# generated parser parses the sample rule 'a => b , "c"'.  (Python 2 print.)
print parserGenerator.get(lexerGenerator.get(mapper)).parse(String('a => b , "c"'), None)
