module LexPieces where
-- In principle this module should export only `lexer` and the `Token` data type.
import Utils (spanL, isSpace, isUpper, isAlpha, isDigit)

-- | Tokens produced by 'lexer'.  Grouped by purpose: keywords and
-- literals, arithmetic and comparison operators, their "#"-suffixed
-- primitive variants, punctuation, and declaration-level chunks that
-- are kept as raw source text.
data Token
     -- Keywords and literals
     = TokenLet
     | TokenLetrec 
     | TokenIn 
     | TokenCase 
     | TokenOf
     | TokenIf 
     | TokenThen 
     | TokenElse
     | TokenInt Int 
     | TokenVar String              -- lowercase identifier
     | TokenStr String              -- contents of a "..." literal (quotes stripped)
     | TokenConstr String           -- identifier starting with an uppercase letter
     | TokenDefault                 -- "default" and also "otherwise"
     | TokenEq                      -- "="
     | TokenLamb                    -- "\"
     | TokenWhere

     -- Arithmetic operators
     | TokenPlus 
     | TokenMinus
     | TokenTimes 
     | TokenDiv                     -- "/"
     | TokenMod                     -- "%"

     -- Comparison / boolean operators
     | TokenGr 
     | TokenGrEq 
     | TokenLt 
     | TokenLtEq 
     | TokenEqR                     -- "=="
     | TokenNotEqR                  -- "/="
     | TokenAndU                    -- single "&"
     | TokenAnd                     -- "&&"
     | TokenOr                      -- "||"
     
     | TokenTP                      -- single ":"
     | TokenTPP String              -- "::" followed by the rest of the line
     | TokenElemI                   -- "!!"
     | TokenConcat                  -- "++"

     -- "#"-suffixed primitive arithmetic operators (e.g. "+#")
     | TokenPlusPrim
     | TokenMinusPrim
     | TokenTimesPrim
     | TokenDivPrim
     | TokenModPrim

     -- "#"-suffixed primitive comparison / boolean operators
     | TokenGrPrim 
     | TokenGrEqPrim 
     | TokenLtPrim 
     | TokenLtEqPrim 
     | TokenEqRPrim                 -- "==#"
     | TokenNotEqRPrim              -- "/=#"
     | TokenAnddPrim                -- "&&#"
     | TokenAndPrim                 -- "&#"
     | TokenOrrPrim                 -- "||#"
     | TokenOrPrim                  -- "|#"

     -- Punctuation
     | TokenDash                    -- single "|"
     | TokenOB                      -- "("
     | TokenCB                      -- ")"
     | TokenOC                      -- "["
     | TokenCC                      -- "]"
     | TokenOL                      -- "{"
     | TokenCL                      -- "}"
     | TokenUp                      -- "\u"
     | TokenNonUp                   -- "\n" (backslash-n in the source, not newline)
     | TokenPC                      -- ";"
     | TokenC                       -- ","
     | TokenP                       -- "."
     | TokenArr                     -- "->"
     | TokenTilde                   -- "`" (backtick, despite the name)
     | TokenDolar                   -- "$"
     | TokenAt                      -- "@"
     | TokenSub                     -- "_"

     -- Declaration-level chunks carried through as raw text
     | TokenCommentLn  String       -- "--" line comment, text preserved
     | TokenComment    String       -- "{- -}" block comment, text preserved
     | TokenModule     String
     | TokenImport     String
     | TokenData       String
     | TokenType       String
     | TokenNewtype    String
     | TokenClass      String
     | TokenInstance   String
     | TokenTypeAnn    String       -- "name :: type" annotation folded into one token
      deriving Show

-- | Tokenize a source string.
--
-- Multi-character operators are recognised with multi-character
-- patterns rather than @head@/@tail@ guards, so input that ends in the
-- middle of a potential operator (e.g. a trailing '=', '&' or '{')
-- no longer crashes with "Prelude.head: empty list"; it degrades to
-- the shorter token instead.  Longer operators are matched before
-- their prefixes, preserving the original guard order.
lexer :: String -> [Token]
lexer [] = []
lexer (c:cs)
        | isSpace c = lexer cs
        | isAlpha c = lexVarCons (c:cs)
        | isDigit c = lexNum (c:cs)
        -- guards fall through to the per-character equations below
-- '=' family: ==# / == / =
lexer ('=':'=':'#':cs) = TokenEqRPrim : lexer cs
lexer ('=':'=':cs)     = TokenEqR  : lexer cs
lexer ('=':cs)         = TokenEq   : lexer cs
-- '+' family.  '+#' was previously unhandled (fell into the error
-- equation) even though TokenPlusPrim exists; handle it for
-- consistency with '-#', '*#', '/#' and '%#'.
lexer ('+':'#':cs)     = TokenPlusPrim : lexer cs
lexer ('+':'+':cs)     = TokenConcat : lexer cs
lexer ('+':cs)         = TokenPlus : lexer cs
-- '-' family: -# / -> / "--" line comment / -
lexer ('-':'#':cs)     = TokenMinusPrim : lexer cs
lexer ('-':'>':cs)     = TokenArr : lexer cs
lexer ('-':'-':cs)     =
        -- line comment: keep its text up to (excluding) the newline
        let (msg, rest) = span ('\n' /=) cs
        in TokenCommentLn ("-- " ++ msg ++ "\n") : lexer rest
lexer ('-':cs)         = TokenMinus : lexer cs
lexer ('*':'#':cs)     = TokenTimesPrim : lexer cs
lexer ('*':cs)         = TokenTimes : lexer cs
-- '/' family: /# / /=# / /= / /
lexer ('/':'=':'#':cs) = TokenNotEqRPrim : lexer cs
lexer ('/':'=':cs)     = TokenNotEqR : lexer cs
lexer ('/':'#':cs)     = TokenDivPrim : lexer cs
lexer ('/':cs)         = TokenDiv : lexer cs
lexer ('%':'#':cs)     = TokenModPrim : lexer cs
lexer ('%':cs)         = TokenMod : lexer cs
lexer ('>':'=':'#':cs) = TokenGrEqPrim : lexer cs
lexer ('>':'=':cs)     = TokenGrEq : lexer cs
lexer ('>':'#':cs)     = TokenGrPrim : lexer cs
lexer ('>':cs)         = TokenGr : lexer cs
lexer ('<':'=':'#':cs) = TokenLtEqPrim : lexer cs
lexer ('<':'=':cs)     = TokenLtEq : lexer cs
lexer ('<':'#':cs)     = TokenLtPrim : lexer cs
lexer ('<':cs)         = TokenLt : lexer cs
lexer ('&':'&':'#':cs) = TokenAnddPrim : lexer cs
lexer ('&':'&':cs)     = TokenAnd : lexer cs
lexer ('&':'#':cs)     = TokenAndPrim : lexer cs
lexer ('&':cs)         = TokenAndU : lexer cs
lexer ('|':'|':'#':cs) = TokenOrrPrim : lexer cs
lexer ('|':'|':cs)     = TokenOr : lexer cs
lexer ('|':'#':cs)     = TokenOrPrim : lexer cs
lexer ('|':cs)         = TokenDash : lexer cs
lexer ('(':cs) = TokenOB : lexer cs
lexer (')':cs) = TokenCB : lexer cs
lexer ('[':cs) = TokenOC : lexer cs
lexer (']':cs) = TokenCC : lexer cs
-- "{-" block comment: keep its text, then drop the closing "-}"
lexer ('{':'-':cs) =
        let (msg, rest) = spanL (\x -> take 2 x /= "-}") cs
        in TokenComment ("{-" ++ msg ++ "-}\n") : lexer (drop 2 rest)
lexer ('{':cs) = TokenOL : lexer cs
lexer ('}':cs) = TokenCL : lexer cs
lexer ('\\':'u':cs) = TokenUp : lexer cs
lexer ('\\':'n':cs) = TokenNonUp : lexer cs
lexer ('\\':cs)     = TokenLamb : lexer cs
lexer (';':cs) = TokenPC : lexer cs
lexer (',':cs) = TokenC : lexer cs
-- "::" type annotation: capture the rest of the line
lexer (':':':':cs) =
        let (msg, rest) = span ('\n' /=) cs
        in TokenTPP msg : lexer rest
lexer (':':cs) = TokenTP : lexer cs
lexer ('.':cs) = TokenP : lexer cs
lexer ('@':cs) = TokenAt : lexer cs
lexer ('`':cs) = TokenTilde : lexer cs
lexer ('_':cs) = TokenSub : lexer cs
lexer ('$':cs) = TokenDolar : lexer cs
lexer ('!':'!':cs) = TokenElemI : lexer cs
lexer ('"':cs) =
        -- string literal: quotes stripped.  An unterminated literal now
        -- fails with a descriptive error instead of "Prelude.tail: empty list".
        case break ('"' ==) cs of
          (str, _:rest) -> TokenStr str : lexer rest
          (_,   [])     -> error "ERROR lexer: unterminated string literal"
lexer xs = error $ "ERROR lexer: " ++ xs

-- | Lex a maximal run of decimal digits into a 'TokenInt', then
-- continue lexing the remainder of the input.
lexNum :: String -> [Token]
lexNum cs =
     let (digits, rest) = span isDigit cs
     in TokenInt (read digits) : lexer rest
  
-- | Lex a keyword, variable or constructor.  'lexer' only calls this
-- when the input starts with an alphabetic character.  Declaration
-- keywords (module, import, data, ...) swallow text up to the next
-- ';' (or "where" for module) and carry it through as raw source.
lexVarCons :: String -> [Token]
lexVarCons cs =
        case span (\c->isAlpha c || isDigit c || c == '_' || c == '\'') cs of
                ("let",rest)       -> TokenLet : lexer rest
                ("letrec",rest)    -> TokenLetrec :lexer rest
                ("in",rest)        -> TokenIn : lexer rest 
                ("case",rest)      -> TokenCase :lexer rest
                ("of",rest)        -> TokenOf : lexer rest
                ("if",rest)        -> TokenIf :lexer rest
                ("then",rest)      -> TokenThen : lexer rest 
                ("else",rest)      -> TokenElse : lexer rest
                ("default",rest)   -> TokenDefault : lexer rest
                ("otherwise",rest) -> TokenDefault : lexer rest
                ("where",rest)     -> TokenWhere : lexer rest
                (str@"module",rest)    -> let (str1, rest1) = spanL (\xs -> "where"/= take 5 xs) rest
                                          -- drop 1 discards the TokenWhere re-lexed from rest1
                                          -- (safe on an empty token list, unlike 'tail')
                                          in TokenModule (str++str1++" where\n") : drop 1 (lexer rest1)
                (str@"import",rest)    -> let (str1, rest1) = break (==';') rest
                                          in TokenImport (str++str1++"\n") : lexer rest1
                (str@"data",rest)      -> let (str1, rest1) = break (==';') rest
                                          in TokenData (str++str1++"\n") : lexer rest1
                (str@"type",rest)      -> let (str1, rest1) = break (==';') rest
                                          in TokenType (str++str1++"\n") : lexer rest1
                (str@"newtype",rest)   -> let (str1, rest1) = break (==';') rest
                                          in TokenNewtype (str++str1++"\n") : lexer rest1
                (str@"class",rest)     -> let (str1, rest1) = break (==';') rest
                                          in TokenClass (str++str1++"\n") : lexer rest1
                (str@"instance",rest)  -> let (str1, rest1) = break (==';') rest
                                          in TokenInstance (str++str1++"\n") : lexer rest1
                -- "local" blocks are kept as a comment; note the keyword
                -- itself is intentionally not included in the text.
                (str@"local",rest)     -> let (str1, rest1) = break (==';') rest
                                          in TokenComment (str1++"\n") : lexer rest1
                (var@(v:_),rest)
                    | isUpper v -> TokenConstr var : lexer rest
                    | otherwise ->
                        -- a lowercase identifier immediately followed by a
                        -- "::" annotation folds into a single TokenTypeAnn
                        case lexer rest of
                          TokenTPP msg : lexs -> TokenTypeAnn (var++" :: "++msg++"\n") : lexs
                          -- reuse the already-computed token list instead of
                          -- re-lexing 'rest' a second time (was O(n^2)-ish)
                          toks                -> TokenVar var : toks
                ("",_) ->
                        -- unreachable while callers uphold the alpha-head
                        -- invariant; fail loudly rather than via 'head'
                        error "ERROR lexVarCons: empty identifier"
