/*
 * $AIST_Release: 0.9.0 $
 * Copyright 2011 Information Technology Research Institute, National
 * Institute of Advanced Industrial Science and Technology
 * 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *    http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package parser

import scala.util.parsing.combinator.lexical._
import scala.util.parsing.input.CharArrayReader.EofCh
import scala.collection.mutable.ArrayBuffer

/**
 * Lexical analyzer: turns a character stream into identifier, keyword,
 * numeric / string / character literal and delimiter tokens, skipping
 * whitespace and '#' line comments.
 */
object Lexer extends Lexical with Tokens {

    /**
     * Parser matching any single character in the inclusive range ['from', 'to'].
     */
    def ran(from: Char, to: Char): Parser[Elem] = elem(from+".."+to, (x:Elem) => (from <= x && x <= to))

    /**
     * Implicitly converts a string to a parser matching that exact string,
     * built by sequencing one single-character parser per character.
     */
    implicit def str(s: String): Parser[String] = {
        if (s.length == 0)
            success("")
        else
            elem(s.head) ~ str(s.tail) ^^ { case f~r => f + r }
    }

    /**
     * Parser matching one token.
     * Note: floatLiteral must be tried before intLiteral so that "1.5"
     * is not lexed as DecLit("1") followed by ".".
     */
    override def token: Parser[Token] =
        ( ident
        | floatLiteral
        | intLiteral
        | stringLiteral
        | charLiteral
        | EofCh ^^^ EOF
        | '\'' ~> failure("unclosed character literal")
        | '\"' ~> failure("unclosed string literal")
        | delim
        | failure("illegal character")
        )

    /**
     * Parser matching whitespace (ignored by the parser).
     * This includes '#' line comments.
     */
    override def whitespace: Parser[Any] = rep(
          whitespaceChar
        | '#' ~ comment
        ) ^^ { case _ => ' ' }

    override def whitespaceChar = elem(' ') | '\t' | '\n' | '\r'

    /** Consumes characters up to and including the line terminator. */
    def comment: Parser[Any] =
        ( lineTerm ^^ { case _ => ' ' }
        | chrExcept(EofCh) ~ comment
        )

    // "\r\n" must be tried first: a leading '\r' alternative would always
    // win and leave the CRLF branch unreachable.
    def lineTerm = str("\r\n") | elem('\n') | '\r'

    // Identifiers: $, letters, '_' or any char >= U+0080 to start,
    // digits allowed after the first character.
    def ident      = identStart ~ rep(identPart) ^^ { case first ~ rest => processIdent(first::rest) }
    def identStart = (elem('$') | ran('A', 'Z') | ran('a', 'z') | '_' | ran('\u0080', '\uffff'))
    def identPart  = identStart | digit

    // Float literal: "12.34" or ".34" (no trailing-dot form "12.").
    def floatLiteral = (subNumber ~ '.' ~ subNumber) ^^ { case x~y~z => FloatLit(x+y+z)} |
                       (            '.' ~ subNumber) ^^ { case x~y   => FloatLit(x+y) }
    def subNumber    = rep1(decDigit) ^^ {_.mkString("")}

    // Integer literal: 0x-prefixed hex, 0-prefixed octal, or decimal.
    def intLiteral = hexLiteral | octLiteral | decLiteral
    // rep1: "0x" with no digits is not a valid hex literal (rep would
    // accept it and produce HexLit("")).
    def hexLiteral = ('0'~>'x'~>rep1(hexDigit)) ^^ { case x => HexLit(x.mkString("")) }
    def octLiteral = ('0'~>rep1(octDigit))      ^^ { case x => OctLit(x.mkString("")) }
    def decLiteral = (rep1(decDigit))           ^^ { case x => DecLit(x.mkString("")) }

    def octDigit = ran('0', '7')
    def hexDigit = ran('0', '9') | ran('a', 'f') | ran('A', 'F')
    def decDigit = ran('0', '9')

    // String / character literals. charElem yields a Char (plain char),
    // a String (named escape, escapeSequence1) or an Int (octal escape
    // byte value, escapeSequence2).
    def stringLiteral   = ( "\"" ~> rep(charElem) <~ "\"" ) ^^ { case x => mkStringLit(x) }
    def charLiteral     = ( "'"  ~>     charElem  <~ "'"  ) ^^ { case x => mkCharLit(x) }
    def charElem        = chrExcept('\\', '\'', '\"', EofCh) | escapeSequence
    def escapeSequence  = escapeSequence1 | escapeSequence2
    def escapeSequence1 = '\\' ~> escapeChars.map { case (c, v) => elem(c) ^^^ v }.reduceLeft { (x, y) => x | y }
    def escapeSequence2 = '\\' ~> ( ran('0', '3')~octDigit~octDigit) ^^ { case x~y~z => Integer.parseInt(""+x+y+z, 8) }

    val escapeChars = Map('n' -> "\n", 't' -> "\t", 'b' -> "\b", 'r' -> "\r", 'f' -> "\f", '\\' -> "\\", '\'' -> "\'", '\"' -> "\"")

    val reserved = Set(
        "if", "else", "for", "while", "when", "some", "each", "all",
        "table", "of", "emit", "map", "array", "proto", "static",
        "break", "continue", "type", "function", "return")

    /** Classifies an identifier's characters as a Keyword or an Identifier. */
    def processIdent(name: List[Char]): Token = {
        val x = name.mkString("")
        if (reserved.contains(x)) Keyword(x) else Identifier(x)
    }

    // '|||' is the longest-match "or", so e.g. "<=" wins over "<"
    // regardless of list order.
    def delim = delim_str.map(str).reduceLeft{_ ||| _} ^^ { Keyword(_) }

    val delim_str = List("==", "!=", "<=", ">=", "&&", "||", "<-", "->",
                         "<", ">", "-", "*", "/", "%", "+", ".", ";", ":",
                         "[", "]", "(", ")", "=", "{", "}", ",",
                         "+=", "-=", "*=", "/=", "%=", "--", "++")

    /**
     * Builds a StringLit from a list of charElem results.
     * Consecutive octal-escape bytes (Int) are buffered and decoded
     * together as UTF-8 so multi-byte sequences survive; plain chars
     * and named-escape strings flush the buffer first.
     */
    def mkStringLit(x: List[Any]) = {
      val sb  = new StringBuilder
      val buf = new ArrayBuffer[Byte]
      // Flush buffered escape bytes into sb as one UTF-8 decode.
      def decodeBuf = {
        if (buf.nonEmpty) {
          sb.append(new String(buf.toArray, "UTF-8"))
          buf.clear
        }
      }

      x.foreach {
        case x: Int  =>
          require(x >= 0 && x < 256)
          buf += x.toByte

        case x: Char =>
          decodeBuf
          sb.append(x)

        case x: String =>
          decodeBuf
          sb.append(x)
      }
      decodeBuf
      StringLit(sb.toString)
    }

    /**
     * Builds a CharLit from a single charElem result.
     * Handles all three charElem shapes; previously the String case
     * (named escapes such as '\n') was missing and raised a MatchError.
     */
    def mkCharLit(x: Any) = {
      x match {
        case x: Int  =>
          // Octal escape: decode the single byte as UTF-8.
          require(x >= 0 && x < 256)
          CharLit(new String(Array(x.toByte), "UTF-8"))
        case x: Char =>
          CharLit(x.toString)
        case x: String =>
          // Named escape (escapeSequence1) — already a decoded string.
          CharLit(x)
      }
    }
}
