package fst.parsing

/** Minimal interface a lexer must provide: consumes `Char` input and
 *  produces `Token`s, with a separate parser for skippable whitespace.
 */
trait LexicalCore extends Parsers {
  type Elem = Char
  /** Abstract token type; concrete lexers fix it (e.g. by mixing in `StdTokens`). */
  type Token
  
  /** Builds a token representing a lexical error described by `msg`. */
  def errorToken(msg: String): Token
  
  /** Parses a single token from the input. */
  def token: Parser[Token]
  
  /** Parses input to be skipped between tokens. */
  def whitespace: Parser[Any]
}


/** Standard token types produced by a lexer: keywords, identifiers,
 *  error tokens and end-of-file.
 */
trait StdTokens {
  /** Base class of all tokens; `chars` is the token's source text. */
  abstract class Token {
    def chars: String
  }

  /** A class of error tokens. Error tokens are used to communicate
   *  errors detected during lexical analysis.
   */
  case class ErrorToken(msg: String) extends Token {
    def chars = s"*** error: $msg"
  }

  /** A class for end-of-file tokens. */
  case object EOF extends Token {
    def chars = "<eof>"
  }

  /** This token is produced by a scanner when scanning failed. */
  def errorToken(msg: String): Token = ErrorToken(msg)  // case class: no `new` needed

  /** A keyword or delimiter token. */
  case class Keyword(chars: String) extends Token {
    override def toString = s"`$chars'"
  }

  /** An identifier token. */
  case class Identifier(chars: String) extends Token {
    override def toString = s"identifier $chars"
  }
}

/** A standard lexer over character input: identifiers, keywords and
 *  delimiters.  Populate `reserved` and `delimiters` before the first
 *  parse -- `delim` is built lazily from `delimiters` on first use, so
 *  delimiters added after that are ignored.
 */
class StdLexical extends StringParsers with MoreCombinators with LexicalCore with StdTokens {
  override type Elem = Char

  /** A character-parser that matches any character except the ones given in `cs` (and returns it). */
  def chrExcept(cs: Char*) = acceptIf(ch => (cs forall (ch !=)))

  def letter = acceptIf(_.isLetter) expected("letter")
  def digit = acceptIf(_.isDigit) expected("digit")

  /** Parses one token: an identifier/keyword, end-of-input, or a delimiter. */
  def token: Parser[Token] =
    ( letter ~ rep( letter | digit )                    ^^ {case x ~ xs => mkIdent((x :: xs).mkString) }
    | EOI                                               ^^ const(EOF)
    | delim
    | failure explain("illegal character")
    )

  /** Skips runs of low characters (anything <= ' ', excluding the EOI sentinel). */
  def whitespace: Parser[Any] = rep(acceptIf(ch => ch <= ' ' && ch != EOI)) expected("whitespace")

  import collection.mutable.HashSet

  /** The set of delimiters (ordering does not matter). */
  val delimiters = new HashSet[String]

  /** The set of reserved identifiers: these will be returned as `Keyword`s. */
  val reserved = new HashSet[String]

  /** Classifies a scanned name: reserved names become `Keyword`s, all others `Identifier`s. */
  def mkIdent(name: String) = if (reserved contains name) Keyword(name) else Identifier(name)

  /** Parser for any single delimiter, built once from `delimiters`. */
  lazy val delim: Parser[Token] = {
    def parseDelim(s: String): Parser[Token] = acceptSeq(s) ^^ const(Keyword(s))

    // Try longer delimiters first (descending lexicographic order): a
    // delimiter that is a prefix of another would otherwise shadow it.
    val ordered = delimiters.toList.sorted.reverse

    // Guard the empty case -- the original reduceRight threw on an empty
    // delimiter set; fail gracefully instead.
    if (ordered.isEmpty) failure explain("no delimiters defined")
    else ordered.map(parseDelim).reduceRight[Parser[Token]](_ | _)
  }
}

