package ch.epfl.lara.matcheck.ast.parser;


import scala.util.parsing.combinator.lexical._
import scala.util.parsing.syntax.StdTokens
import scala.util.parsing.combinator.syntactical.StdTokenParsers
import scala.util.parsing.combinator._	

import collection.mutable.HashSet

/**
 * Redefines StdLexical so that it can parse comments that start with '/** [...] */',
 * as they will contain invariant and pre-/post-condition declarations.
 *
 * Additionally, works around a library bug so that '_' is parsed as a letter.
 */
class MyLexical extends StdLexical { 
  import scala.util.parsing.input.CharArrayReader.EofCh
  
  

  // Produces the next token from the character stream. The order of the
  // alternatives is significant: identifiers/numbers/strings are tried first,
  // then EOF, then delimiters, and only afterwards the error productions
  // (the `failure(...)` branches are reached when a quote was consumed but
  // no matching closing quote was found before a newline or EOF).
  override def token: Parser[Token] = 
    ( letter ~ rep( letter | digit )                    ^^ { case first ~ rest => processIdent(first :: rest mkString "")}
    // Integer literal: one digit followed by any number of digits.
    | digit ~ rep( digit )                              ^^ { case first ~ rest => NumericLit(first :: rest mkString "") }
    // Single- and double-quoted literals both become StringLit; the quote
    // characters themselves are dropped and no escape sequences are handled.
    | '\'' ~> rep( chrExcept('\'', '\n', EofCh) ) <~ '\'' ^^ { case chars => StringLit(chars mkString "") }
    | '\"' ~> rep( chrExcept('\"', '\n', EofCh) ) <~ '\"' ^^ { case chars => StringLit(chars mkString "") }
    | EofCh                                             ^^^ EOF
    | delim 	
    // Backslash-prefixed identifier ('\' followed by letters/digits).
    // NOTE(review): this sits AFTER `delim`, so it is only reachable if '\\'
    // is not registered as a delimiter — confirm against the delimiter list.
    // Also, it calls `super.processIdent` while the first branch calls the
    // (potentially overridden) `processIdent` — presumably intentional; verify.
    | '\\' ~ rep(letter | digit)                        ^^ { case first ~ rest => super.processIdent(first :: rest mkString "") }
    | '\'' ~> failure("unclosed string literal") 
    | '\"' ~> failure("unclosed string literal")
    | failure("illegal character")
    )

  // Skips whitespace, '//' line comments, and '/** ... */' doc comments
  // (via the inherited `comment` parser, which consumes up to '*/').
  // NOTE(review): unlike StdLexical, plain '/* ... */' comments are NOT
  // skipped here — presumably so that "/*" and "*/" can be delivered as
  // delimiter tokens; confirm this is the intended behavior.
  override def whitespace: Parser[Any] = rep(
      whitespaceChar
      | '/' ~ '/' ~ rep( chrExcept(EofCh, '\n') )
      | '/' ~ '*' ~ '*' ~ comment
      // Reached when "/**" was consumed but `comment` never found "*/".
      | '/' ~ '*' ~ '*' ~ failure("unclosed comment")
      )
      
  // Library workaround: treat '_' as a letter so identifiers such as
  // `my_var` are scanned as a single ident token.
  override def letter = elem("letter", ch => (ch.isLetter || ch == '_'))
}


import scala.util.parsing.input.Reader
import scala.util.parsing.combinator.syntactical.StdTokenParsers
import scala.util.parsing.combinator.ImplicitConversions
import scala.util.parsing.syntax.StdTokens
import scala.util.parsing.input.StreamReader

import java.io.{File,FileInputStream,InputStreamReader,StringReader}

trait SyntaxParser extends StdTokenParsers {

  // StdLexical is used to scan Scala-like languages; the concrete scanner is
  // MyLexical, which additionally skips '/** ... */' doc comments and treats
  // '_' as a letter.
  type Tokens = StdLexical
  val lexical = new MyLexical   // Use the modified lexer

  // Delimiters recognised by the scanner. Note that "/*" and "*/" are
  // delivered as tokens here (only '//' and '/** ... */' are consumed as
  // whitespace by MyLexical). Fixed: the original list contained "<" twice;
  // the duplicate was removed (delimiters is a set, so this is behaviorally
  // neutral but was clearly a copy/paste slip).
  getLexical.delimiters ++= List("{","}","(",")","[","]","/*","*/",
                              ",", "=>", ".", "=",
                              "+", "-", "*", ":", "/","%",
                              "&&","||",
                              "<","<=","==","!=",">=",">","!",
                              "&","|","~"
                          ) 

  /**
   * Returns the lexical scanner used by this parser, statically typed as
   * [[MyLexical]] so callers can access its extended interface.
   */
  def getLexical: MyLexical = lexical
  
}
