/*
 * http://code.google.com/p/lexing-regex-parsers-4-scala/
 * 
 * Copyright (c) 2011, Sanjay Dasgupta
 * All rights reserved.
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * The name of the author may not be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 * 
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package lexpar

/* WARNING: The next line refers to the MODIFIED version of dk.brics.automaton */
import dk.brics.automaton.{Automaton, RegExp, RunAutomaton}
import scala.collection._
import scala.util.matching.Regex
import scala.util.parsing.combinator.RegexParsers

/**
 * A variant of [[scala.util.parsing.combinator.RegexParsers]] in which every
 * `literal(...)` and `regex(...)` parser is backed by a single shared lexer:
 * all registered tokens are compiled into one DFA (via a MODIFIED build of
 * dk.brics.automaton) the first time any token parser runs.  Consequently all
 * tokens must be registered (by calling `literal`/`regex`) BEFORE the first
 * parse; registering a new token afterwards throws IllegalStateException.
 */
trait LexingRegexParsers extends RegexParsers {

  /** Clears the per-offset lexer result cache before delegating to
    * `super.phrase`, so a fresh input does not see stale cached matches. */
  override def phrase[T](p: Parser[T]): Parser[T] = {
    lexerDataCache.clear()
    super.phrase(p)
  }

  /** Shorthand for the parser type produced by `literal`/`regex` here. */
  type P = Parser[String]

  override val whiteSpace = "[ \n\r\t]+".r

  /**
   * Skips whitespace using the precompiled DFA `whiteSpaceMatcher`.
   *
   * NOTE(review): as written this returns the LENGTH of the whitespace run
   * (0 when none matches) — see the `in.offset + spaces` arithmetic in
   * `globalTokenParser` below, which treats the result as a length.  The
   * inherited `RegexParsers.handleWhiteSpace` contract documents a new
   * OFFSET instead; parsers obtained via the `literal$`/`regex$` escape
   * hatches go through super's machinery and may misbehave — confirm.
   * Also assumes `source` is actually a `String` (unchecked cast).
   */
  override protected def handleWhiteSpace(source: java.lang.CharSequence, offset: Int): Int =
    if (skipWhitespace) {
      val newOffset = whiteSpaceMatcher.run(source.asInstanceOf[String], offset)
      if (newOffset == -1) 0 else newOffset
    } else 
      offset

  /** Backslash-escapes characters that are metacharacters in the
    * dk.brics.automaton `RegExp` syntax, so a literal token string can be
    * embedded verbatim in the combined lexer automaton. */
  private def escapeRegexMetachars(es: String) = {
    val sb = new StringBuilder()
    for (c <- es) {
      c match {
        case '|' | '*' | '+' | '?' | '-' => sb.append('\\').append(c)
        case '(' | ')' | '[' | ']' | '{' | '}' => sb.append('\\').append(c)
        case ',' | '^' | '.' | '$' | '\"' => sb.append('\\').append(c)
        case _ => sb.append(c)
      }
    }
    sb.toString
  }

  /** Escape hatch: the plain `RegexParsers.literal`, bypassing the lexer.
    * NOTE(review): super's parsers call `handleWhiteSpace` expecting an
    * offset — see the contract caveat on the override above. */
  def literal$(lit: String) = super.literal(lit)
  
  /**
   * Registers `lit` as a lexer token (escaped for brics-RegExp syntax) and
   * returns its parser.  Repeat registrations of the same literal return the
   * cached parser.
   *
   * @throws IllegalArgumentException for a null or empty literal
   * @throws IllegalStateException if a NEW literal is introduced after the
   *         lexer DFA has already been built (first parse has happened)
   */
  implicit override def literal(lit: String): P = {
    if ((lit eq null) || lit.isEmpty)
      throw new IllegalArgumentException("Null or empty literal string")
    val lit2 = escapeRegexMetachars(lit)
    if (tokenMap.contains(lit2))
      tokenMap(lit2)
    else {
      if (lexer ne null)
        throw new IllegalStateException(format("Define literal(%s) before use", lit))
      // Token id = position in `tokens`; the lexer reports matches by this id.
      val id = tokens.size
      tokens.append(Left(lit2))
      val parser = parserById(id)
      tokenMap(lit2) = parser
      parser
    }
  }

  // Convenience overloads: register 2..10 literals in one call and return
  // their parsers as a tuple (useful for declaring all tokens up front).
  def literal(lit1: String, lit2: String, lit3: String, lit4: String, lit5: String, lit6: String, 
       lit7: String, lit8: String, lit9: String, lit10: String): Tuple10[P, P, P, P, P, P, P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4), literal(lit5), literal(lit6), 
       literal(lit7), literal(lit8), literal(lit9), literal(lit10))
  def literal(lit1: String, lit2: String, lit3: String, lit4: String, lit5: String, lit6: String, 
       lit7: String, lit8: String, lit9: String): Tuple9[P, P, P, P, P, P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4), literal(lit5), literal(lit6), 
       literal(lit7), literal(lit8), literal(lit9))
  def literal(lit1: String, lit2: String, lit3: String, lit4: String, lit5: String, lit6: String, 
       lit7: String, lit8: String): Tuple8[P, P, P, P, P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4), literal(lit5), literal(lit6), 
       literal(lit7), literal(lit8))
  def literal(lit1: String, lit2: String, lit3: String, lit4: String, lit5: String, lit6: String, 
       lit7: String): Tuple7[P, P, P, P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4), literal(lit5), literal(lit6), 
       literal(lit7))
  def literal(lit1: String, lit2: String, lit3: String, lit4: String, lit5: String, lit6: String
        ): Tuple6[P, P, P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4), literal(lit5), literal(lit6))
  def literal(lit1: String, lit2: String, lit3: String, lit4: String, lit5: String): Tuple5[P, P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4), literal(lit5))
  def literal(lit1: String, lit2: String, lit3: String, lit4: String): Tuple4[P, P, P, P] = 
       (literal(lit1), literal(lit2), literal(lit3), literal(lit4))
  def literal(lit1: String, lit2: String, lit3: String): Tuple3[P, P, P] = (literal(lit1), literal(lit2), literal(lit3))
  def literal(lit1: String, lit2: String): Tuple2[P, P] = (literal(lit1), literal(lit2))

  /** Escape hatch: the plain `RegexParsers.regex`, bypassing the lexer.
    * Same `handleWhiteSpace` contract caveat as `literal$`. */
  def regex$(reg: Regex) = super.regex(reg)
  
  /**
   * Registers `reg` as a lexer token (keyed by its string form) and returns
   * its parser.  The pattern string must use dk.brics.automaton `RegExp`
   * syntax, since it is compiled into the shared DFA rather than by
   * `java.util.regex`.
   *
   * The pattern `\z` is special-cased into an end-of-input parser (the brics
   * automaton has no anchors): it skips whitespace and succeeds with "" only
   * if the remaining input is empty.
   *
   * @throws IllegalArgumentException for a null regex or empty pattern
   * @throws IllegalStateException if a NEW pattern is introduced after the
   *         lexer DFA has already been built
   */
  implicit override def regex(reg: Regex): P = {
    if (reg eq null)
      throw new IllegalArgumentException("Null regex value")
    val regAsString = reg.toString
    if (regAsString.isEmpty)
      throw new IllegalArgumentException("Empty regex string")
    if (regAsString == "\\z") {
      Parser(in => {
          val inStr = in.source.asInstanceOf[String]
          // `handleWhiteSpace` yields a length here (see override above).
          val spaces = handleWhiteSpace(inStr, in.offset)
          val afterSpaces = in.drop(spaces)
          if (afterSpaces.atEnd)
            Success("", afterSpaces)
          else
            Failure("Expected-end-of-input", in)
      })
    } else if (tokenMap.contains(regAsString))
      tokenMap(regAsString)
    else {
      if (lexer ne null)
        throw new IllegalStateException(format("Define regex(%s) before use", reg))
      val id = tokens.size
      tokens.append(Right(regAsString))
      val parser = parserById(id)
      tokenMap(regAsString) = parser
      parser
    }
  }

  // Convenience overloads: register 2..10 regexes in one call, as a tuple.
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex, reg5: Regex, reg6: Regex, 
       reg7: Regex, reg8: Regex, reg9: Regex, reg10: Regex): Tuple10[P, P, P, P, P, P, P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4), regex(reg5), regex(reg6), 
       regex(reg7), regex(reg8), regex(reg9), regex(reg10))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex, reg5: Regex, reg6: Regex, 
       reg7: Regex, reg8: Regex, reg9: Regex): Tuple9[P, P, P, P, P, P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4), regex(reg5), regex(reg6), 
       regex(reg7), regex(reg8), regex(reg9))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex, reg5: Regex, reg6: Regex, 
       reg7: Regex, reg8: Regex): Tuple8[P, P, P, P, P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4), regex(reg5), regex(reg6), 
       regex(reg7), regex(reg8))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex, reg5: Regex, reg6: Regex, 
       reg7: Regex): Tuple7[P, P, P, P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4), regex(reg5), regex(reg6), 
       regex(reg7))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex, reg5: Regex, reg6: Regex 
       ): Tuple6[P, P, P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4), regex(reg5), regex(reg6))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex, reg5: Regex): Tuple5[P, P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4), regex(reg5))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex, reg4: Regex): Tuple4[P, P, P, P] = 
       (regex(reg1), regex(reg2), regex(reg3), regex(reg4))
  def regex(reg1: Regex, reg2: Regex, reg3: Regex): Tuple3[P, P, P] = (regex(reg1), regex(reg2), regex(reg3))
  def regex(reg1: Regex, reg2: Regex): Tuple2[P, P] = (regex(reg1), regex(reg2))

  /**
   * Builds the parser for token number `id`: it runs the SHARED lexer
   * (building the DFA lazily on first use) and succeeds only when the token
   * the lexer matched at this position has the same id.  Any other match —
   * or no match at all — is a Failure with a diagnostic message.
   * Trace output is printed when `traceTokens` is set.
   */
  private def parserById(id: Int): P = {
    def formatToken(tid: Int) = tokens(tid) match {case Left(lit) => format("literal(%s)", lit); case Right(reg) => format("regex(%s)", reg)}
    def errMsg(found: Int) = {
      format("expected token#%d (%s), found token#%d (%s)", id, formatToken(id), found, if (found == -1) "Unrecognized-input" else formatToken(found))
    }
    Parser(in => {
        if (lexer eq null)
          setupLexer()
        if (traceTokens)
          printf("Trying %s @ (%d,%d)%n", formatToken(id), in.pos.line, in.pos.column)
        globalTokenParser(in) match {
          case Success(Triple(matchLen, matchId, matchString), next) => 
            if (matchId == id) {
              if (traceTokens)
                printf("Success %s @ (%d,%d)%n", formatToken(id), next.pos.line, next.pos.column)
              Success(matchString, next)
            } else {
              // Lexer matched a DIFFERENT token at this position.
              if (traceTokens)
                printf("Failure %s @ (%d,%d)%n", formatToken(id), in.pos.line, in.pos.column)
              Failure(errMsg(matchId), in)
            }
          case Failure(_, next) =>
            if (next.atEnd) {
              if (traceTokens)
                printf("Failure %s @ (%d,%d)%n", "End-of-input found", next.pos.line, next.pos.column)
              Failure("End-of-input found", next)
            } else {
              if (traceTokens)
                printf("Failure %s @ (%d,%d)%n", formatToken(id), next.pos.line, next.pos.column)
              Failure("Unknown-input found", next)
            }
        }
      })
  }
  
  /**
   * Runs the lexer once per input offset, caching the outcome in
   * `lexerDataCache` so that the (possibly many) alternative token parsers
   * tried at the same position do not re-lex.  Result triple:
   * (matchLength, matchTokenId, matchedText).
   *
   * Cache entry layout per offset: Array(matchLen, matchId, whitespaceLen),
   * with matchLen == 0 meaning "no token matched here".
   * NOTE(review): relies on `in.source` being a `String` (unchecked cast),
   * and on `RunAutomaton.lex` from the MODIFIED dk.brics.automaton returning
   * Array(matchLen, matchId) — confirm against that fork's API.
   */
  private lazy val globalTokenParser: Parser[Triple[Int, Int, String]] = Parser(in => {
      val inStr = in.source.asInstanceOf[String]
      if (lexerDataCache.contains(in.offset)) {
        val ints3 = lexerDataCache(in.offset)
        if (ints3(0) == 0) {
          // Cached miss: if only trailing whitespace remains, advance past it
          // so the caller's `next.atEnd` test can report end-of-input.
          Failure("", if (in.offset + ints3(2) == inStr.length) in.drop(ints3(2)) else in)
        } else {
          Success(Triple(ints3(0), ints3(1), inStr.substring(in.offset + ints3(2), in.offset + ints3(2) + ints3(0))), in.drop(ints3(2) + ints3(0)))
        }
      } else {
        // `spaces` is the whitespace LENGTH (see handleWhiteSpace override).
        val spaces = handleWhiteSpace(inStr, in.offset)
        val ints = lexer.lex(inStr, in.offset + spaces)
        lexerDataCache(in.offset) = Array(ints(0), ints(1), spaces)
        if (ints(0) == 0) {
          Failure("", if (in.offset + spaces == inStr.length) in.drop(spaces) else in)
        } else {
          Success(Triple(ints(0), ints(1), inStr.substring(in.offset + spaces, in.offset + spaces + ints(0))), in.drop(spaces + ints(0)))
        }
      }
    })

  /** Compiles all registered tokens into the shared DFA.  Called lazily by
    * the first token parser that runs; `lastCallForTokens()` gives
    * subclasses a final chance to register tokens before compilation. */
  private def setupLexer() {
    lastCallForTokens()
    /* WARNING: "Automaton" and "RunAutomaton" are from a modified "dk.brics.automaton" */
    val aut = Automaton.lexer(tokens.map(_ match {case Left(lit) => lit; case Right(reg) => reg}).toArray)
    lexer = new RunAutomaton(aut)
  }

  /** Debug aid: prints every registered token (Left = literal, Right = regex). */
  def dumpTokens() {
    for(t <- tokens)
      println(t)
  }
  
  /** Subclass hook, invoked once just before the lexer DFA is built. */
  protected def lastCallForTokens() {}

  // Registered tokens, indexed by token id: Left(escapedLiteral) | Right(regexString).
  private val tokens = mutable.Buffer[Either[String, String]]()
  // The compiled DFA; null until the first parse triggers setupLexer().
  private var lexer: RunAutomaton = null
  // DFA used by the handleWhiteSpace override.
  private val whiteSpaceMatcher = new RunAutomaton(new RegExp(whiteSpace.toString).toAutomaton)
  // Token-string -> parser, for de-duplicating repeat registrations.
  private val tokenMap = mutable.HashMap[String, P]()
  // When true, token parsers print Trying/Success/Failure trace lines.
  var traceTokens = false
  // Per-offset lex results: offset -> Array(matchLen, matchId, whitespaceLen).
  // Cleared by phrase(); NOTE(review): shared mutable state, not thread-safe.
  val lexerDataCache = mutable.HashMap[Int, Array[Int]]()
}
