#region Using directives

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Text;

using Weazel.Gezel.Model.Errors;

#endregion

namespace Weazel.Gezel.Parser
{
  /// <summary>
  /// Enumerates the set of tokens which
  /// are recognized by the tokenizer.
  /// Operator and punctuation tokens carry no value; identifiers,
  /// strings, numbers and comments carry their text in
  /// Tokenizer.CurrentString. Keywords and '$'-prefixed simulation
  /// directives each have a dedicated token.
  /// </summary>
  public enum Token
  {
    Invalid,  // returned for input characters that form no valid token

    Nil,    // represents no token
    Eof,    // end of the input

    // arithmetic and bitwise operators
    Add,    // +
    Sub,    // -
    Mul,    // *
    Div,    // /
    Mod,    // %
    And,    // &
    Ior,    // |
    Xor,    // ^
    Not,    // ~

    Assign, // =

    // comparison operators
    Eq,     // ==
    Neq,    // !=
    Gt,     // >
    Lt,     // <
    Gte,    // >=
    Lte,    // <=

    // shift operators
    Sla,    // <<
    Sra,    // >>

    // punctuation
    Transition, // ->
    At,     // @
    Concat, // #
    Quest,  // ?
    Comma,  // ,
    Colon,  // :
		Dot,		// .
    Semicolon, // ;

    // tokens with an associated text value (see Tokenizer.CurrentString)
    Identifier,
    Str,
    Number,
    Comment,

    // bracket pairs
    LeftBrace,        // {
    RightBrace,       // }
    LeftBracket,      // [
    RightBracket,     // ]
    LeftParenthesis,  // (
    RightParenthesis, // )

    // keywords and '$'-prefixed simulation directives; the source
    // spelling of each is registered in Tokenizer's keyword table
    Bin,              // $bin
    Cycle,            // $cycle
    Dec,              // $dec
    Display,          // $display
    Dp,               // dp
    DpDirective,      // $dp
    
    Else,
    FinishDirective,  // $finish
    Fsm,
    Hardwired,
    Hex,              // $hex
    If,
    Initial,
    In,
    Ipblock,
    Ipparm,
    Iptype,
    Lookup,
    Ns,
    Out,
    Reg,
    Sequencer,
    Sfg,
    SfgDirective,     // $sfg
    Sig,
    State,
    Stimulus,
    System,
    Tc,
    Then,
    Use,
    TraceDirective,   // $trace
    Constant,         // spelled "const" in source text
    OptionDirective,  // $option
    ToggleDirective   // $toggle
  };

   /// <summary>
  /// The tokenizer is responsible for splitting a gezel
  /// system model into a stream of tokens. The tokens
  /// are defined in the Token enumeration.
  /// </summary>
  public class Tokenizer
  {
    /// <summary>
    /// Returns a printable representation of a token, suitable for
    /// inclusion in parser error messages.
    /// </summary>
    /// <param name="tok">the token to describe</param>
    /// <returns>the source text of the token, or a short description
    /// for token classes such as identifiers and numbers</returns>
    public static String TokenToString(Token tok)
    {
      switch (tok)
      {
        case Token.Nil: return "nil";
        case Token.Eof: return "end of file";
        case Token.Add: return "+";
        case Token.Sub: return "-";
        case Token.Mul: return "*";
        case Token.Div: return "/";
        case Token.Mod: return "%";
        case Token.And: return "&";
        case Token.Ior: return "|";
        case Token.Xor: return "^";
        // BUGFIX: Not is produced by '~' (see TokenizeOperator); it
        // previously rendered as "!", which is not a valid token at all.
        case Token.Not: return "~";
        case Token.Assign: return "=";
        case Token.Eq: return "==";
        case Token.Neq: return "!=";
        case Token.Gt: return ">";
        case Token.Lt: return "<";
        case Token.Gte: return ">=";
        case Token.Lte: return "<=";
        case Token.Sla: return "<<";
        case Token.Sra: return ">>";
        case Token.Transition: return "->";
        case Token.At: return "@";
        case Token.Concat: return "#";
        case Token.Quest: return "?";
        case Token.Comma: return ",";
        case Token.Colon: return ":";
        // BUGFIX: Dot was missing and fell through to "unknown token".
        case Token.Dot: return ".";
        case Token.Semicolon: return ";";
        case Token.Identifier: return "identifier";
        case Token.Str: return "string";
        case Token.Number: return "number";
        case Token.Comment: return "comment";
        case Token.LeftBrace: return "{";
        case Token.RightBrace: return "}";
        case Token.LeftBracket: return "[";
        case Token.RightBracket: return "]";
        case Token.LeftParenthesis: return "(";
        case Token.RightParenthesis: return ")";
        case Token.Bin: return "$bin";
        case Token.Cycle: return "$cycle";
        case Token.Dec: return "$dec";
        case Token.Display: return "$display";
        case Token.Dp: return "dp";
        case Token.DpDirective: return "$dp";
        case Token.Else: return "else";
        case Token.FinishDirective: return "$finish";
        case Token.Fsm: return "fsm";
        case Token.Hardwired: return "hardwired";
        case Token.Hex: return "$hex";
        case Token.If: return "if";
        case Token.Initial: return "initial";
        case Token.In: return "in";
        case Token.Ipblock: return "ipblock";
        // BUGFIX: the keyword table registers "ipparm" (not "ipparam");
        // render the spelling the tokenizer actually accepts.
        case Token.Ipparm: return "ipparm";
        case Token.Iptype: return "iptype";
        case Token.Lookup: return "lookup";
        case Token.Ns: return "ns";
        case Token.Out: return "out";
        case Token.Reg: return "reg";
        case Token.Sequencer: return "sequencer";
        case Token.Sfg: return "sfg";
        case Token.SfgDirective: return "$sfg";
        case Token.Sig: return "sig";
        case Token.State: return "state";
        case Token.Stimulus: return "stimulus";
        case Token.System: return "system";
        case Token.Tc: return "tc";
        case Token.Then: return "then";
        case Token.Use: return "use";
        case Token.TraceDirective: return "$trace";
        // BUGFIX: the keyword table registers "const" for this token;
        // render the spelling that appears in source text.
        case Token.Constant: return "const";
        case Token.OptionDirective: return "$option";
        case Token.ToggleDirective: return "$toggle";

        default: return "unknown token";
      }
    }

    /// <summary>
    /// The model description, with a trailing newline appended by the
    /// constructor so line comments and strings are always terminated.
    /// </summary>
    private readonly char[] input;

    /// <summary>
    /// Marks the current position in the model
    /// description character array above.
    /// </summary>
    private uint mark;

    /// <summary>
    /// One past the offset of the current token's first character
    /// (see CurrentStartPosition, which subtracts one).
    /// </summary>
    private uint startMark = 0;

    private Token currentToken = Token.Nil;

    /// <summary>
    /// The token most recently produced by NextToken.
    /// </summary>
    public Token CurrentToken
    {
      get
      {
        return currentToken;
      }
    }

    /// <summary>
    /// Stores the text associated with certain tokens
    /// (identifiers, keywords, numbers, strings and comments).
    /// </summary>
    private string currentValue;

    /// <summary>
    /// Returns the current token value as a string.
    /// It is the responsibility of the caller to make
    /// sure that this is only read for tokens that
    /// have an associated value.
    /// </summary>
    /// <value></value>
    public string CurrentString
    {
      get
      {
        return currentValue;
      }
    }

    /// <summary>
    /// Parse errors collected during tokenization and by the
    /// Accept/Check error-recovery entry points.
    /// </summary>
    private List<ParseError> errors = new List<ParseError>();
    public List<ParseError> Errors
    {
      get
      {
        return errors;
      }

      set
      {
        errors = value;
      }
    }

    /// <summary>
    /// Name of the "source" of the input string. Usually a file.
    /// Only used when building Position objects for error reporting.
    /// </summary>
    private readonly string fileName;

    /// <summary>
    /// Returns the current token value as an integer. Supports decimal
    /// (13245678), binary (0b010101) and hexadecimal (0xDEADBEEF)
    /// spellings. It is the responsibility of the caller to make sure
    /// that this is only read when the current token is a number.
    /// </summary>
    /// <value></value>
    public int CurrentInt
    {
      get
      {
        Debug.Assert(currentValue.Length > 0);

        // a single character can only be a plain decimal digit
        if (currentValue.Length == 1)
          return currentValue[0] - '0';

        // the second character selects the radix: 0b... or 0x...
        char c = currentValue[1];

        if (c == 'b' || c == 'B')
          return Convert.ToInt32(currentValue.Substring(2), 2);
        else if (c == 'x' || c == 'X')
          return Convert.ToInt32(currentValue.Substring(2), 16);
        else
          return Convert.ToInt32(currentValue, 10);
      }
    }

    /// <summary>
    /// Contains the keywords and '$'-prefixed directives recognized by
    /// the tokenizer, keyed by their source spelling.
    /// </summary>
    private static readonly Dictionary<string, Token> keywords;

    /// <summary>
    /// Initializes the keywords hash with the appropriate
    /// keyword strings.
    /// </summary>
    static Tokenizer()
    {
      keywords = new Dictionary<string, Token>();
      keywords["$bin"] = Token.Bin;
      keywords["$cycle"] = Token.Cycle;
      keywords["$dec"] = Token.Dec;
      keywords["$display"] = Token.Display;
      keywords["dp"] = Token.Dp;
      keywords["$dp"] = Token.DpDirective;
      keywords["else"] = Token.Else;
      keywords["$finish"] = Token.FinishDirective;
      keywords["fsm"] = Token.Fsm;
      keywords["hardwired"] = Token.Hardwired;
      keywords["$hex"] = Token.Hex;
      keywords["if"] = Token.If;
      keywords["initial"] = Token.Initial;
      keywords["in"] = Token.In;
      keywords["ipblock"] = Token.Ipblock;
      keywords["ipparm"] = Token.Ipparm;
      keywords["iptype"] = Token.Iptype;
      keywords["lookup"] = Token.Lookup;
      keywords["ns"] = Token.Ns;
      keywords["out"] = Token.Out;
      keywords["reg"] = Token.Reg;
      keywords["sequencer"] = Token.Sequencer;
      keywords["sfg"] = Token.Sfg;
      keywords["$sfg"] = Token.SfgDirective;
      keywords["sig"] = Token.Sig;
      keywords["state"] = Token.State;
      keywords["stimulus"] = Token.Stimulus;
      keywords["system"] = Token.System;
      keywords["tc"] = Token.Tc;
      keywords["then"] = Token.Then;
      keywords["use"] = Token.Use;
      keywords["$trace"] = Token.TraceDirective;
      keywords["const"] = Token.Constant;
      keywords["$option"] = Token.OptionDirective;
      keywords["$toggle"] = Token.ToggleDirective;
    }

    /// <summary>
    /// Constructs a tokenizer from the given input string and advances
    /// to the first token, so CurrentToken is valid immediately.
    /// </summary>
    /// <param name="input">the string to tokenize</param>
    /// <param name="fileName">name of the source of the input,
    /// used when reporting error positions</param>
    public Tokenizer(string input, string fileName)
    {
      // append a newline so the input is never empty and a trailing
      // line comment or unterminated string is always closed
      this.input = (input + "\n").ToCharArray();
      this.mark = 0;
      this.fileName = fileName;

      // initialize, get the first token
      NextToken();
    }

    public Tokenizer(string input) :
      this(input, "not specified")
    {
      // empty
    }

    /// <summary>
    /// Computes the line/column position of the given input offset and
    /// wraps it, together with the text of that line, in a Position.
    /// </summary>
    /// <param name="mark">offset into the input character array</param>
    private Gezel.Model.Position computePosition(uint mark)
    {
      uint lineNum = 1, lineStart = 0, lineEnd = (uint)input.Length;

      // count the newlines before 'mark' to obtain the line number and
      // the offset at which the current line starts
      for (uint i = 0; i < mark && i < input.Length; i++)
      {
        if (input[i] == '\n')
        {
          lineNum++;
          lineStart = i + 1;
        }
      }

      // find where the current line ends;
      // BUGFIX: start the scan at lineStart (not lineStart + 1) so an
      // empty line (a newline sitting right at lineStart) is handled
      for (uint i = lineStart; i < input.Length; i++)
      {
        if (input[i] == '\n')
        {
          lineEnd = i;
          break;
        }
        else if (i == input.Length - 1) // no end line found
          lineEnd = i + 1;
      }

      uint colNum = mark - lineStart;
      char[] chars = new char[lineEnd - lineStart];
      Array.Copy(input, lineStart, chars, 0, chars.Length);
      string line = new string(chars);

      return new Gezel.Model.Position(line, mark, lineNum, colNum + 1, fileName);
    }

    /// <summary>
    /// Position of the first character of the current token.
    /// </summary>
    public Gezel.Model.Position CurrentStartPosition
    {
      get
      {
        // startMark is one past the token's first character
        return computePosition(startMark - 1);
      }
    }

    /// <summary>
    /// Position just past the last character of the current token.
    /// </summary>
    public Gezel.Model.Position CurrentEndPosition
    {
      get
      {
        return computePosition(mark - 1);
      }
    }

    /// <summary>
    /// Calculates the current line and column position in the
    /// parse input and returns them wrapped in a Position object
    /// along with the current line.
    /// </summary>
    /// <returns>an object wrapping the tokenizer position</returns>
    public Gezel.Model.Position CurrentPosition
    {
      get
      {
        return CurrentStartPosition;
      }
    }

    /// <summary>
    /// Advances the token stream until the current token is a member
    /// of <paramref name="toFind"/>. Used for panic-mode error recovery.
    /// </summary>
    /// <param name="toFind">the synchronization set</param>
    /// <returns>true if a token from the set was reached</returns>
    public bool SkipTo(Set<Token> toFind)
    {
      if (toFind.IsEmpty)
      {
        return false;
      }

      // BUGFIX: also stop at end of file; the original looped forever
      // when the synchronization set did not contain Token.Eof,
      // because NextToken keeps returning Eof once the input is spent.
      while (!toFind.Contains(currentToken) && currentToken != Token.Eof)
      {
        NextToken();
      }

      return toFind.Contains(currentToken);
    }

    public void Accept(Token expectedToken, Set<Token> follow)
    {
      Accept(expectedToken, follow, string.Format("expected {0}", expectedToken));
    }

    public void Accept(Token expectedToken, Set<Token> follow, string message)
    {
      Accept(expectedToken, follow, new ErrorMessage(message));
    }

    /// <summary>
    /// Consumes the current token if it matches
    /// <paramref name="expectedToken"/>; otherwise records an
    /// UnexpectedTokenParseError and skips ahead to a token in
    /// <paramref name="follow"/>.
    /// </summary>
    /// <param name="message">retained for interface compatibility;
    /// the recorded error is built from the token mismatch itself</param>
    public void Accept(Token expectedToken, Set<Token> follow, ErrorMessage message)
    {
      if (expectedToken == currentToken)
      {
        ParserDebug.WriteLine(string.Format("Accept success ({0})", currentToken));

        NextToken();
      }
      else
      {
        // dump if debug enabled
        ParserDebug.WriteLine(string.Format("Accept failed (current token: {0}, expected: {1})", currentToken, expectedToken));
        DumpCallers(new StackTrace(), 5);

        errors.Add(
          new UnexpectedTokenParseError(
            CurrentPosition,
            currentToken,
            expectedToken
          )
        );

        SkipTo(follow);
      }
    }

    /// <summary>
    /// Dumps the caller stack frames numbered <paramref name="from"/>
    /// down to 1 to the parser debug channel.
    /// </summary>
    /// <param name="st">stack trace captured in the caller, so frame
    /// indices match the caller's view of the stack</param>
    /// <param name="from">highest frame index to print</param>
    private static void DumpCallers(StackTrace st, int from)
    {
      ParserDebug.WriteLine("at: ");
      for (int i = from; i >= 1; i--)
      {
        // BUGFIX: GetFrame returns null past the end of the stack; the
        // original dereferenced the frames unconditionally
        StackFrame frame = st.GetFrame(i);
        if (frame != null && frame.GetMethod() != null && frame.GetMethod().DeclaringType != null)
          ParserDebug.WriteLine(i + ": " + frame.GetMethod().DeclaringType.ToString());
      }
    }

    public void Check(Set<Token> follow)
    {
      Check(follow, "Unexpected token");
    }

    public void Check(Set<Token> follow, string message)
    {
      Check(follow, new ErrorMessage(message));
    }

    /// <summary>
    /// Verifies that the current token is a member of
    /// <paramref name="follow"/>; if not, records an
    /// UnexpectedTokenParseError and skips ahead to a token in the set.
    /// The current token is not consumed in either case.
    /// </summary>
    public void Check(Set<Token> follow, ErrorMessage message)
    {
      if (!follow.Contains(currentToken))
      {
        // dump, if debug is enabled
        ParserDebug.WriteLine("Check failed: " + message.ToString());
        DumpCallers(new StackTrace(), 3);

        ParserDebug.WriteLine("Expected:" + follow.ToString());
        ParserDebug.WriteLine("CurrentToken: " + this.CurrentToken);

        errors.Add(
          new UnexpectedTokenParseError(
            CurrentPosition,
            currentToken,
            follow
          )
        );

        SkipTo(follow);
      }
      else
      {
        ParserDebug.WriteLine(string.Format("Check success ({0})", currentToken));
      }
    }

    /// <summary>
    /// Advances to the next token in the input, skipping whitespace,
    /// comments and invalid characters. The result is available through
    /// CurrentToken (and CurrentString/CurrentInt where applicable).
    /// </summary>
    public void NextToken()
    {
      do
      {
        // skip any whitespace
        while (mark < input.Length && Char.IsWhiteSpace(input[mark]))
          mark++;

        // check if we've read to end of file
        if (mark == input.Length)
        {
          currentToken = Token.Eof;
          return;
        }

        char c = input[mark++];

        // remember where this token started, for position reporting
        startMark = mark;

        uint begin = 0;

        Token t;
        if ((t = TokenizeNumber(c, ref begin)) != Token.Nil)
          currentToken = t;
        else
          currentToken = TokenizeOperator(c, ref begin);
      } while (currentToken == Token.Comment || currentToken == Token.Invalid);
    }

    /// <summary>
    /// Returns the token matching the operator, comment or string
    /// literal whose first character is <paramref name="c"/>, or
    /// Token.Invalid (after recording a parse error) if no token
    /// could be recognized.
    /// </summary>
    /// <param name="c">beginning char, already consumed from the input</param>
    /// <param name="begin">receives the start offset of multi-character tokens</param>
    /// <returns>the recognized token</returns>
    private Token TokenizeOperator(char c, ref uint begin)
    {
      // look for operators
      switch (c)
      {
        case '@':
          return Token.At;
        case '+':
          return Token.Add;
        case '*':
          return Token.Mul;
        case '&':
          return Token.And;
        case '|':
          return Token.Ior;
        case '^':
          return Token.Xor;
        case '~':
          return Token.Not;
        case '%':
          return Token.Mod;
        case '(':
          return Token.LeftParenthesis;
        case '{':
          return Token.LeftBrace;
        case '[':
          return Token.LeftBracket;
        case ')':
          return Token.RightParenthesis;
        case '}':
          return Token.RightBrace;
        case ']':
          return Token.RightBracket;
        case ',':
          return Token.Comma;
        case ':':
          return Token.Colon;
        case ';':
          return Token.Semicolon;
        case '.':
          return Token.Dot;
        case '?':
          return Token.Quest;
        case '/':
          // "//" begins a cpp style comment running to end of line
          if (mark < input.Length && input[mark] == '/')
          {
            begin = mark - 1;
            ReadLineComment(begin);
            return Token.Comment;
          }
          return Token.Div;
        case '#':
          // "#!" begins a shell style comment running to end of line
          if (mark < input.Length && input[mark] == '!')
          {
            begin = mark - 1;
            ReadLineComment(begin);
            return Token.Comment;
          }
          return Token.Concat;
        case '"':
          begin = mark;
          while (mark < input.Length && input[mark] != '"' && input[mark] != '\n')
            mark++;

          if (mark == input.Length || input[mark] == '\n')
          {
            ErrorMessage message = new ErrorMessage("Newline in string constant at ");
            message.AddPosition(CurrentPosition);

            errors.Add(
              new ParseError(
                ParseErrorType.NewlineInString,
                message
              )
            );
          }

          // get the string contents (without the surrounding quotes)
          char[] contents = new char[mark - begin];
          Array.Copy(input, begin, contents, 0, contents.Length);
          currentValue = new string(contents);

          // increment past the finishing quote
          mark++;

          return Token.Str;
        case '=':
          if (mark < input.Length && input[mark] == '=')
          {
            mark++;
            return Token.Eq;
          }
          return Token.Assign;
        case '-':
          if (mark < input.Length && input[mark] == '>')
          {
            mark++;
            return Token.Transition;
          }
          return Token.Sub;
        case '>':
          if (mark < input.Length && input[mark] == '>')
          {
            mark++;
            return Token.Sra;
          }
          if (mark < input.Length && input[mark] == '=')
          {
            mark++;
            return Token.Gte;
          }
          return Token.Gt;
        case '<':
          if (mark < input.Length && input[mark] == '<')
          {
            mark++;
            return Token.Sla;
          }
          if (mark < input.Length && input[mark] == '=')
          {
            mark++;
            return Token.Lte;
          }
          return Token.Lt;
        case '!':
          if (mark < input.Length && input[mark] == '=')
          {
            mark++;
            return Token.Neq;
          }
          // a bare '!' is not a gezel token (bitwise not is '~')
          return ReportInvalid("Invalid token '!' at ");
        default:
          // BUGFIX: report the offending character itself; 'mark' has
          // already advanced past it, so input[mark] named the *next*
          // character and could read one past the end of the input
          return ReportInvalid(string.Format("Invalid character '{0}' at ", c));
      }
    }

    /// <summary>
    /// Consumes the remainder of the current line and stores its text
    /// (from <paramref name="begin"/> up to, but not including, the
    /// newline) in currentValue. Shared by the "//" and "#!" comment
    /// forms.
    /// </summary>
    /// <param name="begin">offset of the first comment character</param>
    private void ReadLineComment(uint begin)
    {
      while (mark < input.Length && input[mark] != '\n')
        mark++;

      // get the comment as a string
      char[] comment = new char[mark - begin];
      Array.Copy(input, begin, comment, 0, comment.Length);
      currentValue = new string(comment);
    }

    /// <summary>
    /// Records an UnknownInputCharacter parse error carrying the given
    /// text and the current position, and returns Token.Invalid so
    /// NextToken skips past the offending character.
    /// </summary>
    /// <param name="text">message text describing the bad input</param>
    private Token ReportInvalid(string text)
    {
      ErrorMessage message = new ErrorMessage();
      message.AddString(text);
      message.AddPosition(CurrentPosition);

      ParseError error = new ParseError(
          ParseErrorType.UnknownInputCharacter, message);

      errors.Add(error);

      return Token.Invalid;
    }

    /// <summary>
    /// checks for number which can be:
    //      binary:         0b010101
    //      decimal:        13245678
    //      hexadecimal:    0xDEADBEEF
    /// or an identifier/keyword, which must begin with a letter, '_'
    /// or '$' and continue with letters, digits and underscores.
    /// </summary>
    /// <param name="c">beginning char, already consumed from the input</param>
    /// <param name="begin">receives the start offset of the token</param>
    /// <returns>Token.Number, Token.Identifier, a keyword token, or
    /// Token.Nil when the input is neither a number nor an identifier</returns>
    private Token TokenizeNumber(char c, ref uint begin)
    {
      if (Char.IsDigit(c))
      {
        begin = mark - 1;
        // letters are consumed too so radix prefixes (0b, 0x) and hex
        // digits become part of the number's text; CurrentInt decodes
        while (mark < input.Length && (Char.IsLetterOrDigit(input[mark])))
          mark++;

        // get the num as a string
        char[] num = new char[mark - begin];
        Array.Copy(input, begin, num, 0, num.Length);
        currentValue = new string(num);
        return Token.Number;
      }

      // check for identifier which must begin with a non-number and
      // consist of letters, digits and underscores only ('$' only
      // appears as the first character, starting a directive)
      if (Char.IsLetter(c) || c == '_' || c == '$')
      {
        begin = mark - 1;
        while (mark < input.Length && (Char.IsLetterOrDigit(input[mark]) || input[mark] == '_'))
          mark++;

        // get the identifier as a string
        char[] ident = new char[mark - begin];
        Array.Copy(input, begin, ident, 0, ident.Length);
        currentValue = new string(ident);

        // a single lookup decides between keyword/directive and identifier
        Token keyword;
        if (keywords.TryGetValue(currentValue, out keyword))
          return keyword;

        return Token.Identifier;
      }

      return Token.Nil;
    }

    /// <summary>
    /// Resets the tokenizer to the beginning of the current input.
    /// The caller must invoke NextToken() before reading CurrentToken,
    /// which still holds the token from before the reset.
    /// </summary>
    public void Reset()
    {
      mark = 0;
    }
  }
}
