﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Diagnostics;
using System.Collections;
using System.Reflection;
using System.Text.RegularExpressions;
using System.IO;

using King.Extensions;

using Antlr.Runtime;
using Antlr.Runtime.Tree;
using Antlr.Runtime.Debug;
using Antlr.Runtime.Misc;
using System.Runtime.InteropServices;

namespace King.Antlr3 {

    // A token wrapper that can also represent ANTLR's "nil" node: a rootless
    // list of children used during tree construction. The token is nil when the
    // underlying TokenData pointer is null; a nil token owns its child list
    // directly (m_children), while a real token stores its children in the
    // adapter's side table.
    [DebuggerTypeProxy(typeof(TokenPtrDebuggerTypeProxy))]
    public unsafe struct NillableToken {

        public static bool operator ==(NillableToken lhs, NillableToken rhs) {
            return lhs.Equals(rhs);
        }
        public static bool operator !=(NillableToken lhs, NillableToken rhs) {
            return !(lhs == rhs);
        }

        // Debugger display helper: shows the token's children directly in the
        // root view of the watch window.
        private class TokenPtrDebuggerTypeProxy {
            private readonly NillableToken m_token;

            public TokenPtrDebuggerTypeProxy(NillableToken token) {
                m_token = token;
            }

            [DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
            public NillableToken[] Children {
                get { return m_token.Children().ToArray(); }
            }
        }

        public static implicit operator IntPtr(NillableToken token) {
            return (IntPtr)token.Pointer;
        }

        private TokenTreeAdaptor m_adapter;
        private AntlrToken m_token;
        private TokenData* m_pStartChild, m_pEndChild; // boundary tokens for the nil (list) form
        private List<IntPtr> m_children;               // child list; allocated only for nil tokens

        internal NillableToken(TokenTreeAdaptor adapter, TokenData* pToken = null) {
            m_adapter = adapter;
            m_token = new AntlrToken(pToken);
            m_children = null;
            m_pStartChild = null;
            m_pEndChild = null;

            // a nil token (null pointer) owns its own child list
            if (IsNil)
                m_children = new List<IntPtr>();
        }

        // Raw pointer to the unmanaged token record; null for nil tokens.
        internal TokenData* Pointer {
            get { return m_token.Pointer; }
        }
        internal bool HasChildren {
            get {
                // NOTE(review): GetChildren() is evaluated twice per call here.
                if (GetChildren() == null)
                    return false;

                return GetChildren().Any();
            }
        }
        // Nil tokens own their child list; real tokens look it up in the
        // adapter's side table (may return null if no list was created yet).
        internal List<IntPtr> GetChildren() {
            if (IsNil)
                return m_children;

            return m_adapter.GetChildren((IntPtr)m_token);
        }
        // Associates a child list with a real token. Nil tokens already own
        // their list from construction and cannot be re-targeted.
        internal void SetChildren(List<IntPtr> children) {
            if (IsNil)
                throw new InvalidOperationException();

            m_adapter.SetChildren((IntPtr)m_token, children);
        }

        public NillableToken Token {
            get { return this; }
        }
        public AntlrToken AntlrToken {
            get { return m_token; }
        }
        // NOTE(review): dereferences Pointer — faults for nil tokens, like the
        // other pass-through properties below (Line/Column/Position/Length/Index).
        public int Channel {
            get { return Pointer->Channel; }
        }

        // True when this token is ANTLR's "nil" list node (no backing record).
        public bool IsNil {
            get { return Pointer == null; }
        }
        // True only for default(NillableToken): nil pointer AND no child list.
        public bool IsDefault {
            get { return object.Equals(this, default(NillableToken)); }
        }
        // Token text, resolved in order: explicit interned text -> EOF marker ->
        // token-type name (when no source slice is available) -> source slice.
        public string Text {
            get {
                if (IsNil)
                    return "<Nil>";

                var text = m_adapter.GetText(this);
                if (text != null)
                    return text;

                if (Type == MyTokenTypes.EndOfFile)
                    return "<EOF>";

                if (Length == 0 || Stream == null)
                    return m_adapter.GetTokenName(Type);

                return Stream.Substring(Position, Length);
            }
            set { m_adapter.SetText(this, value); }
        }
        public int Type {
            get {
                if (IsNil)
                    return MyTokenTypes.Invalid;

                return Pointer->Type;
            }
        }
        public int Line {
            get { return Pointer->Line; }
        }
        public int Column {
            get { return Pointer->Column; }
        }
        public int Position {
            get { return Pointer->Position; }
        }
        public int Length {
            get { return Pointer->Length; }
        }
        public int Index {
            get { return Pointer->Index; }
        }

        // NOTE(review): the setters of Parent/StartToken/EndToken are commented
        // out, yet TokenTreeAdaptor (SetParent, SetTokenBoundaries, AddChild)
        // still assigns these properties — confirm whether the setters should
        // be restored (they would also need setters on TokenData).
        public unsafe NillableToken Parent {
            get { return new NillableToken(m_adapter, IsNil ? null : Pointer->Parent); }
            //set {
            //    if (IsNil)
            //        throw new NotSupportedException();
            //    else
            //        Pointer->Parent = value.Pointer;
            //}
        }
        public NillableToken StartToken {
            get { return new NillableToken(m_adapter, IsNil ? m_pStartChild : Pointer->StartChild); }
            //set {
            //    if (IsNil)
            //        m_pStartChild = value.Pointer;
            //    else
            //        Pointer->StartChild = value.Pointer;
            //}
        }
        public NillableToken EndToken {
            get { return new NillableToken(m_adapter, IsNil ? m_pEndChild : Pointer->EndChild); }
            //set {
            //    if (IsNil)
            //        m_pEndChild = value.Pointer;
            //    else
            //        Pointer->EndChild = value.Pointer;
            //}
        }

        public ICharStream Stream {
            get { return m_adapter.Stream; }
            set { throw new NotSupportedException(); }
        }

        // Enumerates the children as NillableToken wrappers; empty when there
        // is no child list.
        public IEnumerable<NillableToken> Children() {
            var children = GetChildren();

            if (children == null)
                return Enumerable.Empty<NillableToken>();

            // copy m_adapter into a local: a struct field cannot be captured
            // by the lambda below.
            var adapter = m_adapter;
            return children.Select(o => new NillableToken(adapter, (TokenData*)o));
        }

        // Nil tokens compare by child-list reference; real tokens by pointer.
        public override bool Equals(object obj) {
            if (!(obj is NillableToken))
                return false;

            if (IsNil)
                return m_children == ((NillableToken)obj).m_children;

            return Pointer == ((NillableToken)obj).Pointer;
        }
        // NOTE(review): throws for nil tokens even though Equals supports them,
        // so nil tokens cannot be used as hash keys. The (int) cast truncates
        // the pointer on 64-bit — acceptable for hashing only.
        public override int GetHashCode() {
            if (IsNil)
                throw new NotSupportedException();

            return (int)Pointer;
        }
        public override string ToString() {
            return Text;
        }

    }

    /// <summary>
    /// Thin value wrapper over an unmanaged <c>TokenData</c> record. Unlike
    /// <c>NillableToken</c> it has no "nil list" form and holds no adapter
    /// reference; adapter-backed data (text, children) is fetched via explicit
    /// adaptor arguments.
    /// </summary>
    public unsafe struct AntlrToken {

        public static implicit operator IntPtr(AntlrToken token) {
            return (IntPtr)token.Pointer;
        }

        private TokenData* m_pToken;

        internal AntlrToken(TokenData* pToken) {
            m_pToken = pToken;
        }

        // Raw pointer to the underlying record; may be null.
        internal TokenData* Pointer {
            get { return m_pToken; }
        }

        /// <summary>True when this wrapper holds no token (null pointer).</summary>
        public bool IsDefault {
            // BUGFIX: this compared against default(NillableToken) — a
            // different type — so it always returned false.
            get { return object.Equals(this, default(AntlrToken)); }
        }

        // NOTE(review): the pass-through properties below dereference the raw
        // pointer and will fault when IsDefault is true.
        public int Index {
            get { return m_pToken->Index; }
        }
        public int Channel {
            get { return m_pToken->Channel; }
        }
        public int Type {
            get { return m_pToken->Type; }
        }

        public int Line {
            get { return m_pToken->Line; }
        }
        public int Column {
            get { return m_pToken->Column; }
        }
        public int Position {
            get { return m_pToken->Position; }
        }
        public int Length {
            get { return m_pToken->Length; }
        }

        public AntlrToken StartToken {
            get { return new AntlrToken(m_pToken->StartChild); }
        }
        public AntlrToken EndToken {
            get { return new AntlrToken(m_pToken->EndChild); }
        }

        // Interned token text, resolved through the adaptor's symbol table.
        public string GetText(TokenTreeAdaptor adaptor) {
            return adaptor.GetTextInternal(m_pToken);
        }
        // Children as recorded in the adaptor's side table; empty when none.
        public IEnumerable<AntlrToken> Children(TokenTreeAdaptor adaptor) {
            var children = adaptor.GetChildren((IntPtr)m_pToken);

            if (children == null)
                return Enumerable.Empty<AntlrToken>();

            return children.Select(o => new AntlrToken((TokenData*)o));
        }

        // Tokens compare by pointer identity.
        public override bool Equals(object obj) {
            if (!(obj is AntlrToken))
                return false;

            return m_pToken == ((AntlrToken)obj).m_pToken;
        }
        // NOTE(review): throws for the default token; the (int) cast truncates
        // the pointer on 64-bit — acceptable for hashing only.
        public override int GetHashCode() {
            if (m_pToken == null)
                throw new NotSupportedException();

            return (int)m_pToken;
        }
    }

    /// <summary>
    /// Unmanaged, fixed-layout token record. Instances live in memory obtained
    /// from an <see cref="IAllocator"/> and are linked together through raw
    /// pointers (parent / start child / end child).
    /// </summary>
    internal unsafe struct TokenData {
        private int m_index;    // token index within the token stream
        private int m_type;     // token type id
        private int m_channel;  // lexer channel

        private int m_symbol;   // interned-text id; 0 means "no text set"

        private int m_line;
        private int m_column;
        private int m_position; // start offset in the character stream
        private int m_length;   // length in characters

        private TokenData* m_pParent;
        private TokenData* m_pStartChild;
        private TokenData* m_pEndChild;

        public TokenData(int index, int type, int channel,
            int line, int column, int position, int length,
            TokenData* pParent = null, TokenData* pStartChild = null, TokenData* pEndChild = null,
            int symbol = 0) {

            m_index = index;
            m_type = type;
            m_channel = channel;

            // FIX: symbol was assigned twice (here and again at the end of the
            // constructor); the redundant assignment has been removed.
            m_symbol = symbol;

            m_line = line;
            m_column = column;
            m_position = position;
            m_length = length;

            m_pParent = pParent;
            m_pStartChild = pStartChild;
            m_pEndChild = pEndChild;
        }

        internal int Index { get { return m_index; } }
        internal int Type { get { return m_type; } }
        internal int Channel { get { return m_channel; } }

        // FIX: setter added — TokenTreeAdaptor.SetTextInternal assigns
        // pToken->Symbol, which cannot compile against a get-only property.
        internal int Symbol {
            get { return m_symbol; }
            set { m_symbol = value; }
        }

        internal int Line { get { return m_line; } }
        internal int Column { get { return m_column; } }
        internal int Position { get { return m_position; } }
        internal int Length { get { return m_length; } }

        // FIX: setter added — TokenTreeAdaptor.DeleteChild / ReplaceChildren
        // assign pToken->Parent when detaching or re-parenting subtrees.
        internal TokenData* Parent {
            get { return m_pParent; }
            set { m_pParent = value; }
        }
        // Setters added for the token-boundary updates as well (see the
        // commented-out NillableToken.StartToken/EndToken setters).
        internal TokenData* StartChild { get { return m_pStartChild; } set { m_pStartChild = value; } }
        internal TokenData* EndChild { get { return m_pEndChild; } set { m_pEndChild = value; } }
    }

    /// <summary>Minimal bump-pointer allocation abstraction.</summary>
    public interface IAllocator {
        IntPtr Allocate(int byteCount);
    }
    /// <summary>
    /// Bump-pointer allocator. Starts in a caller-supplied buffer and, once it
    /// is exhausted, falls back to pinned managed blocks of geometrically
    /// growing size. Allocations are never freed individually; Dispose releases
    /// all pinned fallback blocks at once. Not thread-safe.
    /// </summary>
    public unsafe class Allocator : IDisposable, IAllocator {

        private int m_count;                // size of the NEXT fallback block to allocate
        private byte* m_pNext;              // next free byte in the current block
        private byte* m_pLast;              // one past the end of the current block
        private List<GCHandle> m_gcHandles; // pinned fallback blocks (null until first overflow)

        public Allocator(byte* pBytes, int cBytes) {
            m_count = cBytes;
            SetNextAndLast(pBytes);
        }

        // Makes [pBytes, pBytes + m_count) the current block and doubles the
        // size used for the next fallback block.
        private void SetNextAndLast(byte* pBytes) {
            m_pNext = pBytes;
            m_pLast = m_pNext + m_count;
            m_count *= 2;
        }
        private void AllocateBlock() {
            var buffer = new Byte[m_count];
            // BUGFIX: the handle must be pinned — GCHandle.Alloc without
            // GCHandleType.Pinned makes AddrOfPinnedObject throw
            // InvalidOperationException (and the GC could move the buffer).
            var gcHandle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
            var pBuffer = (byte*)gcHandle.AddrOfPinnedObject();

            if (m_gcHandles == null)
                m_gcHandles = new List<GCHandle>();
            m_gcHandles.Add(gcHandle);

            SetNextAndLast(pBuffer);
        }

        private void Dispose(bool finalizing) {
            if (!finalizing)
                GC.SuppressFinalize(this);

            // FIX: drop the list after freeing so a double Dispose does not
            // call GCHandle.Free on already-freed handles (which throws).
            if (m_gcHandles != null) {
                foreach (var gcHandle in m_gcHandles)
                    gcHandle.Free();
                m_gcHandles = null;
            }
        }

        /// <summary>
        /// Reserves <paramref name="byteCount"/> bytes and returns a pointer to
        /// the start of the reserved range. The memory stays valid until the
        /// allocator is disposed.
        /// </summary>
        public IntPtr Allocate(int byteCount) {
            // Grow until the request fits. The fallback block size doubles on
            // every allocation, so this terminates even for requests larger
            // than the current block. (Was ">=", which rejected an exact fit.)
            while (m_pNext + byteCount > m_pLast)
                AllocateBlock();

            var result = m_pNext;
            m_pNext += byteCount;

            // BUGFIX: return the START of the reserved range; the original
            // returned m_pNext after the bump, so callers wrote past the end
            // of their allocation.
            return (IntPtr)result;
        }

        public void Dispose() {
            Dispose(false);
        }
        ~Allocator() {
            Dispose(true);
        }
    }

    /// <summary>
    /// ITreeAdaptor-style tree builder over unmanaged <c>TokenData</c> records.
    /// Records are allocated from an <see cref="IAllocator"/>; child lists and
    /// interned token text live in managed side tables keyed by token pointer.
    /// </summary>
    public unsafe sealed class TokenTreeAdaptor {

        #region Private Data Members
        private int m_index;                                 // index assigned to the next created token
        private ICharStream m_stream;                        // source character stream (may be null)
        private string[] m_tokenNames;                       // token type -> display name (may be null)
        private IAllocator m_allocator;                      // backing storage for TokenData records
        private Dictionary<IntPtr, List<IntPtr>> m_children; // token -> child tokens side table

        // Interned token text. Symbol id 0 is reserved for "no text"
        // (m_symbols[0] == null), so a GetValueOrDefault miss (0) means "new".
        private List<string> m_symbols;
        private Dictionary<int, string> m_symbolById;
        private Dictionary<string, int> m_idBySymbol;
        #endregion

        #region Constructor
        public TokenTreeAdaptor() {
            // an adaptor without an allocator is unusable
            throw new NotSupportedException();
        }
        public TokenTreeAdaptor(IAllocator allocator, string[] tokenNames = null, ICharStream stream = null) {
            m_stream = stream;
            m_tokenNames = tokenNames;
            m_allocator = allocator;
            m_children = new Dictionary<IntPtr, List<IntPtr>>();

            m_symbols = new List<string>();
            m_symbols.Add(null); // reserve symbol id 0 for "no text"

            m_symbolById = new Dictionary<int, string>();
            m_idBySymbol = new Dictionary<string, int>();
        }
        #endregion

        #region Private Members
        private IntPtr GetTokenIntPtr(NillableToken token) {
            return (IntPtr)token.Pointer;
        }
        // Copies the record into freshly allocated unmanaged storage.
        private TokenData* CreateToken(TokenData token) {
            var pNextToken = (TokenData*)m_allocator.Allocate(sizeof(TokenData));

            *pNextToken = token;
            return pNextToken;
        }
        #endregion

        #region Internal Members
        internal void SetChildren(IntPtr pToken, List<IntPtr> children) {
            m_children[pToken] = children;
        }
        // Returns null when the token has no child list yet.
        internal List<IntPtr> GetChildren(IntPtr pToken) {
            return m_children.GetValueOrDefault(pToken);
        }
        internal string GetTokenName(int token) {
            if (m_tokenNames == null)
                return token.ToString();
            return m_tokenNames[token];
        }
        internal void SetTextInternal(NillableToken token, string text) {
            // NOTE(review): faults if token is nil (null pointer) — assumed
            // callers only pass real tokens; confirm.
            SetTextInternal(token.Pointer, text);
        }
        // Interns the text and stamps its symbol id onto the token. Null text
        // is a no-op (the token keeps whatever symbol it already has).
        internal void SetTextInternal(TokenData* pToken, string text) {
            if (text == null)
                return;

            var id = m_idBySymbol.GetValueOrDefault(text);
            if (id == 0) {
                // first occurrence: intern under a fresh id
                m_idBySymbol[text] = id = m_symbols.Count;
                // BUGFIX: the reverse map was never populated, so
                // GetTextInternal always returned null.
                m_symbolById[id] = text;
                m_symbols.Add(text);
            }

            pToken->Symbol = id;
        }
        internal string GetTextInternal(NillableToken token) {
            return GetTextInternal(token.Pointer);
        }
        // Returns null for tokens that never had text set (symbol id 0).
        internal string GetTextInternal(TokenData* pToken) {
            return m_symbolById.GetValueOrDefault(pToken->Symbol);
        }
        #endregion

        #region ITreeAdaptor Construction (Create)
        /// <summary>Creates a nil token: a rootless list used while building trees.</summary>
        public NillableToken Nil() {
            return new NillableToken(this);
        }
        public NillableToken Create(
            int type = 0, string text = null, int channel = 0,
            int position = 0, int length = 0, int line = 0, int column = 0,
            int symbolId = 0) {

            var pToken = CreateToken(
                new TokenData(
                    index: m_index++,
                    type: type,
                    channel: channel,
                    line: line,
                    column: column,
                    position: position,
                    length: length,
                    // BUGFIX: symbolId was accepted but silently ignored;
                    // a non-null text still takes precedence below.
                    symbol: symbolId)
            );

            SetTextInternal(pToken, text);

            return new NillableToken(this, pToken);
        }
        /// <summary>Creates a detached copy of <paramref name="token"/> (children are not copied).</summary>
        public NillableToken Create(NillableToken token) {
            var result = Create(
                token.Type,
                GetTextInternal(token),
                token.Channel,
                token.Position,
                token.Length,
                token.Line,
                token.Column);

            return result;
        }
        public NillableToken Create(NillableToken fromToken, string text) {
            var result = Create(fromToken);
            SetTextInternal(result, text);
            return result;
        }
        public NillableToken Create(int tokenType, NillableToken fromToken) {
            // BUGFIX: pass the requested type into Create instead of assigning
            // result.Type afterwards (NillableToken.Type exposes no setter).
            return Create(
                tokenType,
                GetTextInternal(fromToken),
                fromToken.Channel,
                fromToken.Position,
                fromToken.Length,
                fromToken.Line,
                fromToken.Column);
        }
        public NillableToken Create(int tokenType, NillableToken fromToken, string text) {
            var result = Create(tokenType, fromToken);
            SetTextInternal(result, text);
            return result;
        }
        #endregion

        #region ITreeAdaptor Construction (Duplication)
        // single-node duplication maps onto Create; whole-tree duplication is
        // not supported
        public NillableToken DupNode(NillableToken token) {
            return Create(token);
        }
        public NillableToken DupNode(int type, NillableToken token) {
            return Create(type, token);
        }
        public NillableToken DupNode(NillableToken token, string text) {
            return Create(token, text);
        }
        public NillableToken DupNode(int type, NillableToken token, string text) {
            return Create(type, token, text);
        }
        public NillableToken DupTree(NillableToken tree) {
            throw new NotSupportedException();
        }
        #endregion

        #region ITreeAdaptor Construction
        public void SetParent(NillableToken token, NillableToken parent) {
            // NOTE(review): NillableToken.Parent currently has no setter (it is
            // commented out) — confirm whether it should be restored.
            token.Parent = parent;
        }
        public void SetType(NillableToken token, int type) {
            // NOTE(review): NillableToken.Type currently has no setter.
            token.Type = type;
        }
        public void SetText(NillableToken token, string text) {
            SetTextInternal(token, text);
        }
        public void SetTokenBoundaries(NillableToken token, NillableToken startToken, NillableToken stopToken) {
            // NOTE(review): NillableToken.StartToken/EndToken setters are
            // commented out — confirm whether they should be restored.
            token.StartToken = startToken;
            token.EndToken = stopToken;
        }
        public void SetChildIndex(NillableToken index, int childIndex) {
            throw new NotSupportedException();
        }

        /// <summary>
        /// Adds <paramref name="child"/> to <paramref name="token"/>'s child
        /// list; when child is a nil list, its children are added instead.
        /// </summary>
        public void AddChild(NillableToken token, NillableToken child) {

            // child is a list with no elements: nothing to add
            if (child.IsNil && !child.HasChildren)
                return;

            // re-parent the incoming node(s)
            if (child.IsNil) {

                // child is a list
                foreach (var o in child.Children()) {
                    var x = o;
                    x.Parent = token;
                }

            } else {
                child.Parent = token;
            }

            var children = token.GetChildren();

            // if the parent has no list to hold children
            if (children == null) {

                // and the child is a list, adopt that list wholesale
                if (child.IsNil) {
                    // BUGFIX: the adopted list must be recorded against the
                    // parent; assigning only the local variable lost it.
                    token.SetChildren(child.GetChildren());
                    return;
                }

                // else create a new child list
                children = new List<IntPtr>();

                // associate the list with the parent
                token.SetChildren(children);
            }

            // add children to the parent's list
            if (child.IsNil)
                children.AddRange(child.GetChildren());
            else
                children.Add((IntPtr)child);
        }
        public void SetChild(NillableToken token, int childIndex, NillableToken child) {

            // re-parent the replacement child
            child.Parent = token;

            token.GetChildren()[childIndex] = child;
        }
        /// <summary>Removes and returns the child at <paramref name="childIndex"/>.</summary>
        public NillableToken DeleteChild(NillableToken token, int childIndex) {
            var children = token.GetChildren();
            var deleted = children[childIndex];
            children.RemoveAt(childIndex);

            // detach: the removed subtree no longer has a parent
            var tokenData = (TokenData*)deleted;
            tokenData->Parent = null;

            return new NillableToken(this, tokenData);
        }
        /// <summary>
        /// Replaces children [startChildIndex, stopChildIndex] of
        /// <paramref name="parent"/> with <paramref name="token"/> (or with
        /// token's children when it is a nil list).
        /// </summary>
        public void ReplaceChildren(NillableToken parent, int startChildIndex, int stopChildIndex, NillableToken token) {
            var children = parent.GetChildren();
            children.RemoveRange(startChildIndex, stopChildIndex - startChildIndex + 1);

            var pToken = GetTokenIntPtr(token);

            if (IsNil(token)) {

                foreach (var o in token.Children()) {
                    var x = o;
                    // BUGFIX: the spliced-in children belong to `parent`;
                    // they were being re-parented to the nil list itself.
                    x.Parent = parent;
                }

                children.InsertRange(startChildIndex, token.GetChildren());

            } else {

                ((TokenData*)pToken)->Parent = parent.Pointer;

                children.Insert(startChildIndex, pToken);
            }
        }

        public NillableToken BecomeRoot(NillableToken newRoot, NillableToken oldRoot) {
            // normalize root: a nil new root must wrap exactly one element
            if (newRoot.IsNil)
                newRoot = newRoot.Children().Single();

            // AddChild takes care of oldRoot being nil
            AddChild(newRoot, oldRoot);

            return newRoot;
        }

        /// <summary>
        /// Flattens a rule's result: a nil list with zero children becomes the
        /// default token, a list with one child becomes that child, anything
        /// else is returned unchanged.
        /// </summary>
        public NillableToken RulePostProcessing(NillableToken root) {
            // already an element
            if (!root.IsNil)
                return root;

            // list with no elements
            if (!root.HasChildren)
                return default(NillableToken);

            // list with one element
            var children = root.GetChildren();
            if (children.Count == 1)
                return new NillableToken(this, (TokenData*)children[0]);

            // list
            return root;
        }
        #endregion

        #region ITreeAdaptor Construction (Debug)
        // not supported
        public int GetUniqueID(NillableToken node) {
            throw new NotSupportedException();
        }
        public NillableToken ErrorNode(IMyTokenStream input, NillableToken start, NillableToken stop, RecognitionException e) {
            throw new NotSupportedException();
        }
        #endregion

        #region ITreeAdaptor Content
        public int GetType(NillableToken token) {
            return token.Type;
        }
        public string GetText(NillableToken token) {
            return GetTextInternal(token);
        }
        public NillableToken GetToken(NillableToken token) {
            return token;
        }
        public int GetTokenStartIndex(NillableToken token) {
            return token.Position;
        }
        // Inclusive stop offset; a zero-length token has no last character.
        public int GetTokenStopIndex(NillableToken token) {
            if (token.Length == 0)
                throw new InvalidOperationException();

            return token.Position + token.Length - 1;
        }
        #endregion

        #region ITreeAdaptor Navigation
        public bool IsNil(NillableToken token) {
            return token.IsNil;
        }
        public NillableToken GetChild(NillableToken token, int i) {
            var children = token.GetChildren();
            if (children == null)
                throw new InvalidOperationException();

            return new NillableToken(this, (TokenData*)children[i]);
        }
        public int GetChildCount(NillableToken token) {
            var children = token.GetChildren();
            if (children == null)
                return 0;

            return children.Count;
        }
        public NillableToken GetParent(NillableToken token) {
            throw new NotImplementedException();
        }
        public int GetChildIndex(NillableToken token) {
            throw new NotImplementedException();
        }
        #endregion

        #region Public Members
        public IAllocator Allocator {
            get { return m_allocator; }
        }
        public ICharStream Stream {
            get { return m_stream; }
        }
        #endregion
    }

    public abstract class MyBaseRecognizer {
        public const int MemoRuleFailed = -2;
        public const int MemoRuleUnknown = -1;
        public const int InitialFollowStackSize = 100;

        // copies from Token object for convenience in actions
        public const int DefaultTokenChannel = TokenChannels.Default;
        public const int Hidden = TokenChannels.Hidden;

        public const string NextTokenRuleName = "nextToken";

        /** <summary>
         *  State of a lexer, parser, or tree parser are collected into a state
         *  object so the state can be shared.  This sharing is needed to
         *  have one grammar import others and share same error variables
         *  and other state variables.  It's a kind of explicit multiple
         *  inheritance via delegation of methods and shared state.
         *  </summary>
         */
        protected internal MyRecognizerSharedState state;

        /** <summary>Creates a recognizer with a fresh shared state.</summary> */
        public MyBaseRecognizer()
            : this(new MyRecognizerSharedState()) {
        }

        /** <summary>
         *  Creates a recognizer over the given shared state; a fresh state is
         *  substituted when <paramref name="state"/> is null.
         *  </summary>
         */
        public MyBaseRecognizer(MyRecognizerSharedState state) {
            if (state == null) {
                state = new MyRecognizerSharedState();
            }
            this.state = state;
            // NOTE(review): virtual call from a constructor — subclasses
            // overriding InitDFAs run before their own constructor body.
            InitDFAs();
        }

        // Writer that receives trace output; not consumed within this chunk.
        public TextWriter TraceDestination {
            get;
            set;
        }

        /** <summary>Replaces the recognizer's shared state object.</summary> */
        public virtual void SetState(MyRecognizerSharedState value) {
            this.state = value;
        }

        /** <summary>Hook for subclasses to initialize their DFAs; the base implementation does nothing.</summary> */
        protected virtual void InitDFAs() {
        }

        /** <summary>Resets error-recovery, backtracking and memoization state; subclasses must rewind the input stream themselves.</summary> */
        public virtual void Reset() {
            if (state == null)
                return; // no shared state to reset

            // clear error-recovery bookkeeping
            state._fsp = -1;
            state.errorRecovery = false;
            state.lastErrorIndex = -1;
            state.failed = false;
            state.syntaxErrors = 0;

            // clear backtracking depth and wipe the rule-memoization cache
            state.backtracking = 0;
            if (state.ruleMemo != null)
                Array.Clear(state.ruleMemo, 0, state.ruleMemo.Length);
        }


        /** <summary>
         *  Matches the current input symbol against ttype, attempting
         *  single-token insertion/deletion recovery on mismatch; throws
         *  MismatchedTokenException when recovery also fails.
         *  </summary>
         *
         *  <remarks>
         *  To disable single-token insertion/deletion recovery, override
         *  recoverFromMismatchedToken() and have it throw; any error in a rule
         *  then exits the rule immediately, which resynchronizes to the set of
         *  symbols that can follow the rule reference.
         *  </remarks>
         */
        public virtual object Match(IIntStream input, int ttype, BitSet follow) {
            object matchedSymbol = GetCurrentInputSymbol(input);

            if (input.LA(1) != ttype) {
                // while backtracking we only record the failure
                if (state.backtracking > 0) {
                    state.failed = true;
                    return matchedSymbol;
                }
                // otherwise attempt single-token recovery
                return RecoverFromMismatchedToken(input, ttype, follow);
            }

            // successful match: consume and leave error-recovery mode
            input.Consume();
            state.errorRecovery = false;
            state.failed = false;
            return matchedSymbol;
        }

        /** <summary>Match the wildcard: consume any single symbol and clear the error/failed flags.</summary> */
        public virtual void MatchAny(IIntStream input) {
            state.errorRecovery = false;
            state.failed = false;
            input.Consume();
        }

        /** <summary>
         *  Single-token deletion heuristic: the current token is "unwanted"
         *  when the token right after it is the one we expected.
         *  </summary>
         */
        public virtual bool MismatchIsUnwantedToken(IIntStream input, int ttype) {
            return input.LA(2) == ttype;
        }

        /** <summary>
         *  Single-token insertion heuristic: the expected token is "missing"
         *  when the current token could legally follow this grammar element.
         *  </summary>
         */
        public virtual bool MismatchIsMissingToken(IIntStream input, BitSet follow) {
            // without follow-set information we cannot justify an insertion
            if (follow == null)
                return false;

            // EndOfRule in the follow set means the caller's context matters:
            // fold in the context-sensitive FOLLOW, and drop EndOfRule unless
            // we are at the start symbol.
            if (follow.Member(MyTokenTypes.EndOfRule)) {
                follow = follow.Or(ComputeContextSensitiveRuleFOLLOW());
                if (state._fsp >= 0)
                    follow.Remove(MyTokenTypes.EndOfRule);
            }

            // If the current token is consistent with what could come after
            // this element, the expected token is missing and recovery may
            // "insert" it. EndOfRule is kept in the set (BitSet cannot hold -1
            // for EOF) to indicate that falling off the start symbol — i.e.
            // EOF — can follow.
            return follow.Member(input.LA(1)) || follow.Member(MyTokenTypes.EndOfRule);
        }

        /** <summary>Report a recognition problem.</summary>
         *
         *  <remarks>
         *  This method sets errorRecovery to indicate the parser is recovering
         *  not parsing.  Once in recovery mode, no errors are generated.
         *  To get out of recovery mode, the parser must successfully match
         *  a token (after a resync).  So it will go:
         *
         * 		1. error occurs
         * 		2. enter recovery mode, report error
         * 		3. consume until token found in resynch set
         * 		4. try to resume parsing
         * 		5. next match() will reset errorRecovery mode
         *
         *  If you override, make sure to update syntaxErrors if you care about that.
         *  </remarks>
         */
        public virtual void ReportError(RecognitionException e) {
            // if we've already reported an error and have not matched a token
            // yet successfully, don't report any errors (they are spurious).
            if (state.errorRecovery) {
                return;
            }
            state.syntaxErrors++; // don't count spurious errors
            state.errorRecovery = true;

            DisplayRecognitionError(this.TokenNames, e);
        }

        /** <summary>Formats the error as "&lt;header&gt; &lt;message&gt;" and emits it.</summary> */
        public virtual void DisplayRecognitionError(string[] tokenNames,
                                            RecognitionException e) {
            // header is computed first, then the message, matching emit order
            EmitErrorMessage(GetErrorHeader(e) + " " + GetErrorMessage(e, tokenNames));
        }

        /** <summary>What error message should be generated for the various exception types?</summary>
         *
         *  <remarks>
         *  Not very object-oriented code, but I like having all error message
         *  generation within one method rather than spread among all of the
         *  exception classes. This also makes it much easier for the exception
         *  handling because the exception classes do not have to have pointers back
         *  to this object to access utility routines and so on. Also, changing
         *  the message for an exception type would be difficult because you
         *  would have to subclass the exception, but then somehow get ANTLR
         *  to make those kinds of exception objects instead of the default.
         *  This looks weird, but trust me--it makes the most sense in terms
         *  of flexibility.
         *
         *  For grammar debugging, you will want to override this to add
         *  more information such as the stack frame with
         *  getRuleInvocationStack(e, this.getClass().getName()) and,
         *  for no viable alts, the decision description and state etc...
         *
         *  Override this to change the message generated for one or more
         *  exception types.
         *  </remarks>
         */
        public virtual string GetErrorMessage(RecognitionException e, string[] tokenNames) {
            string msg = e.Message;
            // NOTE: subtype checks must precede base-type checks (e.g.
            // UnwantedTokenException before MismatchedTokenException), so do
            // not reorder these branches.
            if (e is UnwantedTokenException) {
                UnwantedTokenException ute = (UnwantedTokenException)e;
                msg = "extraneous input " + GetTokenErrorDisplay(ute.UnexpectedToken) +
                    " expecting " + GetTokenName(ute.Expecting, tokenNames);
            } else if (e is MissingTokenException) {
                MissingTokenException mte = (MissingTokenException)e;
                msg = "missing " + GetTokenName(mte.Expecting, tokenNames) +
                    " at " + GetTokenErrorDisplay(e.Token);
            } else if (e is MismatchedTokenException) {
                MismatchedTokenException mte = (MismatchedTokenException)e;
                msg = "mismatched input " + GetTokenErrorDisplay(e.Token) +
                    " expecting " + GetTokenName(mte.Expecting, tokenNames);
            } else if (e is MismatchedTreeNodeException) {
                MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
                // workaround for a .NET framework bug (NullReferenceException)
                string nodeText = (mtne.Node != null) ? mtne.Node.ToString() ?? string.Empty : string.Empty;
                msg = "mismatched tree node: " + nodeText +
                    " expecting " + GetTokenName(mtne.Expecting, tokenNames);
            } else if (e is NoViableAltException) {
                //NoViableAltException nvae = (NoViableAltException)e;
                // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
                // and "(decision="+nvae.decisionNumber+") and
                // "state "+nvae.stateNumber
                msg = "no viable alternative at input " + GetTokenErrorDisplay(e.Token);
            } else if (e is EarlyExitException) {
                //EarlyExitException eee = (EarlyExitException)e;
                // for development, can add "(decision="+eee.decisionNumber+")"
                msg = "required (...)+ loop did not match anything at input " +
                    GetTokenErrorDisplay(e.Token);
            } else if (e is MismatchedSetException) {
                MismatchedSetException mse = (MismatchedSetException)e;
                msg = "mismatched input " + GetTokenErrorDisplay(e.Token) +
                    " expecting set " + mse.Expecting;
            } else if (e is MismatchedNotSetException) {
                MismatchedNotSetException mse = (MismatchedNotSetException)e;
                msg = "mismatched input " + GetTokenErrorDisplay(e.Token) +
                    " expecting set " + mse.Expecting;
            } else if (e is FailedPredicateException) {
                FailedPredicateException fpe = (FailedPredicateException)e;
                msg = "rule " + fpe.RuleName + " failed predicate: {" +
                    fpe.PredicateText + "}?";
            }
            return msg;
        }

        /** <summary>
         *  Map a token type to its display name for error messages.
         *  Handles EndOfFile specially and guards against a null tokenNames
         *  table (the base TokenNames property returns null) or an
         *  out-of-range token type, returning "&lt;unknown&gt;" instead of
         *  throwing, which the previous inline lookups could do.
         *  </summary>
         */
        private static string GetTokenName(int tokenType, string[] tokenNames) {
            if (tokenType == MyTokenTypes.EndOfFile)
                return "EndOfFile";
            if (tokenNames == null || tokenType < 0 || tokenType >= tokenNames.Length)
                return "<unknown>";
            return tokenNames[tokenType];
        }

        /** <summary>
         *  Get number of recognition errors (lexer, parser, tree parser).  Each
         *  recognizer tracks its own number.  So parser and lexer each have
         *  separate count.  Does not count the spurious errors found between
         *  an error and next valid token match
         *  </summary>
         *
         *  <seealso cref="reportError()"/>
         */
        public virtual int NumberOfSyntaxErrors {
            get {
                // Incremented by ReportError; errors suppressed while in
                // recovery mode are deliberately not counted.
                return state.syntaxErrors;
            }
        }

        /** <summary>What is the error header, normally line/character position information?</summary> */
        public virtual string GetErrorHeader(RecognitionException e) {
            // Produce "<source> line L:C" with a 1-based character position;
            // the source-name prefix (and its trailing space) is omitted when empty.
            string sourcePrefix = SourceName ?? string.Empty;
            if (sourcePrefix.Length != 0)
                sourcePrefix = sourcePrefix + ' ';

            return sourcePrefix + "line " + e.Line + ":" + (e.CharPositionInLine + 1);
        }

        /** <summary>
         *  How should a token be displayed in an error message? The default
         *  is to display just the text, but during development you might
         *  want to have a lot of information spit out.  Override in that case
         *  to use t.ToString() (which, for CommonToken, dumps everything about
         *  the token). This is better than forcing you to override a method in
         *  your token objects because you don't have to go modify your lexer
         *  so that it creates a new Java type.
         *  </summary>
         */
        public virtual string GetTokenErrorDisplay(IToken t) {
            string s = t.Text;
            if (s == null) {
                if (t.Type == MyTokenTypes.EndOfFile) {
                    s = "<EOF>";
                } else {
                    s = "<" + t.Type + ">";
                }
            }
            // Escape whitespace so the token prints on one line.
            // String.Replace is used instead of Regex.Replace: in .NET regex
            // replacement strings backslash is NOT an escape character (only
            // '$' is special), so the old Regex.Replace(s, "\n", "\\\\n")
            // emitted a doubled backslash ("\\n") in error messages.
            s = s.Replace("\n", "\\n");
            s = s.Replace("\r", "\\r");
            s = s.Replace("\t", "\\t");
            return "'" + s + "'";
        }

        /** <summary>Override this method to change where error messages go</summary> */
        public virtual void EmitErrorMessage(string msg) {
            // Default sink is the trace destination; silently dropped when unset.
            var destination = TraceDestination;
            if (destination == null)
                return;
            destination.WriteLine(msg);
        }

        /** <summary>
         *  Recover from an error found on the input stream.  This is
         *  for NoViableAlt and mismatched symbol exceptions.  If you enable
         *  single token insertion and deletion, this will usually not
         *  handle mismatched symbol exceptions but there could be a mismatched
         *  token that the match() routine could not recover from.
         *  </summary>
         */
        public virtual void Recover(IIntStream input, RecognitionException re) {
            // A second error at the same token index means LT(1) is in the
            // recovery set and nothing would be consumed; eat one token as a
            // failsafe against an infinite loop.
            bool stuckAtSameToken = state.lastErrorIndex == input.Index;
            if (stuckAtSameToken)
                input.Consume();

            state.lastErrorIndex = input.Index;

            // Resynchronize: skip ahead until a token in the combined follow
            // set of every rule on the invocation stack is seen.
            BitSet resyncSet = ComputeErrorRecoverySet();
            BeginResync();
            ConsumeUntil(input, resyncSet);
            EndResync();
        }

        /** <summary>
         *  A hook to listen in on the token consumption during error recovery.
         *  The DebugParser subclasses this to fire events to the listener.
         *  </summary>
         */
        public virtual void BeginResync() {
            // Intentionally empty: a hook for subclasses (e.g. a debug parser)
            // to observe the start of token consumption during error recovery.
        }

        public virtual void EndResync() {
            // Intentionally empty: counterpart to BeginResync, called when
            // error-recovery token consumption finishes.
        }

        /*  Compute the error recovery set for the current rule.  During
         *  rule invocation, the parser pushes the set of tokens that can
         *  follow that rule reference on the stack; this amounts to
         *  computing FIRST of what follows the rule reference in the
         *  enclosing rule. This local follow set only includes tokens
         *  from within the rule; i.e., the FIRST computation done by
         *  ANTLR stops at the end of a rule.
         *
         *  EXAMPLE
         *
         *  When you find a "no viable alt exception", the input is not
         *  consistent with any of the alternatives for rule r.  The best
         *  thing to do is to consume tokens until you see something that
         *  can legally follow a call to r *or* any rule that called r.
         *  You don't want the exact set of viable next tokens because the
         *  input might just be missing a token--you might consume the
         *  rest of the input looking for one of the missing tokens.
         *
         *  Consider grammar:
         *
         *  a : '[' b ']'
         *    | '(' b ')'
         *    ;
         *  b : c '^' INT ;
         *  c : ID
         *    | INT
         *    ;
         *
         *  At each rule invocation, the set of tokens that could follow
         *  that rule is pushed on a stack.  Here are the various "local"
         *  follow sets:
         *
         *  FOLLOW(b1_in_a) = FIRST(']') = ']'
         *  FOLLOW(b2_in_a) = FIRST(')') = ')'
         *  FOLLOW(c_in_b) = FIRST('^') = '^'
         *
         *  Upon erroneous input "[]", the call chain is
         *
         *  a -> b -> c
         *
         *  and, hence, the follow context stack is:
         *
         *  depth  local follow set     after call to rule
         *    0         <EOF>                    a (from main())
         *    1          ']'                     b
         *    2          '^'                     c
         *
         *  Notice that ')' is not included, because b would have to have
         *  been called from a different context in rule a for ')' to be
         *  included.
         *
         *  For error recovery, we cannot consider FOLLOW(c)
         *  (context-sensitive or otherwise).  We need the combined set of
         *  all context-sensitive FOLLOW sets--the set of all tokens that
         *  could follow any reference in the call chain.  We need to
         *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
         *  we resync'd to that token, we'd consume until EOF.  We need to
         *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
         *  In this case, for input "[]", LA(1) is in this set so we would
         *  not consume anything and after printing an error rule c would
         *  return normally.  It would not find the required '^' though.
         *  At this point, it gets a mismatched token error and throws an
         *  exception (since LA(1) is not in the viable following token
         *  set).  The rule exception handler tries to recover, but finds
         *  the same recovery set and doesn't consume anything.  Rule b
         *  exits normally returning to rule a.  Now it finds the ']' (and
         *  with the successful match exits errorRecovery mode).
         *
         *  So, you can see that the parser walks up the call chain looking
         *  for the token that was a member of the recovery set.
         *
         *  Errors are not generated in errorRecovery mode.
         *
         *  ANTLR's error recovery mechanism is based upon original ideas:
         *
         *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
         *
         *  and
         *
         *  "A note on error recovery in recursive descent parsers":
         *  http://portal.acm.org/citation.cfm?id=947902.947905
         *
         *  Later, Josef Grosch had some good ideas:
         *
         *  "Efficient and Comfortable Error Recovery in Recursive Descent
         *  Parsers":
         *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
         *
         *  Like Grosch I implemented local FOLLOW sets that are combined
         *  at run-time upon error to avoid overhead during parsing.
         */
        protected virtual BitSet ComputeErrorRecoverySet() {
            // Non-exact mode: union the local follow sets of every rule on the
            // invocation stack (see the long discussion above).
            bool exactFollow = false;
            return CombineFollows(exactFollow);
        }

        /** <summary>
         *  Compute the context-sensitive FOLLOW set for current rule.
         *  This is set of token types that can follow a specific rule
         *  reference given a specific call chain.  You get the set of
         *  viable tokens that can possibly come next (lookahead depth 1)
         *  given the current call chain.  Contrast this with the
         *  definition of plain FOLLOW for rule r:
         *  </summary>
         *
         *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
         *
         *  where x in T* and alpha, beta in V*; T is set of terminals and
         *  V is the set of terminals and nonterminals.  In other words,
         *  FOLLOW(r) is the set of all tokens that can possibly follow
         *  references to r in *any* sentential form (context).  At
         *  runtime, however, we know precisely which context applies as
         *  we have the call chain.  We may compute the exact (rather
         *  than covering superset) set of following tokens.
         *
         *  For example, consider grammar:
         *
         *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
         *       | "return" expr '.'
         *       ;
         *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
         *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
         *       | '(' expr ')'
         *       ;
         *
         *  The FOLLOW sets are all inclusive whereas context-sensitive
         *  FOLLOW sets are precisely what could follow a rule reference.
         *  For input "i=(3);", here is the derivation:
         *
         *  stat => ID '=' expr ';'
         *       => ID '=' atom ('+' atom)* ';'
         *       => ID '=' '(' expr ')' ('+' atom)* ';'
         *       => ID '=' '(' atom ')' ('+' atom)* ';'
         *       => ID '=' '(' INT ')' ('+' atom)* ';'
         *       => ID '=' '(' INT ')' ';'
         *
         *  At the "3" token, you'd have a call chain of
         *
         *    stat -> expr -> atom -> expr -> atom
         *
         *  What can follow that specific nested ref to atom?  Exactly ')'
         *  as you can see by looking at the derivation of this specific
         *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
         *
         *  You want the exact viable token set when recovering from a
         *  token mismatch.  Upon token mismatch, if LA(1) is member of
         *  the viable next token set, then you know there is most likely
         *  a missing token in the input stream.  "Insert" one by just not
         *  throwing an exception.
         */
        protected virtual BitSet ComputeContextSensitiveRuleFOLLOW() {
            // Exact mode: stop combining as soon as a local follow set without
            // the end-of-rule marker is encountered.
            bool exactFollow = true;
            return CombineFollows(exactFollow);
        }

        // what is exact? it seems to only add sets from above on stack
        // if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
        // Why would we ever want them all?  Maybe no viable alt instead of
        // mismatched token?
        // what is exact? it seems to only add sets from above on stack
        // if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
        // Why would we ever want them all?  Maybe no viable alt instead of
        // mismatched token?
        protected virtual BitSet CombineFollows(bool exact) {
            BitSet combined = new BitSet();

            // Walk the follow-set stack from the most recent rule invocation
            // (state._fsp) down to the start rule (depth 0), OR-ing each
            // local follow set into the result.
            for (int depth = state._fsp; depth >= 0; depth--) {
                BitSet localFollow = (BitSet)state.following[depth];
                combined.OrInPlace(localFollow);

                if (!exact)
                    continue;

                // Exact mode: a set without end-of-rule ends the combination.
                if (!localFollow.Member(MyTokenTypes.EndOfRule))
                    break;

                // Keep EOR in the result only when contributed at depth 0
                // (the start rule); that signals FOLLOW(start rule), i.e. EOF.
                if (depth > 0)
                    combined.Remove(MyTokenTypes.EndOfRule);
            }

            return combined;
        }

        /** <summary>Attempt to recover from a single missing or extra token.</summary>
         *
         *  EXTRA TOKEN
         *
         *  LA(1) is not what we are looking for.  If LA(2) has the right token,
         *  however, then assume LA(1) is some extra spurious token.  Delete it
         *  and LA(2) as if we were doing a normal match(), which advances the
         *  input.
         *
         *  MISSING TOKEN
         *
         *  If current token is consistent with what could come after
         *  ttype then it is ok to "insert" the missing token, else throw
         *  exception For example, Input "i=(3;" is clearly missing the
         *  ')'.  When the parser returns from the nested call to expr, it
         *  will have call chain:
         *
         *    stat -> expr -> atom
         *
         *  and it will be trying to match the ')' at this point in the
         *  derivation:
         *
         *       => ID '=' '(' INT ')' ('+' atom)* ';'
         *                          ^
         *  match() will see that ';' doesn't match ')' and report a
         *  mismatched token error.  To recover, it sees that LA(1)==';'
         *  is in the set of tokens that can follow the ')' token
         *  reference in rule atom.  It can assume that you forgot the ')'.
         */
        protected virtual object RecoverFromMismatchedToken(IIntStream input, int ttype, BitSet follow) {
            // Strategy 1: single-token deletion.  If LA(2) is the token we
            // want, treat LA(1) as spurious: delete it and match as normal.
            if (MismatchIsUnwantedToken(input, ttype)) {
                RecognitionException unwanted = new UnwantedTokenException(ttype, input, TokenNames);
                BeginResync();
                input.Consume(); // simply delete the extra token
                EndResync();
                // Report only after consuming so listeners see the token in the exception.
                ReportError(unwanted);
                // Return the token we are actually matching.
                object matched = GetCurrentInputSymbol(input);
                input.Consume(); // move past the ttype token as if all were ok
                return matched;
            }

            // Strategy 2: single-token insertion.  If LA(1) could legally
            // follow ttype, conjure up the missing token and carry on.
            if (MismatchIsMissingToken(input, follow)) {
                object conjured = GetMissingSymbol(input, null, ttype, follow);
                RecognitionException missing = new MissingTokenException(ttype, input, conjured);
                // Report after inserting so listeners see the token in the exception.
                ReportError(missing);
                return conjured;
            }

            // Neither recovery applies; must throw.
            throw new MismatchedTokenException(ttype, input, TokenNames);
        }

        /** Not currently used */
        public virtual object RecoverFromMismatchedSet(IIntStream input,
                                               RecognitionException e,
                                               BitSet follow) {
            // Single-token insertion is the only recovery attempted for sets.
            // TODO: do single-token deletion, as RecoverFromMismatchedToken does.
            if (!MismatchIsMissingToken(input, follow))
                throw e;

            ReportError(e);
            // We don't know how to conjure up a concrete token for sets yet,
            // so the expected type is Invalid.
            return GetMissingSymbol(input, e, MyTokenTypes.Invalid, follow);
        }

        /** <summary>
         *  Match needs to return the current input symbol, which gets put
         *  into the label for the associated token ref; e.g., x=ID.  Token
         *  and tree parsers need to return different objects. Rather than test
         *  for input stream type or change the IntStream interface, I use
         *  a simple method to ask the recognizer to tell me what the current
         *  input symbol is.
         *  </summary>
         *
         *  <remarks>This is ignored for lexers.</remarks>
         */
        protected virtual object GetCurrentInputSymbol(IIntStream input) {
            // Base recognizer has no notion of a current symbol; token and
            // tree parsers override this.  Ignored for lexers (see remarks above).
            return null;
        }

        /** <summary>Conjure up a missing token during error recovery.</summary>
         *
         *  <remarks>
         *  The recognizer attempts to recover from single missing
         *  symbols. But, actions might refer to that missing symbol.
         *  For example, x=ID {f($x);}. The action clearly assumes
         *  that there has been an identifier matched previously and that
         *  $x points at that token. If that token is missing, but
         *  the next token in the stream is what we want we assume that
         *  this token is missing and we keep going. Because we
         *  have to return some token to replace the missing token,
         *  we have to conjure one up. This method gives the user control
         *  over the tokens returned for missing tokens. Mostly,
         *  you will want to create something special for identifier
         *  tokens. For literals such as '{' and ',', the default
         *  action in the parser or tree parser works. It simply creates
         *  a CommonToken of the appropriate type. The text will be the token.
         *  If you change what tokens must be created by the lexer,
         *  override this method to create the appropriate tokens.
         *  </remarks>
         */
        protected virtual object GetMissingSymbol(IIntStream input,
                                          RecognitionException e,
                                          int expectedTokenType,
                                          BitSet follow) {
            // Base recognizer cannot conjure tokens; parser/tree-parser
            // subclasses override this to create a stand-in for the missing symbol.
            return null;
        }

        /** <summary>Consume tokens until one matches the given token type (or EOF is reached).</summary> */
        public virtual void ConsumeUntil(IIntStream input, int tokenType) {
            for (int la = input.LA(1);
                 la != MyTokenTypes.EndOfFile && la != tokenType;
                 la = input.LA(1)) {
                input.Consume();
            }
        }

        /** <summary>Consume tokens until one matches the given token set</summary> */
        /** <summary>Consume tokens until one matches a member of the given token set (or EOF is reached).</summary> */
        public virtual void ConsumeUntil(IIntStream input, BitSet set) {
            for (int la = input.LA(1);
                 la != MyTokenTypes.EndOfFile && !set.Member(la);
                 la = input.LA(1)) {
                input.Consume();
            }
        }

        /** <summary>Push a rule's follow set using our own hardcoded stack</summary> */
        /** <summary>Push a rule's follow set onto our own hardcoded stack.</summary> */
        protected void PushFollow(BitSet fset) {
            // Slot needed for the new top-of-stack after the increment below.
            int required = state._fsp + 2;
            if (required > state.following.Length) {
                // Grow geometrically so pushes stay amortized O(1).
                Array.Resize(ref state.following, state.following.Length * 2);
            }
            state._fsp++;
            state.following[state._fsp] = fset;
        }

        protected void PopFollow() {
            // Pop the top follow set by decrementing the stack pointer; the
            // array slot is left in place and overwritten by the next push.
            state._fsp--;
        }

        /** <summary>
         *  Return List<String> of the rules in your parser instance
         *  leading up to a call to this method.  You could override if
         *  you want more details such as the file/line info of where
         *  in the parser java code a rule is invoked.
         *  </summary>
         *
         *  <remarks>
         *  This is very useful for error messages and for context-sensitive
         *  error recovery.
         *  </remarks>
         */
        public virtual IList<string> GetRuleInvocationStack() {
            // Capture the current call stack (with file info) and extract the
            // grammar rule names from it.
            StackTrace currentStack = new StackTrace(true);
            return GetRuleInvocationStack(currentStack);
        }

        /** <summary>
         *  A more general version of GetRuleInvocationStack where you can
         *  pass in the StackTrace of, for example, a RecognitionException
         *  to get its rule stack trace.
         *  </summary>
         */
        public static IList<string> GetRuleInvocationStack(StackTrace trace) {
            if (trace == null)
                throw new ArgumentNullException("trace");

            List<string> rules = new List<string>();
            StackFrame[] frames = trace.GetFrames() ?? new StackFrame[0];

            // Walk outermost-first so the start rule appears at the head of the list.
            foreach (StackFrame frame in frames.Reverse()) {
                MethodBase method = frame.GetMethod();
                // Only methods tagged as grammar rules contribute to the stack.
                object[] attributes = method.GetCustomAttributes(typeof(GrammarRuleAttribute), true);
                if (attributes.Length > 0)
                    rules.Add(((GrammarRuleAttribute)attributes[0]).Name);
            }

            return rules;
        }

        public virtual int BacktrackingLevel {
            get {
                // Current backtracking level; 0 when not backtracking
                // (see TraceIn/TraceOut, which print it only when > 0).
                return state.backtracking;
            }
            set {
                state.backtracking = value;
            }
        }

        /** <summary>Return whether or not a backtracking attempt failed.</summary> */
        public virtual bool Failed {
            get {
                // Set true by AlreadyParsedRule when memoization recorded a
                // prior failure at the current input position.
                return state.failed;
            }
        }

        /** <summary>
         *  Used to print out token names like ID during debugging and
         *  error reporting.  The generated parsers implement a method
         *  that overrides this to point to their String[] tokenNames.
         *  </summary>
         */
        public virtual string[] TokenNames {
            get {
                // Base recognizer has no token table; generated recognizers
                // override this to return their string[] tokenNames.
                return null;
            }
        }

        /** <summary>
         *  For debugging and other purposes, might want the grammar name.
         *  Have ANTLR generate an implementation for this method.
         *  </summary>
         */
        public virtual string GrammarFileName {
            get {
                // Base returns null; ANTLR generates an implementation
                // returning the grammar file name.
                return null;
            }
        }

        // Name of the input source; used by GetErrorHeader as the prefix of
        // each error message header.
        public abstract string SourceName {
            get;
        }

        /** <summary>
         *  A convenience method for use most often with template rewrites.
         *  Convert a List<Token> to List<String>
         *  </summary>
         */
        /** <summary>
         *  A convenience method for use most often with template rewrites:
         *  converts a collection of tokens to the list of their Text values.
         *  Returns null when <paramref name="tokens"/> is null (preserving the
         *  original contract); individual Text values may themselves be null.
         *  </summary>
         */
        public virtual List<string> ToStrings(ICollection<IToken> tokens) {
            if (tokens == null)
                return null;

            // LINQ replaces the manual copy loop; behavior is unchanged.
            return tokens.Select(token => token.Text).ToList();
        }

        /** <summary>
         *  Given a rule number and a start token index number, return
         *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
         *  start index.  If this rule has parsed input starting from the
         *  start index before, then return where the rule stopped parsing.
         *  It returns the index of the last token matched by the rule.
         *  </summary>
         *
         *  <remarks>
         *  For now we use a hashtable and just the slow Object-based one.
         *  Later, we can make a special one for ints and also one that
         *  tosses out data after we commit past input position i.
         *  </remarks>
         */
        public virtual int GetRuleMemoization(int ruleIndex, int ruleStartIndex) {
            // Lazily create this rule's start-index -> stop-index table.
            // NOTE: this allocation is intentional even on a pure lookup --
            // Memoize only records results when the table already exists.
            if (state.ruleMemo[ruleIndex] == null) {
                state.ruleMemo[ruleIndex] = new Dictionary<int, int>();
            }

            int stopIndex;
            bool known = state.ruleMemo[ruleIndex].TryGetValue(ruleStartIndex, out stopIndex);
            return known ? stopIndex : MemoRuleUnknown;
        }

        /** <summary>
         *  Has this rule already parsed input at the current index in the
         *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
         *  If we attempted but failed to parse properly before, return
         *  MEMO_RULE_FAILED.
         *  </summary>
         *
         *  <remarks>
         *  This method has a side-effect: if we have seen this input for
         *  this rule and successfully parsed before, then seek ahead to
         *  1 past the stop token matched for this rule last time.
         *  </remarks>
         */
        public virtual bool AlreadyParsedRule(IIntStream input, int ruleIndex) {
            // Where (if anywhere) did this rule previously stop when started
            // at the current input position?
            int memoizedStopIndex = GetRuleMemoization(ruleIndex, input.Index);
            if (memoizedStopIndex == MemoRuleUnknown)
                return false;

            if (memoizedStopIndex == MemoRuleFailed) {
                // This rule is known to fail here; flag it so backtracking aborts.
                state.failed = true;
            } else {
                // Parsed successfully before: jump to one past the stop token.
                input.Seek(memoizedStopIndex + 1);
            }
            return true;
        }

        /** <summary>
         *  Record whether or not this rule parsed the input at this position
         *  successfully.  Use a standard java hashtable for now.
         *  </summary>
         */
        public virtual void Memoize(IIntStream input,
                            int ruleIndex,
                            int ruleStartIndex) {
            // Record a failure marker, or the index of the last token matched.
            int stopTokenIndex = state.failed ? MemoRuleFailed : input.Index - 1;
            if (state.ruleMemo == null) {
                if (TraceDestination != null)
                    TraceDestination.WriteLine("!!!!!!!!! memo array is null for " + GrammarFileName);
                // Previously fell through and dereferenced the null array below.
                return;
            }
            if (ruleIndex >= state.ruleMemo.Length) {
                if (TraceDestination != null)
                    TraceDestination.WriteLine("!!!!!!!!! memo size is " + state.ruleMemo.Length + ", but rule index is " + ruleIndex);
                // Previously fell through and indexed out of range below.
                return;
            }
            // Only record when GetRuleMemoization has created this rule's table.
            if (state.ruleMemo[ruleIndex] != null) {
                state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex;
            }
        }

        /** <summary>return how many rule/input-index pairs there are in total.</summary>
         *  TODO: this includes synpreds. :(
         */
        public virtual int GetRuleMemoizationCacheSize() {
            // No memo table at all means nothing is cached.
            if (state.ruleMemo == null)
                return 0;

            int total = 0;
            foreach (var ruleMap in state.ruleMemo) {
                if (ruleMap != null)
                    total += ruleMap.Count; // number of recorded input start indexes
            }
            return total;
        }

        /** <summary>Write a rule-entry trace line, if tracing is enabled.</summary> */
        public virtual void TraceIn(string ruleName, int ruleIndex, object inputSymbol) {
            var destination = TraceDestination;
            if (destination == null)
                return;

            destination.Write("enter " + ruleName + " " + inputSymbol);
            if (state.backtracking > 0)
                destination.Write(" backtracking=" + state.backtracking);
            destination.WriteLine();
        }

        /** <summary>Write a rule-exit trace line, if tracing is enabled.</summary> */
        public virtual void TraceOut(string ruleName, int ruleIndex, object inputSymbol) {
            var destination = TraceDestination;
            if (destination == null)
                return;

            destination.Write("exit " + ruleName + " " + inputSymbol);
            if (state.backtracking > 0) {
                destination.Write(" backtracking=" + state.backtracking);
                destination.Write(state.failed ? " failed" : " succeeded");
            }
            destination.WriteLine();
        }

        #region Debugging support
        /** <summary>
         *  The attached debug event listener; the base implementation has none.
         *  Recognizers generated with debugging enabled override this.
         *  </summary>
         */
        public virtual IDebugEventListener DebugListener {
            get {
                return null;
            }
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugEnterRule(string grammarFileName, string ruleName) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.EnterRule(grammarFileName, ruleName);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugExitRule(string grammarFileName, string ruleName) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.ExitRule(grammarFileName, ruleName);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugEnterSubRule(int decisionNumber) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.EnterSubRule(decisionNumber);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugExitSubRule(int decisionNumber) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.ExitSubRule(decisionNumber);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugEnterAlt(int alt) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.EnterAlt(alt);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugEnterDecision(int decisionNumber, bool couldBacktrack) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.EnterDecision(decisionNumber, couldBacktrack);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugExitDecision(int decisionNumber) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.ExitDecision(decisionNumber);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugLocation(int line, int charPositionInLine) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.Location(line, charPositionInLine);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugSemanticPredicate(bool result, string predicate) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.SemanticPredicate(result, predicate);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugBeginBacktrack(int level) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.BeginBacktrack(level);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugEndBacktrack(int level, bool successful) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.EndBacktrack(level, successful);
        }

        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugRecognitionException(RecognitionException ex) {
            // No-op unless a debug listener is attached.
            IDebugEventListener listener = DebugListener;
            if (listener == null)
                return;
            listener.RecognitionException(ex);
        }
        #endregion
    }
    /** <summary>
     *  The set of fields needed by an abstract recognizer to recognize input
     *  and recover from errors, shared between a parser/lexer and any
     *  recognizers it delegates to.
     *  </summary>
     */
    public class MyRecognizerSharedState {
        /** <summary>
         *  Track the set of token types that can follow any rule invocation.
         *  Stack grows upwards.  When it hits the max, it grows 2x in size
         *  and keeps going.
         *  </summary>
         */
        //public List<BitSet> following;
        public BitSet[] following;
        [CLSCompliant(false)]
        public int _fsp;

        /** <summary>
         *  This is true when we see an error and before having successfully
         *  matched a token.  Prevents generation of more than one error message
         *  per error.
         *  </summary>
         */
        public bool errorRecovery;

        /** <summary>
         *  The index into the input stream where the last error occurred.
         * 	This is used to prevent infinite loops where an error is found
         *  but no token is consumed during recovery...another error is found,
         *  ad nauseam.  This is a failsafe mechanism to guarantee that at least
         *  one token/tree node is consumed for two errors.
         *  </summary>
         */
        public int lastErrorIndex;

        /** <summary>
         *  In lieu of a return value, this indicates that a rule or token
         *  has failed to match.  Reset to false upon valid token match.
         *  </summary>
         */
        public bool failed;

        /** <summary>Did the recognizer encounter a syntax error?  Track how many.</summary> */
        public int syntaxErrors;

        /** <summary>
         *  If 0, no backtracking is going on.  Safe to exec actions etc...
         *  If >0 then it's the level of backtracking.
         *  </summary>
         */
        public int backtracking;

        /** <summary>
         *  An array[size num rules] of Map&lt;Integer,Integer&gt; that tracks
         *  the stop token index for each rule.  ruleMemo[ruleIndex] is
         *  the memoization table for ruleIndex.  For key ruleStartIndex, you
         *  get back the stop token for associated rule or MEMO_RULE_FAILED.
         *  </summary>
         *
         *  <remarks>This is only used if rule memoization is on (which it is by default).</remarks>
         */
        public IDictionary<int, int>[] ruleMemo;


        // LEXER FIELDS (must be in same state object to avoid casting
        //               constantly in generated code and Lexer object) :(


        /** <summary>
         *  The goal of all lexer rules/methods is to create a token object.
         *  This is an instance variable as multiple rules may collaborate to
         *  create a single token.  nextToken will return this object after
         *  matching lexer rule(s).  If you subclass to allow multiple token
         *  emissions, then set this to the last token to be matched or
         *  something nonnull so that the auto token emit mechanism will not
         *  emit another token.
         *  </summary>
         */
        public NillableToken? token;
        // Set by Lexer.Skip(); tells NextToken() to discard the current token
        // and keep scanning.
        public bool skip;

        /** <summary>
         *  What character index in the stream did the current token start at?
         *  Needed, for example, to get the text for current token.  Set at
         *  the start of nextToken.
         *  </summary>
         */
        public int tokenStartCharIndex;

        /** <summary>The line on which the first character of the token resides</summary> */
        public int tokenStartLine;

        /** <summary>The character position of first character within the line</summary> */
        public int tokenStartCharPositionInLine;

        /** <summary>The channel number for the current token</summary> */
        public int channel;

        /** <summary>The token type for the current token</summary> */
        public int type;

        /** <summary>
         *  You can set the text for the current token to override what is in
         *  the input char buffer.  Use setText() or can set this instance var.
         *  </summary>
         */
        public string text;

        public MyRecognizerSharedState() {
            //following = new List<BitSet>( BaseRecognizer.InitialFollowStackSize );
            following = new BitSet[BaseRecognizer.InitialFollowStackSize];
            _fsp = -1;
            lastErrorIndex = -1;
            tokenStartCharIndex = -1;
        }

        /** <summary>Copy constructor; duplicates every field of <paramref name="state"/>.</summary>
         *  <exception cref="ArgumentNullException">when state is null.</exception>
         */
        public MyRecognizerSharedState(MyRecognizerSharedState state) {
            if (state == null)
                throw new ArgumentNullException("state");

            following = (BitSet[])state.following.Clone();
            _fsp = state._fsp;
            errorRecovery = state.errorRecovery;
            lastErrorIndex = state.lastErrorIndex;
            failed = state.failed;
            syntaxErrors = state.syntaxErrors;
            backtracking = state.backtracking;

            if (state.ruleMemo != null)
                ruleMemo = (IDictionary<int, int>[])state.ruleMemo.Clone();

            token = state.token;
            skip = state.skip; // was previously not copied
            tokenStartCharIndex = state.tokenStartCharIndex;
            tokenStartLine = state.tokenStartLine; // was previously not copied
            tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
            channel = state.channel;
            type = state.type;
            text = state.text;
        }
    }

    /** <summary>
     *  A recognizer that feeds off a token stream produced by a lexer.
     *  </summary>
     */
    public abstract class Parser : MyBaseRecognizer {
        public IMyTokenStream input;

        public Parser(IMyTokenStream input)
            : base() {
            // Assign through the property so the parser is reset first.
            TokenStream = input;
        }

        public Parser(IMyTokenStream input, MyRecognizerSharedState state)
            : base(state) // share the state object with another parser
        {
            // Deliberately bypasses the TokenStream setter: sharing state with
            // another recognizer must not reset that state.
            this.input = input;
        }

        public override void Reset() {
            base.Reset(); // reset all recognizer state variables
            if (input != null)
                input.Seek(0); // rewind the input
        }

        protected override object GetCurrentInputSymbol(IIntStream input) {
            return ((IMyTokenStream)input).LT(1);
        }

        /** <summary>
         *  Conjure up a token of the expected type so that error recovery can
         *  continue as if it had been matched, borrowing position info from
         *  the current (or, at EOF, the previous) token.
         *  </summary>
         */
        protected override object GetMissingSymbol(IIntStream input,
                                          RecognitionException e,
                                          int expectedTokenType,
                                          BitSet follow) {
            string tokenText = expectedTokenType == MyTokenTypes.EndOfFile
                ? "<missing EOF>"
                : "<missing " + TokenNames[expectedTokenType] + ">";
            var tokenStream = (IMyTokenStream)input;
            NillableToken reference = tokenStream.LT(1);
            if (reference.Type == MyTokenTypes.EndOfFile) {
                // Anchor the fabricated token on the last real token instead.
                reference = tokenStream.LT(-1);
            }
            var missing = new CommonToken(expectedTokenType, tokenText);
            missing.Line = reference.Line;
            missing.CharPositionInLine = reference.Column;
            missing.Channel = DefaultTokenChannel;
            missing.InputStream = reference.Stream;
            return missing;
        }

        /** <summary>Gets or sets the token stream; resets the parser upon a set.</summary> */
        public virtual IMyTokenStream TokenStream {
            get {
                return input;
            }
            set {
                // Clear first so Reset() does not rewind the incoming stream.
                input = null;
                Reset();
                input = value;
            }
        }

        public override string SourceName {
            get {
                return input.SourceName;
            }
        }

        [Conditional("ANTLR_TRACE")]
        public virtual void TraceIn(string ruleName, int ruleIndex) {
            base.TraceIn(ruleName, ruleIndex, input.LT(1));
        }

        [Conditional("ANTLR_TRACE")]
        public virtual void TraceOut(string ruleName, int ruleIndex) {
            base.TraceOut(ruleName, ruleIndex, input.LT(1));
        }
    }
    /** <summary>
     *  A lexer: a recognizer that draws input symbols from a character stream
     *  and produces tokens via NextToken().  Generated lexers implement
     *  mTokens() and Emit(); the main loop, matching primitives, and error
     *  reporting live here.
     *  </summary>
     */
    public abstract class Lexer : MyBaseRecognizer, IMyTokenSource {
        /** <summary>Where is the lexer drawing characters from?</summary> */
        protected ICharStream input;

        public Lexer() {
        }

        public Lexer(ICharStream input) {
            this.input = input;
        }

        public Lexer(ICharStream input, MyRecognizerSharedState state) : base(state) {
            this.input = input;
        }

        #region Properties
        public string Text {
            /** <summary>Return the text matched so far for the current token or any text override.</summary> */
            get {
                // An explicit override (set below) always wins.
                if (state.text != null) {
                    return state.text;
                }
                // NOTE(review): passes (start, CharIndex - start), i.e. a
                // (start, length)-style call — confirm IMy ICharStream.Substring
                // semantics; the classic Antlr3 runtime took (start, stop).
                return input.Substring(state.tokenStartCharIndex, CharIndex - state.tokenStartCharIndex);
            }
            /** <summary>Set the complete text of this token; it wipes any previous changes to the text.</summary> */
            set {
                state.text = value;
            }
        }
        /** <summary>Current line number in the character stream.</summary> */
        public int Line {
            get {
                return input.Line;
            }
            set {
                input.Line = value;
            }
        }
        /** <summary>Current character position within the current line.</summary> */
        public int CharPositionInLine {
            get {
                return input.CharPositionInLine;
            }
            set {
                input.CharPositionInLine = value;
            }
        }
        #endregion

        public override void Reset() {
            base.Reset(); // reset all recognizer state variables
            // wack Lexer state variables
            if (input != null) {
                input.Seek(0); // rewind the input
            }
            if (state == null) {
                return; // no shared state work to do
            }
            // NOTE(review): state.token is deliberately left untouched here
            // (assignment commented out, presumably during porting) — confirm.
            //state.token = null;
            state.type = MyTokenTypes.Invalid;
            state.channel = TokenChannels.Default;
            state.tokenStartCharIndex = -1;
            state.tokenStartCharPositionInLine = -1;
            state.tokenStartLine = -1;
            state.text = null;
        }

        /** <summary>Return a token from this source; i.e., match a token on the char stream.</summary> */
        NillableToken IMyTokenSource.NextToken() {
            return NextToken();
        }
        /** <summary>
         *  Main lexer loop: repeatedly snapshot the token-start position,
         *  run one rule via ParseNextToken(), and return the emitted token,
         *  skipping tokens flagged with state.skip and recovering from
         *  recognition errors.
         *  </summary>
         */
        public virtual NillableToken NextToken() {
            for (; ; ) {
                // Reset per-token state before attempting the next rule.
                //state.token = null;
                state.token = null;
                state.skip = false;
                state.channel = TokenChannels.Default;
                state.tokenStartCharIndex = input.Index;
                state.tokenStartCharPositionInLine = input.CharPositionInLine;
                state.tokenStartLine = input.Line;
                state.text = null;

                try {
                    ParseNextToken();
                    // Rules may emit explicitly; if not, auto-emit one token.
                    if (state.token == null) {
                        Emit();
                    } 
                    if (state.skip) {
                        continue; // rule asked to discard this token; rescan
                    }
                    return (NillableToken)state.token;

                } catch (MismatchedRangeException mre) {
                    ReportError(mre);
                    // MatchRange() routine has already called recover()
                } catch (MismatchedTokenException mte) {
                    ReportError(mte);
                    // Match() routine has already called recover()
                } catch (RecognitionException re) {
                    ReportError(re);
                    Recover(re); // throw out current char and try again
                }
            }
        }

        /** <summary>
         *  Instruct the lexer to skip creating a token for current lexer rule
         *  and look for another token.  nextToken() knows to keep looking when
         *  a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
         *  if token==null at end of any token rule, it creates one for you
         *  and emits it.
         *  </summary>
         */
        public virtual void Skip() {
            state.skip = true;
        }

        /** <summary>This is the lexer entry point that sets instance var 'token'</summary> */
        public abstract void mTokens();

        /** <summary>Gets or sets the character stream; resets the lexer upon a set.</summary> */
        public virtual ICharStream CharStream {
            get {
                return input;
            }
            /** <summary>Set the char stream and reset the lexer</summary> */
            set {
                // Clear first so Reset() does not rewind the incoming stream.
                input = null;
                Reset();
                input = value;
            }
        }

        public override string SourceName {
            get {
                return input.SourceName;
            }
        }

        /** <summary>
         *  Currently does not support multiple emits per nextToken invocation
         *  for efficiency reasons.  Subclass and override this method and
         *  nextToken (to push tokens into a list and pull from that list rather
         *  than a single variable as this implementation does).
         *  </summary>
         */
        public virtual void Emit(NillableToken token) {
            state.token = token;
        }

        /** <summary>
         *  The standard method called to automatically emit a token at the
         *  outermost lexical rule.  The token object should point into the
         *  char buffer start..stop.  If there is a text override in 'text',
         *  use that to set the token's text.  Override this method to emit
         *  custom Token objects.
         *  </summary>
         *
         *  <remarks>
         *  If you are building trees, then you should also override
         *  Parser or TreeParser.getMissingSymbol().
         *  </remarks>
         */
        public abstract NillableToken Emit();

        /** <summary>
         *  Match the literal string s character by character, consuming input.
         *  On mismatch: silently fail when backtracking, otherwise recover and
         *  throw MismatchedTokenException.
         *  </summary>
         */
        public virtual void Match(string s) {
            int i = 0;
            while (i < s.Length) {
                if (input.LA(1) != s[i]) {
                    if (state.backtracking > 0) {
                        // Inside a syntactic predicate: record failure, no throw.
                        state.failed = true;
                        return;
                    }
                    MismatchedTokenException mte = new MismatchedTokenException(s[i], input, TokenNames);
                    Recover(mte);
                    throw mte;
                }
                i++;
                input.Consume();
                state.failed = false;
            }
        }

        /** <summary>Consume the current character unconditionally.</summary> */
        public virtual void MatchAny() {
            input.Consume();
        }

        /** <summary>
         *  Match a single character c; same failure semantics as Match(string).
         *  </summary>
         */
        public virtual void Match(int c) {
            if (input.LA(1) != c) {
                if (state.backtracking > 0) {
                    state.failed = true;
                    return;
                }
                MismatchedTokenException mte = new MismatchedTokenException(c, input, TokenNames);
                Recover(mte);  // don't really recover; just consume in lexer
                throw mte;
            }
            input.Consume();
            state.failed = false;
        }

        /** <summary>
         *  Match any character in the inclusive range a..b; same failure
         *  semantics as Match(string).
         *  </summary>
         */
        public virtual void MatchRange(int a, int b) {
            if (input.LA(1) < a || input.LA(1) > b) {
                if (state.backtracking > 0) {
                    state.failed = true;
                    return;
                }
                MismatchedRangeException mre = new MismatchedRangeException(a, b, input);
                Recover(mre);
                throw mre;
            }
            input.Consume();
            state.failed = false;
        }

        /** <summary>What is the index of the current character of lookahead?</summary> */
        public virtual int CharIndex {
            get {
                return input.Index;
            }
        }

        public override void ReportError(RecognitionException e) {
            /** TODO: not thought about recovery in lexer yet.
             *
            // if we've already reported an error and have not matched a token
            // yet successfully, don't report any errors.
            if ( errorRecovery ) {
                //System.err.print("[SPURIOUS] ");
                return;
            }
            errorRecovery = true;
             */

            DisplayRecognitionError(this.TokenNames, e);
        }

        /** <summary>
         *  Build a lexer-flavored error message (character-oriented rather
         *  than token-oriented) for each recognition exception type.  Note the
         *  MismatchedNotSetException branch must precede MismatchedSetException.
         *  </summary>
         */
        public override string GetErrorMessage(RecognitionException e, string[] tokenNames) {
            string msg = null;
            if (e is MismatchedTokenException) {
                MismatchedTokenException mte = (MismatchedTokenException)e;
                msg = "mismatched character " + GetCharErrorDisplay(e.Character) + " expecting " + GetCharErrorDisplay(mte.Expecting);
            } else if (e is NoViableAltException) {
                NoViableAltException nvae = (NoViableAltException)e;
                // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
                // and "(decision="+nvae.decisionNumber+") and
                // "state "+nvae.stateNumber
                msg = "no viable alternative at character " + GetCharErrorDisplay(e.Character);
            } else if (e is EarlyExitException) {
                EarlyExitException eee = (EarlyExitException)e;
                // for development, can add "(decision="+eee.decisionNumber+")"
                msg = "required (...)+ loop did not match anything at character " + GetCharErrorDisplay(e.Character);
            } else if (e is MismatchedNotSetException) {
                MismatchedNotSetException mse = (MismatchedNotSetException)e;
                msg = "mismatched character " + GetCharErrorDisplay(e.Character) + " expecting set " + mse.Expecting;
            } else if (e is MismatchedSetException) {
                MismatchedSetException mse = (MismatchedSetException)e;
                msg = "mismatched character " + GetCharErrorDisplay(e.Character) + " expecting set " + mse.Expecting;
            } else if (e is MismatchedRangeException) {
                MismatchedRangeException mre = (MismatchedRangeException)e;
                msg = "mismatched character " + GetCharErrorDisplay(e.Character) + " expecting set " +
                      GetCharErrorDisplay(mre.A) + ".." + GetCharErrorDisplay(mre.B);
            } else {
                msg = base.GetErrorMessage(e, tokenNames);
            }
            return msg;
        }

        /** <summary>
         *  Render a character for error messages, escaping EOF and common
         *  whitespace, wrapped in single quotes.
         *  </summary>
         */
        public virtual string GetCharErrorDisplay(int c) {
            string s = ((char)c).ToString();
            switch (c) {
                case MyTokenTypes.EndOfFile:
                    s = "<EOF>";
                    break;
                case '\n':
                    s = "\\n";
                    break;
                case '\t':
                    s = "\\t";
                    break;
                case '\r':
                    s = "\\r";
                    break;
            }
            return "'" + s + "'";
        }

        /** <summary>
         *  Lexers can normally match any char in it's vocabulary after matching
         *  a token, so do the easy thing and just kill a character and hope
         *  it all works out.  You can instead use the rule invocation stack
         *  to do sophisticated error recovery if you are in a fragment rule.
         *  </summary>
         */
        public virtual void Recover(RecognitionException re) {
            //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
            //re.printStackTrace();
            input.Consume();
        }

        [Conditional("ANTLR_TRACE")]
        public virtual void TraceIn(string ruleName, int ruleIndex) {
            string inputSymbol = ((char)input.LT(1)) + " line=" + Line + ":" + CharPositionInLine;
            base.TraceIn(ruleName, ruleIndex, inputSymbol);
        }

        [Conditional("ANTLR_TRACE")]
        public virtual void TraceOut(string ruleName, int ruleIndex) {
            string inputSymbol = ((char)input.LT(1)) + " line=" + Line + ":" + CharPositionInLine;
            base.TraceOut(ruleName, ruleIndex, inputSymbol);
        }

        /** <summary>Hook for subclasses; by default just runs mTokens().</summary> */
        protected virtual void ParseNextToken() {
            mTokens();
        }
    }

    /** <summary>
     *  A generic list of elements tracked in an alternative to be used in
     *  a rewrite rule.  Optimized to hold a single element without allocating
     *  a list; a list is only created upon the second Add().
     *  </summary>
     */
    public abstract class MyRewriteRuleElementStream {
        /** <summary>
         *  Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
         *  which bumps it to 1 meaning no more elements.
         *  </summary>
         */
        protected int cursor = 0;

        /** <summary>Track single elements w/o creating a list.  Upon 2nd add, alloc list.</summary> */
        protected NillableToken? singleElement;

        /** <summary>The list of tokens or subtrees we are tracking.</summary> */
        protected List<NillableToken> elements;

        /** <summary>Once a node / subtree has been used in a stream, it must be dup'd
         *  from then on.  Streams are reset after subrules so that the streams
         *  can be reused in future subrules.  So, reset must set a dirty bit.
         *  If dirty, then next() always returns a dup.
         *
         *  I wanted to use "naughty bit" here, but couldn't think of a way
         *  to use "naughty".
         *  </summary>
         */
        protected bool dirty = false;

        /** <summary>The element or stream description; usually has name of the token or
         *  rule reference that this list tracks.  Can include rulename too, but
         *  the exception would track that info.
         *  </summary>
         */
        protected string elementDescription;
        protected TokenTreeAdaptor adaptor;

        public MyRewriteRuleElementStream(TokenTreeAdaptor adaptor, string elementDescription) {
            this.elementDescription = elementDescription;
            this.adaptor = adaptor;
        }

        /** <summary>Create a stream with one element</summary> */
        public MyRewriteRuleElementStream(TokenTreeAdaptor adaptor, string elementDescription, NillableToken oneElement)
            : this(adaptor, elementDescription) {
            Add(oneElement);
        }

        /** <summary>Create a stream, but feed off an existing list</summary> */
        public MyRewriteRuleElementStream(TokenTreeAdaptor adaptor, string elementDescription, List<NillableToken> elements)
            : this(adaptor, elementDescription) {
            this.singleElement = null;
            this.elements = elements;
        }

        /** <summary>
         *  Reset the condition of this stream so that it appears we have
         *  not consumed any of its elements.  Elements themselves are untouched.
         *  Once we reset the stream, any future use will need duplicates.  Set
         *  the dirty bit.
         *  </summary>
         */
        public virtual void Reset() {
            cursor = 0;
            dirty = true;
        }

        public virtual void Add(NillableToken el) {
            //System.out.println("add '"+elementDescription+"' is "+el);
            //if (el == null) {
            //    return;
            //}
            if (elements != null) { // if in list, just add
                elements.Add(el);
                return;
            }
            if (singleElement == null) { // no elements yet, track w/o list
                singleElement = el;
                return;
            }
            // adding 2nd element, move to list
            elements = new List<NillableToken>(5);
            elements.Add((NillableToken)singleElement);
            singleElement = null;
            elements.Add(el);
        }

        /** <summary>
         *  Return the next element in the stream.  If out of elements, throw
         *  an exception unless size()==1.  If size is 1, then return elements[0].
         *  Return a duplicate node/subtree if stream is out of elements and
         *  size==1.  If we've already used the element, dup (dirty bit set).
         *  </summary>
         */
        public virtual NillableToken NextTree() {
            int n = Count;
            if (dirty || (cursor >= n && n == 1)) {
                // if out of elements and size is 1, dup
                NillableToken el = NextCore();
                return Dup(el);
            }
            // test size above then fetch
            NillableToken el2 = NextCore();
            return el2;
        }

        /** <summary>
         *  Do the work of getting the next element, making sure that it's
         *  a tree node or subtree.  Deal with the optimization of single-
         *  element list versus list of size > 1.  Throw an exception
         *  if the stream is empty or we're out of elements and size>1.
         *  protected so you can override in a subclass if necessary.
         *  </summary>
         */
        protected virtual NillableToken NextCore() {
            int n = Count;
            if (n == 0) {
                throw new RewriteEmptyStreamException(elementDescription);
            }
            if (cursor >= n) { // out of elements?
                if (n == 1) {  // if size is 1, it's ok; return and we'll dup
                    // The single element lives in singleElement normally, but
                    // in elements[0] when the stream was built from a list;
                    // casting a null singleElement used to throw here.
                    if (singleElement != null)
                        return ToTree((NillableToken)singleElement);
                    return ToTree(elements[0]);
                }
                // out of elements and size was not 1, so we can't dup
                throw new RewriteCardinalityException(elementDescription);
            }
            // we have elements
            if (singleElement != null) {
                cursor++; // move cursor even for single element list
                return ToTree((NillableToken)singleElement);
            }
            // must have more than one in list, pull from elements
            NillableToken o = ToTree(elements[cursor]);
            cursor++;
            return o;
        }

        /** <summary>
         *  When constructing trees, sometimes we need to dup a token or AST
         * 	subtree.  Dup'ing a token means just creating another AST node
         *  around it.  For trees, you must call the adaptor.dupTree() unless
         *  the element is for a tree root; then it must be a node dup.
         *  </summary>
         */
        protected abstract NillableToken Dup(NillableToken el);

        /** <summary>
         *  Ensure stream emits trees; tokens must be converted to AST nodes.
         *  AST nodes can be passed through unmolested.
         *  </summary>
         */
        protected virtual NillableToken ToTree(NillableToken el) {
            return el;
        }

        public virtual bool HasNext {
            get {
                return (singleElement != null && cursor < 1) ||
                      (elements != null && cursor < elements.Count);
            }
        }

        /** <summary>Number of elements tracked; the list takes precedence once allocated.</summary> */
        public virtual int Count {
            get {
                int n = 0;
                if (singleElement != null) {
                    n = 1;
                }
                if (elements != null) {
                    return elements.Count;
                }
                return n;
            }
        }

        public virtual string Description {
            get {
                return elementDescription;
            }
        }
    }
    /** <summary>Rewrite-rule stream specialized for plain tokens.</summary> */
    public class MyRewriteRuleTokenStream : MyRewriteRuleElementStream {

        public MyRewriteRuleTokenStream(TokenTreeAdaptor adaptor, string elementDescription)
            : base(adaptor, elementDescription) {
        }

        /** <summary>Create a stream with one element</summary> */
        public MyRewriteRuleTokenStream(TokenTreeAdaptor adaptor, string elementDescription, NillableToken oneElement)
            : base(adaptor, elementDescription, oneElement) {
        }

        /** <summary>Create a stream, but feed off an existing list</summary> */
        public MyRewriteRuleTokenStream(TokenTreeAdaptor adaptor, string elementDescription, List<NillableToken> elements)
            : base(adaptor, elementDescription, elements) {
        }

        /** <summary>Get next token from stream and make a node for it</summary> */
        public virtual NillableToken NextNode() {
            // Wrap the next token in a fresh tree node via the adaptor.
            return adaptor.Create(NextCore());
        }

        public virtual NillableToken NextToken() {
            return NextCore();
        }

        /** <summary>
         *  Don't convert to a tree unless they explicitly call nextTree.
         *  This way we can do hetero tree nodes in rewrite.
         *  </summary>
         */
        protected override NillableToken ToTree(NillableToken el) {
            return el;
        }

        protected override NillableToken Dup(NillableToken el) {
            // Tokens are never duplicated by this stream.
            throw new NotSupportedException("dup can't be called for a token stream.");
        }
    }
    /** <summary>
     *  Rewrite-rule stream whose elements are subtrees; a rule result that is
     *  referenced more than once is duplicated as a whole tree.
     *  </summary>
     */
    public class MyRewriteRuleSubtreeStream : MyRewriteRuleElementStream {

        public MyRewriteRuleSubtreeStream(TokenTreeAdaptor adaptor, string elementDescription)
            : base(adaptor, elementDescription) {
        }

        /** <summary>Create a stream seeded with a single element.</summary> */
        public MyRewriteRuleSubtreeStream(TokenTreeAdaptor adaptor, string elementDescription, NillableToken oneElement)
            : base(adaptor, elementDescription, oneElement) {
        }

        /** <summary>Create a stream that consumes an existing element list.</summary> */
        public MyRewriteRuleSubtreeStream(TokenTreeAdaptor adaptor, string elementDescription, List<NillableToken> elements)
            : base(adaptor, elementDescription, elements) {
        }

        /** <summary>
         *  Treat the next element as a single node even if it's a subtree;
         *  used when the result has to become a tree root.  This also avoids
         *  duplicating recently-added children: e.g. ^(type ID)+ adds ID to
         *  type, so the 2nd iteration must dup the type node only.
         *  </summary>
         */
        public virtual object NextNode() {
            int size = Count;
            if (dirty || (cursor >= size && size == 1)) {
                // Out of elements with exactly one buffered: dup it (at most a
                // single node, since this path is for making root nodes).
                return adaptor.DupNode(NextCore());
            }

            // Fetch the subtree, then unwrap single-child nil wrappers so that
            // only the real root is duplicated.
            NillableToken subtree = NextCore();
            while (adaptor.IsNil(subtree) && adaptor.GetChildCount(subtree) == 1)
                subtree = adaptor.GetChild(subtree, 0);

            // dup just the root (want node here)
            return adaptor.DupNode(subtree);
        }

        /** <summary>Subtree elements are duplicated as whole trees.</summary> */
        protected override NillableToken Dup(NillableToken el) {
            return adaptor.DupTree(el);
        }
    }

    /** <summary>
     *  Rule return scope for AST parsers: tracks the start/stop tokens of the
     *  matched region plus the tree built for the rule.
     *  </summary>
     */
    public struct MyAstParserRuleReturnScope : IRuleReturnScope<NillableToken>, IAstRuleReturnScope<NillableToken>, IAstRuleReturnScope {

        private NillableToken m_start;
        private NillableToken m_stop;
        private NillableToken m_tree;

        /** <summary>First token matched by the rule.</summary> */
        public NillableToken Start {
            get { return m_start; }
            set { m_start = value; }
        }

        /** <summary>Last token matched by the rule.</summary> */
        public NillableToken Stop {
            get { return m_stop; }
            set { m_stop = value; }
        }

        /** <summary>AST constructed for the rule.</summary> */
        public NillableToken Tree {
            get { return m_tree; }
            set { m_tree = value; }
        }

        object IRuleReturnScope.Start {
            get { return Start; }
        }

        object IRuleReturnScope.Stop {
            get { return Stop; }
        }

        object IAstRuleReturnScope.Tree {
            get { return Tree; }
        }
    }


    /** <summary>Character-stream constants shared by the machinery in this file.</summary> */
    public static class MyCharStreamConstants {
        // Sentinel value returned/stored when the end of input is reached.
        public const int EndOfFile = -1;
    }
    
    /** <summary>Token-type constants used by the lexer/parser machinery in this file.</summary> */
    public static class MyTokenTypes
    {
        /** <summary>Tells the lexer to skip (not emit) the current token.</summary> */
        public const int Skip = -2;
        // Consistency fix: reference this file's own constant instead of
        // Antlr.Runtime's CharStreamConstants; both are -1, so the compiled
        // value is unchanged.  Everything else in this file already uses
        // MyCharStreamConstants.EndOfFile.
        public const int EndOfFile = MyCharStreamConstants.EndOfFile;
        public const int Invalid = 0;
        public const int EndOfRule = 1;
        /** <summary>imaginary tree navigation type; traverse "get child" link</summary> */
        public const int Down = 2;
        /** <summary>imaginary tree navigation type; finish with a child list</summary> */
        public const int Up = 3;
        /** <summary>Smallest value available for user-defined token types.</summary> */
        public const int Min = Up + 1;
    }

    /** <summary>
     *  Optional capability of a token source: while Yield is true,
     *  MyCommonTokenStream.Consume() returns without advancing, letting the
     *  source pause the stream.
     *  </summary>
     */
    public interface IMyYieldableTokenSource {
        bool Yield { get; }
    }
    /** <summary>Supplies tokens on demand (typically a lexer — confirm with implementers).</summary> */
    public interface IMyTokenSource {
        /** <summary>Produce the next token; streams expect an EOF token at end of input.</summary> */
        NillableToken NextToken();
        /** <summary>Name of the input (e.g. file name) feeding this source.</summary> */
        string SourceName { get; }
        /** <summary>Token-type names, indexed by token type.</summary> */
        string[] TokenNames { get; }
    }
    /** <summary>
     *  Buffered token stream that exposes only tokens on a single channel;
     *  off-channel tokens (whitespace, comments, ...) are skipped when
     *  consuming and when looking ahead or behind.
     *  </summary>
     */
    public class MyCommonTokenStream : MyBufferedTokenStream {
        // Skip tokens on any channel but this one; this is how we skip whitespace...
        private int m_channel;

        public MyCommonTokenStream(IMyTokenSource tokenSource)
            : this(tokenSource, TokenChannels.Default) {
        }
        public MyCommonTokenStream(IMyTokenSource tokenSource, int channel)
            : base(tokenSource) {
            m_channel = channel;
        }

        /** <summary>The channel this stream listens to.</summary> */
        public int Channel {
            get {
                return m_channel;
            }
        }

        /** <summary>Setting a new source also resets the listening channel to the default.</summary> */
        public override IMyTokenSource TokenSource {
            get {
                return base.TokenSource;
            }
            set {
                base.TokenSource = value;
                m_channel = TokenChannels.Default;
            }
        }

        // Always leave p on an on-channel token.
        public override void Consume() {
            if (m_nextToken == -1)
                Setup();

            // A yielding source asks us to pause: do not advance the cursor.
            var yieldable = TokenSource as IMyYieldableTokenSource;
            if (yieldable != null && yieldable.Yield)
                return;

            m_nextToken++;
            m_nextToken = SkipOffTokenChannels(m_nextToken);
        }

        /** <summary>
         *  Look back k on-channel tokens.  Returns the default token when the
         *  request reaches before the start of the buffer.
         *  </summary>
         */
        protected override NillableToken LB(int k) {
            if (k == 0 || (m_nextToken - k) < 0)
                return default(NillableToken);

            int i = m_nextToken;
            int n = 1;
            // find k good tokens looking backwards
            while (n <= k) {
                // skip off-channel tokens
                i = SkipOffTokenChannelsReverse(i - 1);
                n++;
            }
            if (i < 0)
                throw new ArgumentException();
            return m_tokens[i];
        }

        /** <summary>Look ahead k on-channel tokens; k &lt; 0 delegates to LB, k == 0 is invalid.</summary> */
        public override NillableToken LT(int k) {
            if (m_nextToken == -1)
                Setup();
            if (k == 0)
                throw new ArgumentException();
            if (k < 0)
                return LB(-k);
            int i = m_nextToken;
            int n = 1; // we know tokens[p] is a good one
            // find k good tokens
            while (n < k) {
                // skip off-channel tokens
                i = SkipOffTokenChannels(i + 1);
                n++;
            }

            if (i > Range)
                Range = i;

            return m_tokens[i];
        }

        /** <summary>Advance i forward to the next token on our channel, fetching as needed.</summary> */
        protected virtual int SkipOffTokenChannels(int i) {
            Sync(i);
            while (m_tokens[i].Channel != m_channel) {
                // also stops at EOF (it's on channel)
                i++;
                Sync(i);
            }
            return i;
        }

        /** <summary>Walk i backwards to the nearest on-channel token, or -1 if none.</summary> */
        protected virtual int SkipOffTokenChannelsReverse(int i) {
            // Idiom fix: m_tokens is List<NillableToken>, so the original
            // explicit (NillableToken) cast of each element was redundant.
            while (i >= 0 && m_tokens[i].Channel != m_channel) {
                i--;
            }

            return i;
        }

        public override void Reset() {
            base.Reset();
            // Park the cursor on the first on-channel token.
            m_nextToken = SkipOffTokenChannels(0);
        }
        protected override void Setup() {
            m_nextToken = 0;
            m_nextToken = SkipOffTokenChannels(m_nextToken);
        }
    }

    /** <summary>
     *  Stream of tokens consumed by a parser; extends IIntStream with
     *  token-level lookahead and text slicing.
     *  </summary>
     */
    public interface IMyTokenStream : IIntStream {
        /** <summary>Look ahead (k &gt; 0) or behind (k &lt; 0) by k tokens.</summary> */
        NillableToken LT(int k);
        /** <summary>Highest token index examined so far.</summary> */
        int Range {
            get;
        }
        /** <summary>Token at absolute buffer index i.</summary> */
        NillableToken Get(int i);
        /** <summary>Source that feeds this stream.</summary> */
        IMyTokenSource TokenSource {
            get;
        }
        /** <summary>Concatenated text of tokens in the inclusive index range.</summary> */
        string ToString(int start, int stop);
        /** <summary>Concatenated text of tokens between two tokens (by their indices).</summary> */
        string ToString(NillableToken start, NillableToken stop);
    }
    /** <summary>Look-behind information exposed by a token stream.</summary> */
    public interface IMyTokenStreamInformation {
        /** <summary>Most recently consumed token (LB(1)).</summary> */
        NillableToken LastToken { get; }
        /** <summary>Most recently consumed token with a positive line number (skips synthesized tokens).</summary> */
        NillableToken LastRealToken { get; }
        /** <summary>How far back LB() may reach.</summary> */
        int MaxLookBehind { get; }
    }
    /** <summary>
     *  Buffers every token pulled from a token source and allows arbitrary
     *  lookahead/lookbehind over the buffer.  Tokens are fetched lazily via
     *  Sync(); once fetched, the EOF token is the last buffered token.
     *  </summary>
     */
    public class MyBufferedTokenStream : IMyTokenStream, IMyTokenStreamInformation {

        // The index into the tokens list of the current token (next token
        //  to consume).  tokens[p] should be LT(1).  p=-1 indicates need
        //  to initialize with first token.  The ctor doesn't get a token.
        //  First call to LT(1) or whatever gets the first token and sets p=0;
        protected int m_nextToken = -1;
        protected List<NillableToken> m_tokens = new List<NillableToken>(100);
        private IMyTokenSource m_tokenSource;
        private int m_lastMarker;

        public MyBufferedTokenStream() {
        }
        public MyBufferedTokenStream(IMyTokenSource tokenSource) {
            this.m_tokenSource = tokenSource;
        }

        /** <summary>Lazy initialization: buffer the first token and point the cursor at it.</summary> */
        protected virtual void Setup() {
            Sync(0);
            m_nextToken = 0;
        }

        /** <summary>Source feeding this stream; assigning a new one discards the buffer.</summary> */
        public virtual IMyTokenSource TokenSource {
            get {
                return m_tokenSource;
            }
            set {
                this.m_tokenSource = value;
                m_tokens.Clear();
                m_nextToken = -1;
            }
        }

        /** <summary>Index of the next token to consume.</summary> */
        public virtual int Index {
            get {
                return m_nextToken;
            }
        }

        /** <summary>Highest token index examined so far (updated by LT).</summary> */
        public virtual int Range {
            get;
            protected set;
        }

        /** <summary>Number of tokens buffered so far (not necessarily the whole input).</summary> */
        public virtual int Count {
            get {
                return m_tokens.Count;
            }
        }

        public virtual string SourceName {
            get {
                return m_tokenSource.SourceName;
            }
        }

        /** <summary>Most recently consumed token (LB(1)).</summary> */
        public virtual NillableToken LastToken {
            get {
                return LB(1);
            }
        }

        /** <summary>
         *  Walks backwards past tokens whose Line is not positive and returns
         *  the first "real" one.  Note: LB throws if the walk reaches before
         *  the start of the buffer.
         *  </summary>
         */
        public virtual NillableToken LastRealToken {
            get {
                int i = 0;
                NillableToken token;
                do {
                    i++;
                    token = LB(i);
                } while (token.Line <= 0);

                return token;
            }
        }

        /** <summary>This stream keeps everything, so look-behind is unbounded.</summary> */
        public virtual int MaxLookBehind {
            get {
                return int.MaxValue;
            }
        }

        public virtual int Mark() {
            if (m_nextToken == -1)
                Setup();
            m_lastMarker = Index;
            return m_lastMarker;
        }
        public virtual void Release(int marker) {
            // no resources to release
        }
        public virtual void Rewind(int marker) {
            Seek(marker);
        }
        public virtual void Rewind() {
            Seek(m_lastMarker);
        }

        /** <summary>Rewinds the cursor to the start; the token buffer is kept.</summary> */
        public virtual void Reset() {
            m_nextToken = 0;
            m_lastMarker = 0;
        }

        public virtual void Seek(int index) {
            m_nextToken = index;
        }

        // Move the input pointer to the next incoming token.  The stream
        //  must become active with LT(1) available.  consume() simply
        //  moves the input pointer so that LT(1) points at the next
        //  input symbol. Consume at least one token.
        //
        //  Walk past any token not on the channel the parser is listening to.
        public virtual void Consume() {
            if (m_nextToken == -1)
                Setup();
            m_nextToken++;
            Sync(m_nextToken);
        }

        // Make sure index i in tokens has a token.
        protected virtual void Sync(int i) {
            int n = i - m_tokens.Count + 1; // how many more elements we need?
            if (n > 0)
                Fetch(n);
        }

        // add n elements to buffer
        protected virtual void Fetch(int n) {
            for (int i = 0; i < n; i++) {
                NillableToken t = TokenSource.NextToken();
                t.Index = m_tokens.Count;
                m_tokens.Add(t);
                // EOF is added to the buffer before we stop fetching, so it is
                // always the last buffered token.
                if (t.Type == MyCharStreamConstants.EndOfFile)
                    break;
            }
        }

        /** <summary>Token at absolute buffer index i; throws when out of range.</summary> */
        public virtual NillableToken Get(int i) {
            if (i < 0 || i >= m_tokens.Count) {
                throw new IndexOutOfRangeException("token index " + i + " out of range 0.." + (m_tokens.Count - 1));
            }
            return m_tokens[i];
        }

        /** <summary>Pull every remaining token from the source into the buffer.</summary> */
        public virtual void Fill() {
            if (m_nextToken == -1)
                Setup();

            if (m_tokens[m_nextToken].Type == MyCharStreamConstants.EndOfFile)
                return;

            int i = m_nextToken + 1;
            Sync(i);
            while (m_tokens[i].Type != MyCharStreamConstants.EndOfFile) {
                i++;
                Sync(i);
            }
        }

        public virtual int LA(int i) {
            return LT(i).Type;
        }

        /** <summary>Look back k tokens; throws when the request reaches before the buffer start.</summary> */
        protected virtual NillableToken LB(int k) {
            if ((m_nextToken - k) < 0)
                throw new ArgumentException();

            return m_tokens[m_nextToken - k];
        }

        /** <summary>Look ahead k tokens (k &lt; 0 delegates to LB, k == 0 is invalid).</summary> */
        public virtual NillableToken LT(int k) {
            if (m_nextToken == -1)
                Setup();
            if (k == 0)
                throw new ArgumentException();
            if (k < 0)
                return LB(-k);

            int i = m_nextToken + k - 1;
            Sync(i);
            if (i >= m_tokens.Count) {
                // EOF must be last token
                return m_tokens[m_tokens.Count - 1];
            }

            if (i > Range)
                Range = i;

            // Clarity fix: return the already-computed index rather than
            // recomputing m_nextToken + k - 1 (same value).
            return m_tokens[i];
        }

        public virtual List<NillableToken> GetTokens() {
            return m_tokens;
        }
        public virtual List<NillableToken> GetTokens(int start, int stop) {
            return GetTokens(start, stop, default(BitSet));
        }

        /** <summary>
         *  Tokens in [start, stop] whose type is in the given set (or all when
         *  the set is null).  Returns null — not an empty list — when nothing
         *  matches; callers depend on that, so it is preserved.
         *  </summary>
         */
        public virtual List<NillableToken> GetTokens(int start, int stop, BitSet types) {
            if (m_nextToken == -1)
                Setup();
            if (stop >= m_tokens.Count)
                stop = m_tokens.Count - 1;
            if (start < 0)
                start = 0;
            if (start > stop)
                return null;

            // list = tokens[start:stop]:{Token t, t.getType() in types}
            List<NillableToken> filteredTokens = new List<NillableToken>();
            for (int i = start; i <= stop; i++) {
                NillableToken t = m_tokens[i];
                if (types == null || types.Member(t.Type)) {
                    filteredTokens.Add(t);
                }
            }
            if (filteredTokens.Count == 0) {
                filteredTokens = null;
            }
            return filteredTokens;
        }
        public virtual List<NillableToken> GetTokens(int start, int stop, IEnumerable<int> types) {
            return GetTokens(start, stop, new BitSet(types));
        }
        public virtual List<NillableToken> GetTokens(int start, int stop, int ttype) {
            return GetTokens(start, stop, BitSet.Of(ttype));
        }

        public override string ToString() {
            if (m_nextToken == -1)
                Setup();

            //Fill();
            return ToString(0, m_tokens.Count - 1);
        }

        /** <summary>Concatenated text of buffered tokens in [start, stop], stopping at EOF.</summary> */
        public virtual string ToString(int start, int stop) {
            if (start < 0 || stop < 0)
                return null;
            if (m_nextToken == -1)
                Setup();
            if (stop >= m_tokens.Count)
                stop = m_tokens.Count - 1;

            StringBuilder buf = new StringBuilder();
            for (int i = start; i <= stop; i++) {
                NillableToken t = m_tokens[i];
                if (t.Type == MyCharStreamConstants.EndOfFile)
                    break;
                buf.Append(t.Text);
            }

            return buf.ToString();
        }
        public virtual string ToString(NillableToken start, NillableToken stop) {
            return ToString(start.Index, stop.Index);
        }

    }

    /** <summary>Handles DFA states marked "special"; returns the next state, or -1 when no transition is viable.</summary> */
    public delegate int MySpecialStateTransitionHandler(MyDFA dfa, int s, IIntStream input);
    /** <summary>
     *  Table-driven DFA used by generated recognizers to predict which
     *  alternative of a decision will succeed.  The exact ordering of
     *  consume/continue in Predict is significant; do not reorder.
     *  </summary>
     */
    public class MyDFA {
        // Per-state transition tables; the state number indexes the outer arrays.
        protected short[] eot;          // EOT edge per state; >=0 means an edge exists (see Predict)
        protected short[] eof;          // EOF edge per state; >=0 means an accepting EOF transition
        protected char[] min;           // lowest input symbol with a normal transition
        protected char[] max;           // highest input symbol with a normal transition
        protected short[] accept;       // >=1: the alternative predicted by this accept state
        protected short[] special;      // >=0: id passed to SpecialStateTransition for this state
        protected short[][] transition; // transition[s][c - min[s]] => next state; <0 means no edge

        protected int decisionNumber;

        /** <summary>Which recognizer encloses this DFA?  Needed to check backtracking</summary> */
        protected MyBaseRecognizer recognizer;

        public readonly bool debug = false;

        public MyDFA()
            : this(new MySpecialStateTransitionHandler(SpecialStateTransitionDefault)) {
        }

        public MyDFA(MySpecialStateTransitionHandler specialStateTransition) {
            // Null falls back to the default handler, which reports "no transition".
            this.SpecialStateTransition = specialStateTransition ?? new MySpecialStateTransitionHandler(SpecialStateTransitionDefault);
        }

        public virtual string Description {
            get {
                return "n/a";
            }
        }

        /** <summary>
         *  From the input stream, predict what alternative will succeed
         *  using this DFA (representing the covering regular approximation
         *  to the underlying CFL).  Return an alternative number 1..n.  Throw
         *  an exception upon error.
         *  </summary>
         */
        public virtual int Predict(IIntStream input) {
            if (debug) {
                Console.Error.WriteLine("Enter DFA.predict for decision " + decisionNumber);
            }
            int mark = input.Mark(); // remember where decision started in input
            int s = 0; // we always start at s0
            try {
                for (; ; ) {
                    if (debug)
                        Console.Error.WriteLine("DFA " + decisionNumber + " state " + s + " LA(1)=" + (char)input.LA(1) + "(" + input.LA(1) +
                                           "), index=" + input.Index);
                    // Special states (e.g. predicated decisions) are delegated
                    // to the SpecialStateTransition handler.
                    int specialState = special[s];
                    if (specialState >= 0) {
                        if (debug) {
                            Console.Error.WriteLine("DFA " + decisionNumber +
                                " state " + s + " is special state " + specialState);
                        }
                        s = SpecialStateTransition(this, specialState, input);
                        if (debug) {
                            Console.Error.WriteLine("DFA " + decisionNumber +
                                " returns from special state " + specialState + " to " + s);
                        }
                        if (s == -1) {
                            NoViableAlt(s, input);
                            return 0;
                        }
                        input.Consume();
                        continue;
                    }
                    if (accept[s] >= 1) {
                        if (debug)
                            Console.Error.WriteLine("accept; predict " + accept[s] + " from state " + s);
                        return accept[s];
                    }
                    // look for a normal char transition
                    char c = (char)input.LA(1); // -1 == \uFFFF, all tokens fit in 65000 space
                    if (c >= min[s] && c <= max[s]) {
                        int snext = transition[s][c - min[s]]; // move to next state
                        if (snext < 0) {
                            // was in range but not a normal transition
                            // must check EOT, which is like the else clause.
                            // eot[s]>=0 indicates that an EOT edge goes to another
                            // state.
                            if (eot[s] >= 0) {  // EOT Transition to accept state?
                                if (debug)
                                    Console.Error.WriteLine("EOT transition");
                                s = eot[s];
                                input.Consume();
                                // TODO: I had this as return accept[eot[s]]
                                // which assumed here that the EOT edge always
                                // went to an accept...faster to do this, but
                                // what about predicated edges coming from EOT
                                // target?
                                continue;
                            }
                            NoViableAlt(s, input);
                            return 0;
                        }
                        s = snext;
                        input.Consume();
                        continue;
                    }
                    if (eot[s] >= 0) {  // EOT Transition?
                        if (debug)
                            Console.Error.WriteLine("EOT transition");
                        s = eot[s];
                        input.Consume();
                        continue;
                    }
                    if (c == unchecked((char)MyTokenTypes.EndOfFile) && eof[s] >= 0) {  // EOF Transition to accept state?
                        if (debug)
                            Console.Error.WriteLine("accept via EOF; predict " + accept[eof[s]] + " from " + eof[s]);
                        return accept[eof[s]];
                    }
                    // not in range and not EOF/EOT, must be invalid symbol
                    if (debug) {
                        Console.Error.WriteLine("min[" + s + "]=" + min[s]);
                        Console.Error.WriteLine("max[" + s + "]=" + max[s]);
                        Console.Error.WriteLine("eot[" + s + "]=" + eot[s]);
                        Console.Error.WriteLine("eof[" + s + "]=" + eof[s]);
                        for (int p = 0; p < transition[s].Length; p++) {
                            Console.Error.Write(transition[s][p] + " ");
                        }
                        Console.Error.WriteLine();
                    }
                    NoViableAlt(s, input);
                    return 0;
                }
            } finally {
                // Prediction never consumes: always rewind to where we started.
                input.Rewind(mark);
            }
        }

        // While backtracking we only record failure; otherwise raise and
        // report a NoViableAltException.
        protected virtual void NoViableAlt(int s, IIntStream input) {
            if (recognizer.state.backtracking > 0) {
                recognizer.state.failed = true;
                return;
            }
            NoViableAltException nvae =
                new NoViableAltException(Description,
                                         decisionNumber,
                                         s,
                                         input);
            Error(nvae);
            throw nvae;
        }

        /** <summary>A hook for debugging interface</summary> */
        public virtual void Error(NoViableAltException nvae) {
        }

        public MySpecialStateTransitionHandler SpecialStateTransition {
            get;
            private set;
        }
        //public virtual int specialStateTransition( int s, IntStream input )
        //{
        //    return -1;
        //}

        // Default handler: no special transition is viable.
        static int SpecialStateTransitionDefault(MyDFA dfa, int s, IIntStream input) {
            return -1;
        }

        /** <summary>
         *  Given a String that has a run-length-encoding of some unsigned shorts
         *  like "\1\2\3\9", convert to short[] {2,9,9,9}.  We do this to avoid
         *  static short[] which generates so much init code that the class won't
         *  compile. :(
         *  </summary>
         */
        public static short[] UnpackEncodedString(string encodedString) {
            // walk first to find how big it is.
            int size = 0;
            for (int i = 0; i < encodedString.Length; i += 2) {
                size += encodedString[i];
            }
            short[] data = new short[size];
            int di = 0;
            for (int i = 0; i < encodedString.Length; i += 2) {
                char n = encodedString[i];
                char v = encodedString[i + 1];
                // add v n times to data
                for (int j = 1; j <= n; j++) {
                    data[di++] = (short)v;
                }
            }
            return data;
        }

        /** <summary>Hideous duplication of code, but I need different typed arrays out :(</summary> */
        public static char[] UnpackEncodedStringToUnsignedChars(string encodedString) {
            // walk first to find how big it is.
            int size = 0;
            for (int i = 0; i < encodedString.Length; i += 2) {
                size += encodedString[i];
            }
            char[] data = new char[size];
            int di = 0;
            for (int i = 0; i < encodedString.Length; i += 2) {
                char n = encodedString[i];
                char v = encodedString[i + 1];
                // add v n times to data
                for (int j = 1; j <= n; j++) {
                    data[di++] = v;
                }
            }
            return data;
        }

        // Compiled out entirely unless ANTLR_DEBUG is defined.
        [Conditional("ANTLR_DEBUG")]
        protected virtual void DebugRecognitionException(RecognitionException ex) {
            IDebugEventListener dbg = recognizer.DebugListener;
            if (dbg != null)
                dbg.RecognitionException(ex);
        }
    }

    /** <summary>
     *  Stream of tree nodes produced by walking a NillableToken tree with
     *  MyTreeIterator.  Emits imaginary DOWN/UP navigation nodes and treats a
     *  nil root as a flat list of its children.
     *  </summary>
     */
    public class MyCommonTreeNodeStream : MyLookaheadStream<NillableToken>, IMyTreeNodeStream {
        public const int DEFAULT_INITIAL_BUFFER_SIZE = 100;
        public const int INITIAL_CALL_STACK_SIZE = 10;

        /** <summary>Pull nodes from which tree?</summary> */
        private readonly NillableToken _root;

        /** <summary>If this tree (root) was created from a token stream, track it.</summary> */
        protected ITokenStream tokens;

        /** <summary>What tree adaptor was used to build these trees</summary> */
        private TokenTreeAdaptor _adaptor;

        /** <summary>The tree iterator we are using.</summary> */
        private readonly MyTreeIterator _it;

        /** <summary>Stack of indexes used for push/pop calls</summary> */
        private Stack<int> _calls;

        /** <summary>Tree (nil A B C) trees like flat A B C streams</summary> */
        private bool _hasNilRoot = false;

        /** <summary>Tracks tree depth.  Level=0 means we're at root node level.</summary> */
        private int _level = 0;

        public MyCommonTreeNodeStream(TokenTreeAdaptor adaptor, NillableToken tree) {
            _root = tree;
            _adaptor = adaptor;
            _it = new MyTreeIterator(adaptor, _root);
        }

        /** <summary>Name of the underlying token stream's source, or null if none is attached.</summary> */
        public virtual string SourceName {
            get {
                if (TokenStream == null)
                    return null;

                return TokenStream.SourceName;
            }
        }

        public virtual ITokenStream TokenStream {
            get {
                return tokens;
            }

            set {
                tokens = value;
            }
        }

        public virtual TokenTreeAdaptor TreeAdaptor {
            get {
                return _adaptor;
            }

            set {
                _adaptor = value;
            }
        }

        /** <summary>The tree this stream was created from.</summary> */
        public virtual NillableToken TreeSource {
            get {
                return _root;
            }
        }

        /** <summary>This stream never creates unique navigation nodes; the setter is a no-op.</summary> */
        public virtual bool UniqueNavigationNodes {
            get {
                return false;
            }

            set {
            }
        }

        /** <summary>Restart the walk from the root and discard saved state.</summary> */
        public virtual void Reset() {
            base.Clear();
            _it.Reset();
            _hasNilRoot = false;
            _level = 0;
            if (_calls != null)
                _calls.Clear();
        }

        /** <summary>
         *  Pull the next node from the iterator, tracking depth via DOWN/UP
         *  and skipping the nil-root wrapper plus its DOWN node.
         *  </summary>
         */
        public override NillableToken NextElement() {
            _it.MoveNext();
            NillableToken t = _it.Current;
            //System.out.println("pulled "+adaptor.getType(t));
            if (t == _it.up) {
                _level--;
                if (_level == 0 && _hasNilRoot) {
                    _it.MoveNext();
                    return _it.Current; // don't give last UP; get EOF
                }
            } else if (t == _it.down) {
                _level++;
            }

            if (_level == 0 && TreeAdaptor.IsNil(t)) {
                // if nil root, scarf nil, DOWN
                _hasNilRoot = true;
                _it.MoveNext();
                t = _it.Current; // t is now DOWN, so get first real node next
                _level++;
                _it.MoveNext();
                t = _it.Current;
            }

            return t;
        }

        public override bool IsEndOfFile(NillableToken o) {
            return TreeAdaptor.GetType(o) == MyCharStreamConstants.EndOfFile;
        }

        public virtual int LA(int i) {
            return TreeAdaptor.GetType(LT(i));
        }

        /** Make stream jump to a new location, saving old location.
         *  Switch back with pop().
         */
        public virtual void Push(int index) {
            if (_calls == null)
                _calls = new Stack<int>();

            _calls.Push(_p); // save current index
            Seek(index);
        }

        /** Seek back to previous index saved during last push() call.
         *  Return top of stack (return index).
         */
        public virtual int Pop() {
            int ret = _calls.Pop();
            Seek(ret);
            return ret;
        }

        #region Tree rewrite interface

        public virtual void ReplaceChildren(NillableToken parent, int startChildIndex, int stopChildIndex, NillableToken t) {
            // BUG FIX: the original guard was "parent == null".  NillableToken
            // is a struct, so that lifted comparison is always false and the
            // guard was dead code.  Test the wrapped pointer (via the struct's
            // implicit IntPtr conversion) instead, which is what the null
            // check intended.
            if ((IntPtr)parent == IntPtr.Zero)
                return;

            TreeAdaptor.ReplaceChildren(parent, startChildIndex, stopChildIndex, t);
        }

        #endregion

        public virtual string ToString(object start, object stop) {
            // we'll have to walk from start to stop in tree; we're not keeping
            // a complete node stream buffer
            return "n/a";
        }
    }
    /** <summary>
     *  Depth-first enumerator over a NillableToken tree that flattens the tree
     *  into a node sequence, emitting synthetic DOWN/UP navigation nodes around
     *  each subtree's children and an EOF node after the walk completes.
     *  </summary>
     */
    public class MyTreeIterator : IEnumerator<NillableToken> {
        protected TokenTreeAdaptor adaptor;
        protected NillableToken root;
        protected NillableToken tree;
        protected bool firstTime = true;  // NOTE(review): never read in this class; appears vestigial
        private bool reachedEof;          // NOTE(review): never read or written in this class

        // navigation nodes to return during walk and at end
        public NillableToken up;
        public NillableToken down;
        public NillableToken eof;

        /** If we emit UP/DOWN nodes, we need to spit out multiple nodes per
         *  next() call.
         */
        protected Queue<NillableToken> nodes;   // NOTE(review): allocated but never used; MoveNext uses nodes2 instead
        protected Stack<IEnumerator<NillableToken>> nodes2;  // stack of child enumerators, one per open subtree level

        public MyTreeIterator(TokenTreeAdaptor adaptor, NillableToken tree) {
            this.adaptor = adaptor;
            this.tree = tree;
            this.root = tree;
            nodes = new Queue<NillableToken>();
            // Synthesize the shared navigation/EOF sentinel nodes once up front.
            down = adaptor.Create(MyTokenTypes.Down, "DOWN");
            up = adaptor.Create(MyTokenTypes.Up, "UP");
            eof = adaptor.Create(MyTokenTypes.EndOfFile, "EOF");

            Reset();
        }

        #region IDisposable Members
        // Nothing to release; enumerators over in-memory trees hold no resources.
        public void Dispose() { }
        #endregion

        #region IEnumerator<object> Members
        // Node produced by the most recent MoveNext() call.
        // NOTE(review): Reset() does not clear Current back to default — confirm callers
        // always Reset() before the first MoveNext() on reuse.
        public NillableToken Current {
            get;
            private set;
        }
        object IEnumerator.Current {
            get { return Current; }
        }
        public void Reset() {
            // Seed the walk with a single-element enumerator holding the root tree.
            nodes2 = new Stack<IEnumerator<NillableToken>>();
            nodes2.Push(new List<NillableToken> { tree }.GetEnumerator());
        }
        public bool MoveNext() {

            // Once the EOF node has been produced the stack is empty: walk is over.
            if (nodes2.Count == 0)
                return false;

            // If the node just produced roots a subtree, descend: open its child
            // enumerator and emit DOWN before any of the children.
            // NOTE(review): on the very first call Current is default(NillableToken);
            // this relies on HasChildren being false for the default value — confirm.
            if (Current.HasChildren) {
                nodes2.Push(Current.Children().GetEnumerator());
                Current = down;

            } else {

                var currentEnumerator = nodes2.Peek();

                if (currentEnumerator.MoveNext()) {
                    // Next sibling at the current level.
                    Current = currentEnumerator.Current;

                } else {
                    // Current level exhausted: close it and emit UP ...
                    nodes2.Pop();
                    Current = up;

                    // ... unless that was the root level, in which case emit EOF.
                    if (nodes2.Count == 0)
                        Current = eof;
                }
            }

            return true;
        }
        #endregion
    }
    /** <summary>
     *  A stream of tree nodes, accessing nodes from a tree of some kind.
     *  Extends IIntStream with node-level lookahead and tree-rewrite support.
     *  </summary>
     */
    public interface IMyTreeNodeStream : IIntStream {
        /** <summary>Random access to the i-th node of the stream.</summary> */
        NillableToken this[int i] {
            get;
        }
        /** <summary>Get tree node at current input pointer + k ahead (k == 1 is next node).</summary> */
        NillableToken LT(int k);
        /** <summary>The tree (root node) from which this stream pulls nodes.</summary> */
        NillableToken TreeSource {
            get;
        }
        /** <summary>The token stream the tree was built from, if any.</summary> */
        ITokenStream TokenStream {
            get;
        }
        /** <summary>Adaptor used to interrogate and manipulate the nodes.</summary> */
        TokenTreeAdaptor TreeAdaptor {
            get;
        }
        /** <summary>Whether fresh UP/DOWN navigation nodes are created per emission.</summary> */
        bool UniqueNavigationNodes {
            get;
            set;
        }
        /** <summary>Render the nodes between start and stop (inclusive) as text.</summary> */
        string ToString(object start, object stop);
        /** <summary>Replace children startChildIndex..stopChildIndex of parent with t (tree rewriting).</summary> */
        void ReplaceChildren(NillableToken parent, int startChildIndex, int stopChildIndex, NillableToken t);
    }
    /** <summary>
     *  A lookahead queue over an unbuffered stream of elements: supports
     *  LT(k) lookahead, one-element lookback, and mark/rewind within the
     *  current moving window.  Subclasses supply elements via NextElement().
     *  </summary>
     */
    public abstract class MyLookaheadStream<T> : MyFastQueue<T> {
        /** Absolute element index. It's the index of the symbol about to be
         *  read via LT(1). Goes from 0 to numtokens.
         */
        private int _currentElementIndex = 0;

        /** <summary>Element most recently dequeued by Consume(); serves LB(1)/LT(-1).</summary> */
        private T _previousElement;

        /** <summary>Track object returned by nextElement upon end of stream;
         *  Return it later when they ask for LT passed end of input.
         *  </summary>
         */
        T _eof = default(T);

        /** <summary>Track the last mark() call result value for use in rewind().</summary> */
        int _lastMarker;

        /** <summary>tracks how deep mark() calls are nested</summary> */
        int _markDepth;

        /** <summary>The EOF sentinel captured by Fill(), if seen yet.</summary> */
        public T EndOfFile {
            get {
                return _eof;
            }
            protected set {
                _eof = value;
            }
        }

        /** <summary>The element consumed most recently; backs LT(-1).</summary> */
        public T PreviousElement {
            get {
                return _previousElement;
            }
        }

        public override void Clear() {
            base.Clear();
            _currentElementIndex = 0;
            _p = 0; // redundant with base.Clear(), kept for explicitness
            _previousElement = default(T);
        }

        /** <summary>
         *  Implement nextElement to supply a stream of elements to this
         *  lookahead buffer.  Return eof upon end of the stream we're pulling from.
         *  </summary>
         */
        public abstract T NextElement();

        /** <summary>True when o is the stream's end-of-file sentinel.</summary> */
        public abstract bool IsEndOfFile(T o);

        /** <summary>Get and remove first element in queue; override FastQueue.remove()</summary> */
        public override T Dequeue() {
            // Unlike the base class, no Count check: Consume() guarantees an
            // element via SyncAhead(1) before calling us.
            T o = this[0];
            _p++;
            // have we hit end of buffer and not backtracking?
            if (_p == _data.Count && _markDepth == 0) {
                // if so, it's an opportunity to start filling at index 0 again
                Clear(); // size goes to 0, but retains memory
            }
            return o;
        }

        /** <summary>Make sure we have at least one element to remove, even if EOF</summary> */
        public virtual void Consume() {
            SyncAhead(1);
            _previousElement = Dequeue();
            _currentElementIndex++;
        }

        /** <summary>
         *  Make sure we have 'need' elements from current position p. Last valid
         *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
         *  ahead.  If we need 1 element, (p+1-1)==p must be &lt; data.size().
         *  </summary>
         */
        protected virtual void SyncAhead(int need) {
            int n = (_p + need - 1) - _data.Count + 1; // how many more elements we need?
            if (n > 0)
                Fill(n);                 // out of elements?
        }

        /** <summary>add n elements to buffer; records the EOF sentinel when seen</summary> */
        public virtual void Fill(int n) {
            for (int i = 0; i < n; i++) {
                T o = NextElement();
                if (IsEndOfFile(o))
                    _eof = o;

                _data.Add(o);
            }
        }

        /** <summary>Size of entire stream is unknown; we only know buffer size from FastQueue</summary> */
        public override int Count {
            get {
                throw new System.NotSupportedException("streams are of unknown size");
            }
        }

        /** <summary>
         *  Look ahead k elements (k &gt; 0), back one element (k == -1),
         *  or get default(T) for k == 0.  Returns the EOF sentinel when
         *  looking past the end of input.
         *  </summary>
         */
        public virtual T LT(int k) {
            if (k == 0) {
                return default(T);
            }
            if (k < 0) {
                return LB(-k);
            }

            SyncAhead(k);
            // BUGFIX: was `(_p + k - 1) > _data.Count`, an off-by-one that let an
            // index equal to _data.Count fall through to the indexer (which throws)
            // instead of returning the EOF sentinel.  Upstream ANTLR's guard is
            // (p + k - 1) > (data.size() - 1), i.e. >= data.size().
            if ((_p + k - 1) >= _data.Count)
                return _eof;

            return this[k - 1];
        }

        /** <summary>Absolute index of the element LT(1) would return.</summary> */
        public virtual int Index {
            get {
                return _currentElementIndex;
            }
        }

        public virtual int Mark() {
            _markDepth++;
            _lastMarker = _p; // track where we are in buffer, not absolute token index
            return _lastMarker;
        }

        /** <summary>Release one nesting level; marker argument is unused (depth counting only).</summary> */
        public virtual void Release(int marker) {
            if (_markDepth == 0)
                throw new InvalidOperationException();

            _markDepth--;
        }

        public virtual void Rewind(int marker) {
            Seek(marker);
            Release(marker);
        }

        public virtual void Rewind() {
            Rewind(_lastMarker);
        }

        /** <summary>
         *  Seek to a 0-indexed position within data buffer.  Can't handle
         *  case where you seek beyond end of existing buffer.  Normally used
         *  to seek backwards in the buffer. Does not force loading of nodes.
         *  Doesn't seek to absolute position in input stream since this stream
         *  is unbuffered. Seeks only into our moving window of elements.
         *  </summary>
         */
        public virtual void Seek(int index) {
            _p = index;
        }

        /** <summary>Look back k elements; only k == 1 is supported.</summary> */
        protected virtual T LB(int k) {
            if (k == 1)
                return _previousElement;

            throw new ArgumentException("can't look backwards more than one token in this stream");
        }
    }
    /** <summary>
     *  A queue that avoids head-of-list removal cost: consumed elements stay
     *  in the buffer and a cursor (_p) advances past them; the buffer is only
     *  reset once everything has been consumed.
     *  </summary>
     */
    public class MyFastQueue<T> {
        /** <summary>dynamically-sized buffer of elements</summary> */
        internal List<T> _data = new List<T>();
        /** <summary>index of next element to fill</summary> */
        internal int _p = 0;

        /** <summary>Logical size: buffered elements minus those already consumed.</summary> */
        public virtual int Count {
            get {
                return _data.Count - _p;
            }
        }

        /// <summary>
        /// How deep have we gone?  Highest absolute buffer index touched so far.
        /// </summary>
        public virtual int Range {
            get;
            protected set;
        }

        /** <summary>
         *  Return the element i positions ahead of the current element
         *  (i == 0 is the current element).  Indexing is relative to _p,
         *  not absolute into the buffer.  Tracks Range as a side effect.
         *  </summary>
         */
        public virtual T this[int i] {
            get {
                int absIndex = _p + i;
                if (absIndex < 0)
                    throw new ArgumentException(string.Format("queue index {0} < 0", absIndex));
                if (absIndex >= _data.Count)
                    throw new ArgumentException(string.Format("queue index {0} > last index {1}", absIndex, _data.Count - 1));

                if (Range < absIndex)
                    Range = absIndex;

                return _data[absIndex];
            }
        }

        /** <summary>Get and remove first element in queue.</summary> */
        public virtual T Dequeue() {
            if (Count == 0)
                throw new InvalidOperationException();

            T head = this[0];
            _p++;
            if (_p == _data.Count) {
                // Everything consumed: reset so filling starts at index 0 again
                // (size goes to 0, but the list retains its memory).
                Clear();
            }
            return head;
        }

        /** <summary>Append an element to the back of the queue.</summary> */
        public virtual void Enqueue(T o) {
            _data.Add(o);
        }

        /** <summary>Return (without removing) the first element in the queue.</summary> */
        public virtual T Peek() {
            return this[0];
        }

        /** <summary>Drop all elements and rewind the consumption cursor.</summary> */
        public virtual void Clear() {
            _p = 0;
            _data.Clear();
        }

        /** <summary>Return string of current buffer contents; non-destructive</summary> */
        public override string ToString() {
            // Go through the indexer deliberately so Range tracking is preserved.
            System.Text.StringBuilder text = new System.Text.StringBuilder();
            int count = Count;
            for (int i = 0; i < count; i++) {
                if (i > 0)
                    text.Append(" ");
                text.Append(this[i]);
            }
            return text.ToString();
        }
    }
    /** <summary>
     *  A recognizer that walks trees of NillableToken nodes rather than token
     *  streams.  Navigation is expressed with imaginary DOWN/UP nodes emitted
     *  by the node stream around each subtree's children.
     *  </summary>
     */
    public class MyTreeParser : MyBaseRecognizer {
        public const int DOWN = MyTokenTypes.Down;
        public const int UP = MyTokenTypes.Up;

        // precompiled regex used by inContext
        static string dotdot = ".*[^.]\\.\\.[^.].*";
        static string doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*";
        static Regex dotdotPattern = new Regex(dotdot, RegexOptions.Compiled);
        static Regex doubleEtcPattern = new Regex(doubleEtc, RegexOptions.Compiled);

        // The node stream this parser walks.
        protected IMyTreeNodeStream input;

        /** <summary>Create a parser with its own fresh recognizer state.</summary> */
        public MyTreeParser(IMyTreeNodeStream input)
            : base() // highlight that we go to super to set state object
        {
            this.input = input;
        }

        /** <summary>Create a parser sharing a state object with another parser.</summary> */
        public MyTreeParser(IMyTreeNodeStream input, MyRecognizerSharedState state)
            : base(state)
        {
            this.input = input;
        }

        /** <summary>Reset recognizer state and rewind the input stream.</summary> */
        public override void Reset() {
            base.Reset(); // clears all shared recognizer state variables
            if (input != null)
                input.Seek(0); // rewind to the first node
        }

        /** <summary>Set the input stream</summary> */
        public virtual void SetTreeNodeStream(IMyTreeNodeStream input) {
            this.input = input;
        }

        /** <summary>Get the input stream</summary> */
        public virtual IMyTreeNodeStream GetTreeNodeStream() {
            return input;
        }

        public override string SourceName {
            get {
                return input.SourceName;
            }
        }

        protected override object GetCurrentInputSymbol(IIntStream input) {
            // Current symbol is simply the next node in the tree node stream.
            return ((IMyTreeNodeStream)input).LT(1);
        }

        protected override object GetMissingSymbol(IIntStream input,
                                          RecognitionException e,
                                          int expectedTokenType,
                                          BitSet follow) {
            // Conjuring up a missing node for error recovery is not supported here.
            throw new NotImplementedException();
        }

        /** <summary>
         *  Match '.' in tree parser has special meaning.  Skip node or
         *  entire tree if node has children.  If children, scan until
         *  corresponding UP node.
         *  </summary>
         */
        public override void MatchAny(IIntStream ignore) {
            state.errorRecovery = false;
            state.failed = false;

            // Always consume the current node.
            input.Consume();

            // A DOWN immediately after means the consumed node rooted a subtree:
            // skip to the matching UP, counting nesting to find the right one.
            if (input.LA(1) != DOWN)
                return;

            input.Consume(); // eat the DOWN
            int depth = 1;
            while (depth > 0) {
                int nextType = input.LA(1);
                if (nextType == DOWN) {
                    depth++;
                } else if (nextType == UP) {
                    depth--;
                } else if (nextType == MyTokenTypes.EndOfFile) {
                    return; // ran off the end of the stream; give up
                }
                input.Consume();
            }
        }

        /** <summary>
         *  We have DOWN/UP nodes in the stream that have no line info; override.
         *  plus we want to alter the exception type.  Don't try to recover
         *  from tree parser errors inline...
         *  </summary>
         */
        protected override object RecoverFromMismatchedToken(IIntStream input, int ttype, BitSet follow) {
            throw new MismatchedTreeNodeException(ttype, (ITreeNodeStream)input);
        }

        /** <summary>
         *  Prefix error message with the grammar name because message is
         *  always intended for the programmer because the parser built
         *  the input tree not the user.
         *  </summary>
         */
        public override string GetErrorHeader(RecognitionException e) {
            string approx = e.ApproximateLineInfo ? "after " : "";
            return GrammarFileName + ": node from " + approx + "line " + e.Line + ":" + e.CharPositionInLine;
        }

        /** <summary>
         *  Tree parsers parse nodes, which usually carry a token payload.
         *  Set the exception token and do the default behavior.
         *  </summary>
         */
        public override string GetErrorMessage(RecognitionException e, string[] tokenNames) {
            if (this is MyTreeParser) {
                ITreeAdaptor adaptor = ((ITreeNodeStream)e.Input).TreeAdaptor;
                e.Token = adaptor.GetToken(e.Node);
                if (e.Token == null) {
                    // UP/DOWN navigation nodes have no token payload; synthesize one.
                    e.Token = new CommonToken(adaptor.GetType(e.Node),
                                              adaptor.GetText(e.Node));
                }
            }
            return base.GetErrorMessage(e, tokenNames);
        }

        // Tracing hooks compiled in only when ANTLR_TRACE is defined.
        [Conditional("ANTLR_TRACE")]
        public virtual void TraceIn(string ruleName, int ruleIndex) {
            base.TraceIn(ruleName, ruleIndex, input.LT(1));
        }

        [Conditional("ANTLR_TRACE")]
        public virtual void TraceOut(string ruleName, int ruleIndex) {
            base.TraceOut(ruleName, ruleIndex, input.LT(1));
        }

    }
    /** <summary>
     *  How to create, navigate, and mutate NillableToken trees.  The parser
     *  and tree rewriters go through this interface rather than touching
     *  node internals directly.
     *  </summary>
     */
    public interface IMyTreeAdaptor {
        #region Construction
        /** <summary>
         *  Create a tree node from Token NillableToken; for CommonTree type trees,
         *  then the token just becomes the payload.  This is the most
         *  common create call.
         *  </summary>
         *
         *  <remarks>
         *  Override if you want another kind of node to be built.
         *  </remarks>
         */
        NillableToken Create(NillableToken payload);

        /** <summary>
         *  Create a new node derived from a token, with a new token type.
         *  This is invoked from an imaginary node ref on right side of a
         *  rewrite rule as IMAG[$tokenLabel].
         *  </summary>
         *
         *  <remarks>
         *  This should invoke createToken(Token).
         *  </remarks>
         */
        NillableToken Create(int tokenType, NillableToken fromToken);

        /** <summary>
         *  Same as create(tokenType,fromToken) except set the text too.
         *  This is invoked from an imaginary node ref on right side of a
         *  rewrite rule as IMAG[$tokenLabel, "IMAG"].
         *  </summary>
         *
         *  <remarks>
         *  This should invoke createToken(Token).
         *  </remarks>
         */
        NillableToken Create(int tokenType, NillableToken fromToken, string text);

        /** <summary>
         *  Same as create(fromToken) except set the text too.
         *  This is invoked when the <c>text</c> terminal option is set, as in
         *  IMAG&lt;text='IMAG'&gt;.
         *  </summary>
         *
         *  <remarks>
         *  This should invoke createToken(Token).
         *  </remarks>
         */
        NillableToken Create(NillableToken fromToken, string text);

        /** <summary>
         *  Create a new node derived from a token, with a new token type.
         *  This is invoked from an imaginary node ref on right side of a
         *  rewrite rule as IMAG["IMAG"].
         *  </summary>
         *
         *  <remarks>
         *  This should invoke createToken(int,String).
         *  </remarks>
         */
        NillableToken Create(int tokenType, string text);

        /** <summary>Duplicate a single tree node.</summary>
         *  <remarks>Override if you want another kind of node to be built.</remarks>
         */
        NillableToken DupNode(NillableToken treeNode);

        /** <summary>Duplicate a single tree node, overriding its token type.</summary> */
        NillableToken DupNode(int type, NillableToken treeNode);

        /** <summary>Duplicate a single tree node, overriding its text.</summary> */
        NillableToken DupNode(NillableToken treeNode, string text);

        /** <summary>Duplicate a single tree node, overriding both type and text.</summary> */
        NillableToken DupNode(int type, NillableToken treeNode, string text);

        /** <summary>Duplicate tree recursively, using dupNode() for each node</summary> */
        NillableToken DupTree(NillableToken tree);

        /** <summary>
         *  Return a nil node (an empty but non-null node) that can hold
         *  a list of element as the children.  If you want a flat tree (a list)
         *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
         *  </summary>
         */
        NillableToken Nil();

        /** <summary>
         *  Return a tree node representing an error.  This node records the
         *  tokens consumed during error recovery.  The start token indicates the
         *  input symbol at which the error was detected.  The stop token indicates
         *  the last symbol consumed during recovery.
         *  </summary>
         *
         *  <remarks>
         *  You must specify the input stream so that the erroneous text can
         *  be packaged up in the error node.  The exception could be useful
         *  to some applications; default implementation stores ptr to it in
         *  the CommonErrorNode.
         *
         *  This only makes sense during token parsing, not tree parsing.
         *  Tree parsing should happen only when parsing and tree construction
         *  succeed.
         *  </remarks>
         */
        NillableToken ErrorNode(IMyTokenStream input, NillableToken start, NillableToken stop, RecognitionException e);

        /** <summary>Is tree considered a nil node used to make lists of child nodes?</summary> */
        bool IsNil(NillableToken tree);

        /** <summary>
         *  Add a child to the tree t.  If child is a flat tree (a list), make all
         *  in list children of t.  Warning: if t has no children, but child does
         *  and child isNil then you can decide it is ok to move children to t via
         *  t.children = child.children; i.e., without copying the array.  Just
         *  make sure that this is consistent with how the user will build
         *  ASTs.  Do nothing if t or child is null.
         *  </summary>
         */
        void AddChild(NillableToken t, NillableToken child);

        /** <summary>
         *  If oldRoot is a nil root, just copy or move the children to newRoot.
         *  If not a nil root, make oldRoot a child of newRoot.
         *  </summary>
         *
         *  <remarks>
         *    old=^(nil a b c), new=r yields ^(r a b c)
         *    old=^(a b c), new=r yields ^(r ^(a b c))
         *
         *  If newRoot is a nil-rooted single child tree, use the single
         *  child as the new root node.
         *
         *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
         *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
         *
         *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
         *
         *    old=null, new=r yields r
         *    old=null, new=^(nil r) yields ^(nil r)
         *
         *  Return newRoot.  Throw an exception if newRoot is not a
         *  simple node or nil root with a single child node--it must be a root
         *  node.  If newRoot is ^(nil x) return x as newRoot.
         *
         *  Be advised that it's ok for newRoot to point at oldRoot's
         *  children; i.e., you don't have to copy the list.  We are
         *  constructing these nodes so we should have this control for
         *  efficiency.
         *  </remarks>
         */
        NillableToken BecomeRoot(NillableToken newRoot, NillableToken oldRoot);

        /** <summary>
         *  Given the root of the subtree created for this rule, post process
         *  it to do any simplifications or whatever you want.  A required
         *  behavior is to convert ^(nil singleSubtree) to singleSubtree
         *  as the setting of start/stop indexes relies on a single non-nil root
         *  for non-flat trees.
         *  </summary>
         *
         *  <remarks>
         *  Flat trees such as for lists like "idlist : ID+ ;" are left alone
         *  unless there is only one ID.  For a list, the start/stop indexes
         *  are set in the nil node.
         *
         *  This method is executed after all rule tree construction and right
         *  before setTokenBoundaries().
         *  </remarks>
         */
        NillableToken RulePostProcessing(NillableToken root);

        /** <summary>For identifying trees.</summary>
         *
         *  <remarks>
         *  How to identify nodes so we can say "add node to a prior node"?
         *  Even becomeRoot is an issue.  Use System.identityHashCode(node)
         *  usually.
         *  </remarks>
         */
        int GetUniqueID(NillableToken node);


        // R e w r i t e  R u l e s

        /** <summary>
         *  Create a node for newRoot make it the root of oldRoot.
         *  If oldRoot is a nil root, just copy or move the children to newRoot.
         *  If not a nil root, make oldRoot a child of newRoot.
         *  </summary>
         *
         *  <returns>
         *  Return node created for newRoot.
         *  </returns>
         *
         *  <remarks>
         *  Be advised: when debugging ASTs, the DebugTreeAdaptor manually
         *  calls create(Token child) and then plain becomeRoot(node, node)
         *  because it needs to trap calls to create, but it can't since it delegates
         *  to not inherits from the TreeAdaptor.
         *  </remarks>
         */
        //NillableToken BecomeRoot(NillableToken newRoot, NillableToken oldRoot);

        #endregion


        #region Content

        /** <summary>For tree parsing, I need to know the token type of a node</summary> */
        int GetType(NillableToken t);

        /** <summary>Node constructors can set the type of a node</summary> */
        void SetType(NillableToken t, int type);

        /** <summary>Get the text payload of a node.</summary> */
        string GetText(NillableToken t);

        /** <summary>Node constructors can set the text of a node</summary> */
        void SetText(NillableToken t, string text);

        /** <summary>
         *  Return the token NillableToken from which this node was created.
         *  Currently used only for printing an error message.
         *  The error display routine in BaseRecognizer needs to
         *  display where in the input the error occurred. If your
         *  tree implementation does not store information that can
         *  lead you to the token, you can create a token filled with
         *  the appropriate information and pass that back.  See
         *  BaseRecognizer.getErrorMessage().
         *  </summary>
         */
        IToken GetToken(NillableToken t);

        /** <summary>
         *  Where are the bounds in the input token stream for this node and
         *  all children?  Each rule that creates AST nodes will call this
         *  method right before returning.  Flat trees (i.e., lists) will
         *  still usually have a nil root node just to hold the children list.
         *  That node would contain the start/stop indexes then.
         *  </summary>
         */
        void SetTokenBoundaries(NillableToken t, IToken startToken, IToken stopToken);

        /** <summary>Get the token start index for this subtree; return -1 if no such index</summary> */
        int GetTokenStartIndex(NillableToken t);

        /** <summary>Get the token stop index for this subtree; return -1 if no such index</summary> */
        int GetTokenStopIndex(NillableToken t);

        #endregion


        #region Navigation / Tree Parsing

        /** <summary>Get a child 0..n-1 node</summary> */
        NillableToken GetChild(NillableToken t, int i);

        /** <summary>Set ith child (0..n-1) to t; t must be non-null and non-nil node</summary> */
        void SetChild(NillableToken t, int i, NillableToken child);

        /** <summary>Remove ith child and shift children down from right.</summary> */
        NillableToken DeleteChild(NillableToken t, int i);

        /** <summary>How many children?  If 0, then this is a leaf node</summary> */
        int GetChildCount(NillableToken t);

        /** <summary>
         *  Who is the parent node of this node; if null, implies node is root.
         *  If your node type doesn't handle this, it's ok but the tree rewrites
         *  in tree parsers need this functionality.
         *  </summary>
         */
        NillableToken GetParent(NillableToken t);
        void SetParent(NillableToken t, NillableToken parent);

        /** <summary>
         *  What index is this node in the child list? Range: 0..n-1
         *  If your node type doesn't handle this, it's ok but the tree rewrites
         *  in tree parsers need this functionality.
         *  </summary>
         */
        int GetChildIndex(NillableToken t);
        void SetChildIndex(NillableToken t, int index);

        /** <summary>
         *  Replace from start to stop child index of parent with t, which might
         *  be a list.  Number of children may be different after this call.
         *  </summary>
         *
         *  <remarks>
         *  If parent is null, don't do anything; must be at root of overall tree.
         *  Can't replace whatever points to the parent externally.  Do nothing.
         *  </remarks>
         */
        void ReplaceChildren(NillableToken parent, int startChildIndex, int stopChildIndex, NillableToken t);

        #endregion
    }
}
