﻿using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;

namespace KxParser.Lexer
{
    /// <summary>
    /// KxLexer class based on the "Poor Man's Lexer"; that's why it's called Pm (poor man's).
    /// This version is slightly modified: it tokenizes the input read from a TextReader
    /// against a set of TokenDefinitions, collecting matched tokens and recording
    /// error tokens/indexes for input that matches no definition.
    /// </summary>
    /// <seealso href="http://stackoverflow.com/questions/673113/poor-mans-lexer-for-c"/>
    [Obsolete("Use Lexer class instead")]
    public sealed class PmLexer : IDisposable
    {
        /// <summary>Possible outcomes of the lexer's construction and analysis.</summary>
        public enum ELexerState
        {
            TokenUsedReservedWordError,
            LexicalAnalysisFail,
            TokenNameEmpty,
            OK
        }

        private ELexerState _state; // Indicates whether the lexer is in some kind of error state

        private string _tokenContents; // Content of the token matched most recently
        private int _lineNumber;       // 1-based number of the line currently being scanned
        private int _lineIndex;        // Character index within the current line
        private int _totalIndex;       // Character index within the whole input

        private readonly List<Token> _kxTokens = new List<Token>();
        private readonly List<int> _errorIndexes = new List<int>();

        private readonly TextReader _reader;
        private readonly TokenDefinition[] _tokenDefinitions;
        private string _lineRemaining; // Unconsumed tail of the current line; null once EOF is reached
        private readonly string[] _errorDelimiters;

        /// <summary>
        /// PmLexer constructor. Validates the token definitions, then immediately runs
        /// the lexical analysis over the whole input; inspect <see cref="GetState"/> afterwards.
        /// </summary>
        /// <param name="reader">Input string represented as TextReader</param>
        /// <param name="tokenDefinitions">definition of tokens</param>
        /// <param name="errorDelimiters">If lexer finds an error, it won't stop searching for more tokens, it just skips some characters until it reaches errorDelimiter.
        /// For example, if we have an input: 17 + UNKNOWN_FUNCTION(param1) > 96, when lexer reaches UNKNOWN_FUNCTION and that function is not defined, it will skip
        /// all characters until it reaches errorDelimiter. If you define left paren as errorDelimiter, lexer will skip all characters to left paren (UNKNOWN_FUNCTION) and
        /// continue from left paren.</param>
        public PmLexer(TextReader reader, TokenDefinition[] tokenDefinitions, string[] errorDelimiters)
        {
            _state = ELexerState.OK;

            _reader = reader;
            _errorDelimiters = errorDelimiters;

            // Check for reserved words usage
            if (tokenDefinitions.Any(tokenDefinition => KxParserGlobal.LexerReservedWords.Contains(tokenDefinition.Token)))
                _state = ELexerState.TokenUsedReservedWordError;

            // After that, we add reserved words
            _tokenDefinitions = new[] { new TokenDefinition(@"\$", "$") }.Concat(tokenDefinitions).ToArray();

            foreach (var tokenDefinition in _tokenDefinitions)
            {
                if (string.IsNullOrEmpty(tokenDefinition.Token.ToString()))
                    _state = ELexerState.TokenNameEmpty;
            }

            if (_state != ELexerState.OK) return;

            ReadNextInputLine();

            if (!DoLexicAnalysis())
                _state = ELexerState.LexicalAnalysisFail;
        }

        /// <summary>
        /// Gets the current state of PmLexer
        /// </summary>
        /// <returns>ELexerState</returns>
        public ELexerState GetState()
        {
            return _state;
        }

        /// <summary>
        /// Gets all tokens read so far (including ignored and error tokens)
        /// </summary>
        /// <returns>All tokens produced by the analysis</returns>
        public Token[] GetAllTokens()
        {
            return _kxTokens.ToArray();
        }

        ///<summary>
        /// Returns error tokens only (tokens whose name equals the global error-token name)
        ///</summary>
        ///<returns>Error tokens</returns>
        public Token[] GetErrorTokens()
        {
            return (from et in _kxTokens where et.Name.Equals(KxParserGlobal.ErrorTokenName) select et).ToArray();
        }

        /// <summary>
        /// Advances to the next non-empty input line; leaves _lineRemaining null at EOF.
        /// </summary>
        private void ReadNextInputLine()
        {
            do
            {
                _lineRemaining = _reader.ReadLine();

                if (_lineRemaining != null)
                {
                    ++_lineNumber;
                    _lineIndex = 0;
                }
            } while (_lineRemaining != null && _lineRemaining.Length == 0);
        }


        /// <summary>
        /// Gets only non-ignored tokens
        /// </summary>
        /// <returns>Tokens whose IsIgnored flag is false</returns>
        public Token[] GetNonIgnoredTokens()
        {
            return (from t in _kxTokens where !t.IsIgnored select t).ToArray();
        }

        /// <summary>
        /// Analyzes the input string: repeatedly matches token definitions against the
        /// remaining input, and on failure skips characters (recording error indexes)
        /// until an error delimiter or end of line is reached.
        /// </summary>
        /// <returns>True if analysis was successful, otherwise False</returns>
        private bool DoLexicAnalysis()
        {
            var currentErrorTokenString = "";

            if (_state != ELexerState.OK)
                return false;

            while (_lineRemaining != null)
            {
                var isNextTokenMatched = false;

                // Definitions are tried in order; the first match wins.
                for (int i = 0; i < _tokenDefinitions.Length; i++)
                {
                    TokenDefinition def = _tokenDefinitions[i];

                    var matched = def.Matcher.Match(_lineRemaining);
                    if (matched > 0)
                    {
                        isNextTokenMatched = true;

                        _tokenContents = _lineRemaining.Substring(0, matched);
                        _lineRemaining = _lineRemaining.Substring(matched);

                        _kxTokens.Add(new Token
                                        {
                                            Name = def.Token.ToString(),
                                            Content = _tokenContents,
                                            IsIgnored = def.IsIgnored,
                                            LineNumber = _lineNumber,
                                            StartIndex = _lineIndex,
                                            EndIndex = _lineIndex + matched - 1,
                                            TotalIndex = _totalIndex
                                        });

                        // Later definitions may also match the same content; record them
                        // as additional names on the token just produced.
                        FindMoreTokenMatches(i + 1, _tokenContents);

                        _lineIndex += matched;
                        _totalIndex += matched;

                        if (_lineRemaining.Length == 0)
                            ReadNextInputLine();

                        break;
                    }
                }

                if (!isNextTokenMatched)
                {
                    // No definition matched: consume characters one at a time, remembering
                    // each position as an error index, until an error delimiter (or the
                    // end of the line) is reached.
                    do
                    {
                        _errorIndexes.Add(_totalIndex);
                        currentErrorTokenString += _lineRemaining[0];
                        _lineRemaining = _lineRemaining.Substring(1);

                        _lineIndex++;
                        _totalIndex++;

                        if (_lineRemaining.Length == 0)
                        {
                            AddErrorToken(ref currentErrorTokenString, false, true);
                            ReadNextInputLine();
                            break;
                        }

                    } while (_lineRemaining != null && _lineRemaining.Length > 0 && !_errorDelimiters.Contains(_lineRemaining[0].ToString()));

                }
                else if (!string.IsNullOrEmpty(currentErrorTokenString))
                {
                    // A valid token followed the skipped characters: flush the pending
                    // error token so it appears before the token just matched.
                    AddErrorToken(ref currentErrorTokenString, false, false);
                }
            }

            // Input ended with unflushed error characters.
            if (!string.IsNullOrEmpty(currentErrorTokenString))
            {
                _lineIndex++;
                _totalIndex++;
                AddErrorToken(ref currentErrorTokenString, true, false);
            }

            if (_errorIndexes.Count > 0)
                return false;

            // Append the global "ender" token if the input did not already contain one.
            var enderFound = _kxTokens.Any(mxToken => mxToken.Name.Equals(KxParserGlobal.Ender));

            if (!enderFound)
                _kxTokens.Add(new Token { Name = KxParserGlobal.Ender, Content = KxParserGlobal.Ender, IsIgnored = false, StartIndex = _lineIndex, EndIndex = _lineIndex, TotalIndex = _totalIndex });

            return true;
        }

        /// <summary>
        /// Materializes the accumulated error characters into an error token and resets
        /// the accumulator. Index fields are back-computed from the current position;
        /// when the error did not end the line, the length of the token matched after it
        /// (the current last token) is subtracted, and the error token is inserted before
        /// that token so the token list stays in input order.
        /// </summary>
        /// <param name="errorToken">Accumulated unmatched characters; cleared on return</param>
        /// <param name="isLast">True when this is the final flush at end of input</param>
        /// <param name="lastInLine">True when the error ran to the end of a line</param>
        void AddErrorToken(ref string errorToken, bool isLast, bool lastInLine)
        {
            var newToken = new Token
                               {
                                   Name = KxParserGlobal.ErrorTokenName,
                                   Content = errorToken,
                                   IsIgnored = true,
                                   LineNumber = _lineNumber,
                                   StartIndex = _lineIndex - errorToken.Length - ((lastInLine)? 0: _kxTokens.Last().Content.Length),
                                   EndIndex = _lineIndex - ((lastInLine)? 0 : _kxTokens.Last().Content.Length) - 1,
                                   TotalIndex = _totalIndex - errorToken.Length - ((lastInLine)? 0 : _kxTokens.Last().Content.Length)
                               };
            if (!isLast && !lastInLine)
            {
                var totalTokens = _kxTokens.Count;
                _kxTokens.Insert(totalTokens - 1, newToken);
            }
            else
            {
                _kxTokens.Add(newToken);
            }
            errorToken = "";
        }

        /// <summary>
        /// Because the system enables multiple tokens with the same content: records every
        /// further definition that also matches the given content as an additional name
        /// on the most recently added token.
        /// </summary>
        /// <param name="startPosition">Position in _tokenDefinitions from where the method should search for more matching tokens</param>
        /// <param name="content">Content to search</param>
        void FindMoreTokenMatches(int startPosition, string content)
        {
            for (int i = startPosition; i < _tokenDefinitions.Length; i++)
            {
                TokenDefinition def = _tokenDefinitions[i];

                var matched = def.Matcher.Match(content);
                if (matched > 0)
                {
                    _kxTokens.Last().AdditionalNames.Add(def.Token.ToString());
                }
            }
        }

        /// <summary>
        /// If there are errors in lexical analysis, this method will return error positions in string
        /// </summary>
        /// <returns>Error positions in input string</returns>
        public int[] GetErrorIndexes()
        {
            return _errorIndexes.ToArray();
        }

        /// <summary>
        /// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
        /// </summary>
        /// <filterpriority>2</filterpriority>
        public void Dispose()
        {
            _reader.Dispose();
        }
    }
}
