﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Compilex.BaseLibrary;
using Compilex.Automata;
using Infraestruturex.Automata;
using Infraestruturex.BaseLibrary;
using System.Text.RegularExpressions;
using System.IO;

namespace Compilex.Automata
{
    /// <summary>
    /// Lexical analyzer: preprocesses the source (expanding <c>#includex</c> directives),
    /// splits it into tokens using a set of <see cref="TokenRecognizer"/>s built from the
    /// language configuration, and then serves the tokens through the
    /// <see cref="ITokenProvider"/> read/peek/consume interface.
    /// </summary>
    public class Lexer : ITokenProvider
    {
        // Line counter used only during Tokenize(); starts at (1 - addedLinesCount) so
        // that lines injected by #includex expansion do not shift the numbering of the
        // user's own source lines.
        private int CurrentLineWhileParsing { get; set; }

        /// <summary>Source line number of the token at the current read position.</summary>
        public int CurrentLine
        {
            get
            {
                return TokensLines[currentTokenIndex].Second;
            }
        }

        // Reserved-word table: maps each keyword lexeme to its token instance.
        private SimpleHashtable<string, WordToken> words = new SimpleHashtable<string, WordToken>();
        private SimpleList<TokenRecognizer> tokenRecognizers;

        /// <summary>All recognized tokens, each paired with the line it was found on.</summary>
        public Pair<Token, int>[] TokensLines { get; private set; }
        int currentTokenIndex = 0;
        public string Input { get; set; }
        private string unparsedInput;

        /// <summary>
        /// Builds the recognizers for <paramref name="language"/> and eagerly tokenizes
        /// <paramref name="source"/>. Throws <see cref="UnrecognizedInputException"/> if an
        /// #includex target cannot be read or if some input matches no recognizer.
        /// </summary>
        public Lexer(LanguageConfiguration language, string source)
        {
            ReserveKeywords(language.reservedWords);
            TokenRecognizerBuilder recognizerBuilder = new TokenRecognizerBuilder(language);
            this.tokenRecognizers = recognizerBuilder.TokenRecognizers;
            this.Input = source;
            // Without a trailing line break, a comment at the very end of the text
            // would not be recognized.
            if (!this.Input.EndsWith("\n"))
            {
                this.Input += "\n";
            }
            int addedLinesCount = Preprocess();
            this.unparsedInput = this.Input;

            // Offset the counter so the first line of the ORIGINAL source is line 1
            // even after include expansion prepended extra lines.
            this.CurrentLineWhileParsing = 1 - addedLinesCount;

            this.Tokenize();
        }

        /// <summary>
        /// Expands <c>#includex &lt;file&gt;</c> directives: the referenced file's content is
        /// prepended to the output (so all includes end up at the top, in reverse order of
        /// appearance) and the directive line itself is dropped. Returns how many lines
        /// the expansion added, so the caller can compensate the line counter.
        /// </summary>
        private int Preprocess()
        {
            // NOTE(review): only "\r\n" is treated as a line separator here (and when
            // counting included lines below); sources or include files using bare "\n"
            // endings will be seen as one long line — confirm all inputs are CRLF.
            int addedLinesCount = 0;
            StringBuilder body = new StringBuilder();
            StringBuilder includes = new StringBuilder();
            string[] lines = this.Input.Split(new string[] { "\r\n" }, StringSplitOptions.None);
            foreach (string line in lines)
            {
                if (Regex.Match(line, @"^[\s]*#includex[\s]+<[^>]+>").Success)
                {
                    // Extract the path between the angle brackets.
                    string fileToBeIncluded = Regex.Match(line, "<[^>]+>").Value;
                    fileToBeIncluded = fileToBeIncluded.Substring(1, fileToBeIncluded.Length - 2);
                    string include;
                    try
                    {
                        include = File.ReadAllText(fileToBeIncluded);
                    }
                    catch (Exception e)
                    {
                        throw new UnrecognizedInputException("Nao foi possivel abrir o arquivo " + fileToBeIncluded + ": " + e.Message);
                    }
                    // Later includes go BEFORE earlier ones, matching the original
                    // prepend-to-front behavior.
                    includes.Insert(0, include + "\r\n");
                    addedLinesCount += include.Split(new string[] { "\r\n" }, StringSplitOptions.None).Length - 1;
                }
                else
                {
                    body.Append(line).Append("\r\n");
                }
            }
            this.Input = includes.ToString() + body.ToString();
            return addedLinesCount;
        }

        /// <summary>
        /// Consumes the whole input, recording each non-null token together with the
        /// line it was recognized on. Null tokens (e.g. skipped whitespace/comments,
        /// per GetLastToken) are discarded but still advance the line counter.
        /// </summary>
        private void Tokenize()
        {
            // NOTE(review): if a recognizer ever reports a zero-length match, the input
            // never shrinks and this loop will not terminate — verify recognizers
            // return a negative length on failure rather than 0.
            SimpleList<Pair<Token, int>> tokens = new SimpleList<Pair<Token, int>>();
            while (unparsedInput.Length > 0)
            {
                Token token = GetNextTokenInternal();
                if (token != null)
                {
                    tokens.Add(new Pair<Token, int>(token, CurrentLineWhileParsing));
                }
            }
            TokensLines = tokens.ToArray();
        }

        // Registers every reserved word so identifiers matching a keyword are
        // classified as KeywordToken instead of a plain word token.
        private void ReserveKeywords(string[] reservedWords)
        {
            foreach (string keyword in reservedWords)
            {
                words.Add(keyword, new KeywordToken(keyword));
            }
        }

        /// <summary>Returns all tokens, stripped of their line-number pairing.</summary>
        public Token[] Analyze()
        {
            Token[] tokens = new Token[TokensLines.Length];
            for (int i = 0; i < tokens.Length; i++)
            {
                tokens[i] = TokensLines[i].First;
            }
            return tokens;
        }

        /// <summary>Rewinds the token stream to the beginning.</summary>
        public void Reset()
        {
            currentTokenIndex = 0;
        }

        /// <summary>True while there are unconsumed tokens left.</summary>
        public bool HasMoreTokens()
        {
            return currentTokenIndex < TokensLines.Length;
        }

        /// <summary>Token class (name) of the next token, without consuming it.</summary>
        public string PeekNextTokenClass()
        {
            return PeekNextToken().Name;
        }

        /// <summary>Lexeme of the next token, without consuming it.</summary>
        public string PeekNextTokenLexeme()
        {
            return PeekNextToken().Lexeme;
        }

        /// <summary>
        /// Lexeme of the token AFTER the next one (two-token lookahead), or null when
        /// fewer than two tokens remain.
        /// </summary>
        public string TryPeekNextNextTokenLexeme()
        {
            if (currentTokenIndex + 1 < TokensLines.Length)
                return TokensLines[currentTokenIndex + 1].First.Lexeme;
            else
                return null;
        }

        /// <summary>Advances past the next token without returning it.</summary>
        public void ConsumeNextToken()
        {
            currentTokenIndex++;
        }

        /// <summary>Next token, without consuming it.</summary>
        public Token PeekNextToken()
        {
            return TokensLines[currentTokenIndex].First;
        }

        /// <summary>Next token, consuming it.</summary>
        public Token GetNextToken()
        {
            Token token = PeekNextToken();
            ConsumeNextToken();
            return token;
        }

        /// <summary>
        /// Recognizes and removes the longest matching prefix of the unparsed input,
        /// returning its token (possibly null for skippable lexemes) and keeping the
        /// line counter in sync with the consumed text.
        /// </summary>
        private Token GetNextTokenInternal()
        {
            if (unparsedInput.Length == 0)
                throw new InvalidOperationException("Todo o fonte já foi analisado");
            int charsRead;
            TokenRecognizer bestRecognizer = RecognizeLongestPrefix(out charsRead);
            if (charsRead != -1)
            {
                string read = unparsedInput.Substring(0, charsRead);
                // BUG FIX: a single lexeme (e.g. a block comment or a run of blank
                // lines) may span several lines. The old code advanced the counter by
                // at most one ("if (read.Contains('\n')) CurrentLineWhileParsing++"),
                // making every subsequent line number too small. Count every newline.
                CurrentLineWhileParsing += read.Count(c => c == '\n');
                unparsedInput = unparsedInput.Substring(charsRead);
                return bestRecognizer.GetLastToken(words);
            }
            else
            {
                throw new UnrecognizedInputException(unparsedInput);
            }
        }

        /// <summary>
        /// Runs every recognizer against the unparsed input and returns the one that
        /// matched the longest prefix (maximal-munch). On a tie, the recognizer
        /// registered FIRST wins, since only a strictly longer match replaces the
        /// current best. <paramref name="charsRead"/> is -1 when nothing matched.
        /// </summary>
        private TokenRecognizer RecognizeLongestPrefix(out int charsRead)
        {
            int biggestPrefix = -1;
            TokenRecognizer bestRecognizer = null;
            for (int i = 0; i < tokenRecognizers.Count; i++)
            {
                int lenRecognized = tokenRecognizers[i].TryRecognizeLongestPrefix(unparsedInput);
                if (lenRecognized > biggestPrefix)
                {
                    biggestPrefix = lenRecognized;
                    bestRecognizer = tokenRecognizers[i];
                }
            }
            charsRead = biggestPrefix;
            return bestRecognizer;
        }
    }
}
