using System.Diagnostics;
using System.IO;
using System.Text.RegularExpressions;
using Lucene.Net.Analysis;

namespace SpellCenter.Core
{
    /// <summary>
    /// Tokenizer that wraps Lucene's <see cref="CharTokenizer"/> and strips
    /// characters outside a configurable "word-building" set (by default,
    /// everything that is Unicode punctuation is stripped) from the start and
    /// end of each token, while tracking absolute offsets of the current token.
    /// </summary>
    internal sealed class SpellWordTokenizer : CharTokenizer
    {
        // Trimming regex: captures the inner word in group "Word", skipping
        // leading/trailing characters NOT in the word-building set.
        private readonly Regex _reWordBuildingChars;

        // End offset (exclusive) of the most recently returned token.
        private long _offset;

        // Absolute start offset of the trimmed word inside the most recent
        // token; -1 until the first token has been read.
        private long _startOfCurrentToken = -1;

        /// <summary>
        /// Creates a tokenizer whose word-building characters are everything
        /// that is not in the Unicode Punctuation category (regex class \P{P}).
        /// </summary>
        /// <param name="in_Renamed">Source of characters to tokenize.</param>
        public SpellWordTokenizer(TextReader in_Renamed)
            : this(in_Renamed, "\\P{P}")
        {
        }

        /// <summary>
        /// Creates a tokenizer with an explicit word-building character set.
        /// </summary>
        /// <param name="reader">Source of characters to tokenize.</param>
        /// <param name="characterSet">
        /// A regex character-class fragment describing characters that may
        /// appear inside a word (e.g. "\\P{P}" or "a-zA-Z"). It is embedded
        /// verbatim into negated character classes, so it must be valid regex
        /// class content.
        /// </param>
        public SpellWordTokenizer(TextReader reader, string characterSet)
            : base(reader)
        {
            _reWordBuildingChars = BuildWordRegex(characterSet);
        }

        /// <summary>
        /// End offset (exclusive) of the last token returned by <see cref="Next"/>.
        /// </summary>
        public long Offset
        {
            get { return _offset; }
        }

        /// <summary>
        /// Absolute start offset of the trimmed word within the last token.
        /// Must not be read before the first successful call to <see cref="Next"/>.
        /// </summary>
        public long StartOfCurrentToken
        {
            get
            {
                Debug.Assert(_startOfCurrentToken > -1, "Can't call this until you've read at least one token");
                return _startOfCurrentToken;
            }
        }

        // Builds the trimming regex. Because every piece between the anchors
        // is optional, the pattern matches ANY input string, so Match() in
        // Next() can never fail.
        private static Regex BuildWordRegex(string characterSet)
        {
            string reStr = "^[^" + characterSet + "]*(?<Word>.*?)[^" + characterSet + "]*$";
            return new Regex(reStr);
        }

        /// <summary>
        /// Returns the next token whose trimmed text is non-empty, or null at
        /// end of input. Updates <see cref="Offset"/> and
        /// <see cref="StartOfCurrentToken"/> as a side effect.
        /// </summary>
        public override Token Next()
        {
            Token ret;
            do
            {
                // Use Lucene's highly tested Next() method for raw tokenization.
                Token token = base.Next();

                if (token == null)
                {
                    // End of stream; the interface requires returning null.
                    return null;
                }

                Match m = _reWordBuildingChars.Match(token.TermText());
                // See BuildWordRegex: the pattern matches any input.
                Debug.Assert(m.Success);

                string word = m.Groups["Word"].Value;
                // TODO: the returned token keeps the ORIGINAL start/end
                // offsets, so callers still see offsets covering characters
                // the regex trimmed away; why hasn't this been factored away?
                ret = new Token(word, token.StartOffset(),
                                      token.EndOffset(), token.Type());

                _startOfCurrentToken = token.StartOffset() // where the base analyzer thinks it starts
                        + m.Groups["Word"].Index;          // plus however much the regex trimmed from the front
                _offset = token.EndOffset();
            } while (ret.TermText().Length == 0);          // skip tokens that trimmed down to nothing

            return ret;
        }

        /// <summary>
        /// A character is part of a token unless it is whitespace, a zero-width
        /// non-joiner (U+200C), or a zero-width space (U+200B).
        /// </summary>
        protected override bool IsTokenChar(char c)
        {
            if (char.IsWhiteSpace(c) || c == (char)0x200c || c == (char)0x200b)
                return false;
            return true;
        }

    }
}