/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 * 
 * http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

using System;
using System.Collections;
using Common;

namespace Lucene.Net.Analysis
{
	
    /// <summary>A simple Hebrew Tokenizer (provides support for prefixes and suffixes).</summary>
    public class HebrewTokenizer : Tokenizer
    {
        // Stemmer used to expand each raw word into all of its morphologies.
        HebrewStemmer _hebrewStemmer;

        /// <summary>Creates a tokenizer reading from <paramref name="input"/>, expanding
        /// each word through <paramref name="hebrewStemmer"/>.</summary>
        public HebrewTokenizer(System.IO.TextReader input, HebrewStemmer hebrewStemmer)
            : base(input)
        {
            _hebrewStemmer = hebrewStemmer;
        }

        private int offset = 0, bufferIndex = 0, dataLen = 0;
        private const int MAX_WORD_LEN = 255;
        private const int IO_BUFFER_SIZE = 1024;
        private char[] ioBuffer = new char[IO_BUFFER_SIZE];
        // Stores all morphologies of the current word. While not empty, the same word
        // (with a different morphology) is returned by the token stream; when empty,
        // the next word in the field is read and returned.
        private ArrayList _morphArray = new ArrayList();

        /// <summary>Called on each token character to normalize it before it is added to the
        /// token.  The default implementation does nothing.  Subclasses may use this
        /// to, e.g., lowercase tokens.
        /// </summary>
        protected internal virtual char Normalize(char c)
        {
            return c;
        }

        /// <summary>Returns the next token in the stream, or null at end of input.
        /// After a raw word is returned, its remaining morphologies (produced by the
        /// stemmer) are emitted by subsequent calls at the same position
        /// (position increment 0).</summary>
        public override Token Next(Token token)
        {
            // If there are morphologies left for the current word, emit the next one
            // before advancing to the next word in the input.
            if (_morphArray.Count > 0)
            {
                string morph = (string)_morphArray[0];
                _morphArray.RemoveAt(0);
                token.termBuffer = morph.ToCharArray();
                token.termLength = morph.Length;
                // Same logical position as the raw word already emitted.
                token.SetPositionIncrement(0);
                return token;
            }

            token.Clear();
            int length = 0;
            int start = bufferIndex;
            char[] buffer = token.TermBuffer();
            while (true)
            {
                if (bufferIndex >= dataLen)
                {
                    // Refill the I/O buffer; ReusableStringReader exposes its own Read(char[]).
                    offset += dataLen;
                    dataLen = input is Lucene.Net.Index.DocumentsWriter.ReusableStringReader ? ((Lucene.Net.Index.DocumentsWriter.ReusableStringReader) input).Read(ioBuffer) : input.Read((System.Char[]) ioBuffer, 0, ioBuffer.Length);
                    if (dataLen <= 0)
                    {
                        if (length > 0)
                            break;       // flush the final buffered word
                        else
                            return null; // end of stream
                    }
                    bufferIndex = 0;
                }

                char c = ioBuffer[bufferIndex++];

                // BUGFIX: the original passed the already-advanced bufferIndex, so the
                // double-quote check inspected the character AFTER c; a quote inside a
                // word therefore always split the token. Pass the index of c itself.
                if (IsTokenChar(c) || IsDoubleQuoteInWord(bufferIndex - 1))
                {
                    // if it's a token char

                    if (length == 0)
                        // start of token
                        start = offset + bufferIndex - 1;
                    else if (length == buffer.Length)
                        buffer = token.ResizeTermBuffer(1 + length);

                    buffer[length++] = Normalize(c); // buffer it, normalized

                    if (length == MAX_WORD_LEN)
                        // buffer overflow!
                        break;
                }
                else if (length > 0)
                    // at non-Letter w/ chars
                    break; // return 'em
            }

            token.termLength = length;
            token.startOffset = start;
            token.endOffset = start + length;
            // Queue up all morphologies of this word; they are emitted by subsequent
            // calls at the same position. Guard against a stemmer returning null.
            // (Single-allocation string ctor replaces new string(...).Substring(0, length).)
            ArrayList morphs = _hebrewStemmer.GetStemmedWords(new string(token.termBuffer, 0, length));
            _morphArray = morphs != null ? morphs : new ArrayList();

            return token;
        }

        /// <summary>Returns true when the character at <paramref name="index"/> of
        /// ioBuffer is a double quote flanked by token characters on both sides (a
        /// quotation mark inside a Hebrew word, e.g. an acronym, which must not split
        /// the token). NOTE(review): a quote that falls exactly on an I/O buffer
        /// boundary is still not detected.</summary>
        private bool IsDoubleQuoteInWord(int index)
        {
            // BUGFIX: bound by dataLen (characters actually read in this fill), not
            // ioBuffer.Length, so stale data from a previous fill is never inspected.
            if (index > 0 && index < dataLen - 1 && ioBuffer[index] == '\"')
                return IsTokenChar(ioBuffer[index - 1]) && IsTokenChar(ioBuffer[index + 1]);
            return false;
        }

        // Trims leading/trailing " or ' from a token. Currently unused (see the
        // commented-out call site in Next); kept for backward compatibility.
        private Token trimToken(Token token)
        {
            string tkn = new string(token.termBuffer);
            tkn = tkn.Substring(0, token.termLength);
            tkn = tkn.Trim(new char[] { '\'', '\"' });
            token.termBuffer = tkn.ToCharArray();
            token.termLength = tkn.Length;
            return token;
        }

        /// <summary>Resets this tokenizer to read from a new reader, clearing all
        /// buffering and position state.</summary>
        public override void Reset(System.IO.TextReader input)
        {
            base.Reset(input);
            bufferIndex = 0;
            offset = 0;
            dataLen = 0;
        }

        /// <summary>Returns true for characters that belong inside a token (letters).
        /// Double quotes are handled separately by IsDoubleQuoteInWord so that a quote
        /// inside a word does not cut the token in the middle.</summary>
        protected internal bool IsTokenChar(char c)
        {
            return System.Char.IsLetter(c);
        }
    }
}