﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Analysis;
using System.Collections;

namespace FisheryPlatform.Search.Core.Analysis
{
    /// <summary>
    /// The Tokenizer used by MMAnalyzer.
    /// Lazily reads the whole input, splits it into typed tokens
    /// (EN_WORD, DIGIT, PUNC, CN), resolves punctuation against its
    /// Chinese-token neighbours, and hands the results out one at a time
    /// from <see cref="Next"/>.
    /// </summary>
    /// <author>gwd, 2006-5-26</author>
    public class MMTokenizer : Tokenizer
    {
        // Size of each chunk read from "input" when refilling ioBuffer.
        private const int IO_BUFFER_SIZE = 256;
        // Finished tokens produced by Tokenize(); stays null until the first
        // call to Next() or GetFilteredTermString() triggers tokenization.
        private ArrayList _tokens = null;

        // Owning analyzer; supplies the FilterFactory whose "Char",
        // "Punctuation" and "Reserved" filters drive tokenization.
        private MMAnalyzer _analyzer;

        /// <summary>
        /// Creates a tokenizer that reads from <paramref name="reader"/>.
        /// </summary>
        /// <param name="analyzer">Analyzer providing the character/punctuation/reserved filters.</param>
        /// <param name="reader">Source text to tokenize.</param>
        public MMTokenizer(MMAnalyzer analyzer, System.IO.TextReader reader)
            : base(reader)
        {
            _analyzer = analyzer;
        }

        /// <summary>
        /// Get the next word. The entire input is tokenized (in index mode,
        /// isQuery = false) on the first call; subsequent calls dequeue one
        /// token per call and return null once the buffer is exhausted.
        /// </summary>
        /// <returns>The next Token, or null when no tokens remain.</returns>
        /// <remarks>Token.Type()
        /// 1. Chinese sentence: CN, but do not segment words now!!!
        /// 2. English letter, including reserved character: EN_WORD
        /// 3. English digit: DIGIT
        /// 4. Punctuation; PUNC
        /// 5. Chinese word: CN_WORD
        /// </remarks>
        public override Token Next()
        {
            if (_tokens == null) Tokenize(false);

            if (_tokens.Count == 0) return null;

            // fetch out the first token, then remove it.
            // NOTE(review): RemoveAt(0) on an ArrayList shifts all remaining
            // elements, so draining the stream is O(n^2) overall.
            Token token = (Token)_tokens[0];
            _tokens.RemoveAt(0);
            return token;
        }

        // Token type tags, plus the separator used when a token's type string
        // also carries the original (pre-filter) spelling: "TYPE:origword".
        public static readonly string EN_WORD = "EN_WORD";
        public static readonly string DIGIT = "DIGIT";
        public static readonly string PUNC = "PUNC";
        public static readonly string ORIG_SPLITER = ":";

        /// <summary>
        /// Concatenates the term text of every buffered token into one string.
        /// If the input has not been tokenized yet, tokenizes it in query mode
        /// (isQuery = true). NOTE(review): if Next() has already consumed some
        /// tokens, only the remaining ones are included — confirm callers use
        /// this before iterating.
        /// </summary>
        /// <returns>The merged term text of the tokens in the stream.</returns>
        public string GetFilteredTermString()
        {
            StringBuilder sbTerm = new StringBuilder();
            if (_tokens == null) Tokenize(true);
            foreach (Token t in _tokens)
                sbTerm.Append(t.TermText());
            return sbTerm.ToString();
        }

        /// <summary>
        /// Read the content from "input", and tokenize them into Tokens.
        /// </summary>
        /// <param name="isQuery">
        /// true when tokenizing a query string: the unconditional char filter
        /// is skipped, and characters that are neither word characters nor
        /// listed punctuation are kept as empty-type tokens instead of dropped.
        /// </param>
        /// <remarks>
        /// Step 1. Execute Char filter
        /// Step 2. Tokenize: EN_WORD, DIGIT, PUNC, CN
        /// Step 3. Deal with punctuation: if any side of this punctuation is "CN", reserve this punctuation. Otherwise, remove it.
        /// </remarks>
        private void Tokenize(bool isQuery)
        {
            ArrayList tokens = new ArrayList();     // temp tokens
            _tokens = new ArrayList();      // create instance to store real Tokens.

            #region parse content to Tokens

            Filter reservedFilter = _analyzer.FilterFactory.GetFilter("Reserved");      // character to be treated as letter or a part of english word.
            Filter charFilter = _analyzer.FilterFactory.GetFilter("Char");      // character to be replaced/trimmed unconditionally. Executes before punctuationFilter.
            Filter punctuationFilter = _analyzer.FilterFactory.GetFilter("Punctuation");      // punctuation filter. Rule: only when either side of the punctuation is a CN-Token, execute replacement; otherwise trim it!

            // Accumulates the characters of the word currently being built.
            System.Text.StringBuilder wordBuffer = new System.Text.StringBuilder();
            int offset = -1;      // token's offset

            int bufferIndex = 0;
            int dataLen = 0;
            string tokenType = "";
            // Original (pre-char-filter) spelling of the current word; only
            // populated once the filter actually changes a character.
            StringBuilder sbOrigWord = new StringBuilder();
            char[] ioBuffer = new char[IO_BUFFER_SIZE];

            // parse the content to Tokens, one character per iteration.
            while (true)     // while start
            {
                // circularly read the content
                if (bufferIndex >= dataLen)
                {
                    dataLen = this.input.Read(ioBuffer, 0, ioBuffer.Length);
                    bufferIndex = 0;
                }
                // reach the end of the content: break to flush the last word,
                // or return immediately when nothing was produced at all.
                if (dataLen == 0)
                {
                    if (wordBuffer.Length > 0 || tokens.Count > 0) break;
                    else return;
                }

                char c = ioBuffer[bufferIndex++];
                char? orig_c = c;

                if (!isQuery)
                    c = charFilter.Execute(c.ToString())[0];       // replace special character
                // NOTE(review): [0] assumes Execute never returns an empty
                // string — confirm the Char filter's contract.

                // orig_c is kept only when the filter changed the character,
                // so sbOrigWord records the original spelling.
                if (c == orig_c)
                    orig_c = null;

                offset++;
                // get the unicode set of a single character(en/cn)
                MMSupport.UnicodeSetSupport ub = MMSupport.UnicodeSetSupport.BlockOf(c);
                //MMSupport.Debug(c+": "+ub.ToString());
                if ((ub.Equals(MMSupport.UnicodeSetSupport.BasicLatin)) || (ub.Equals(MMSupport.UnicodeSetSupport.HalfWidthAndFullWidthForms)))
                {
                    if (ub.Equals(MMSupport.UnicodeSetSupport.HalfWidthAndFullWidthForms))
                    {
                        // convert HALFWIDTH_AND_FULLWIDTH_FORMS to BASIC_LATIN
                        // (fullwidth forms are ASCII + 0xFEE0 = 65248).
                        int iValue = (int)c;
                        iValue = iValue - 65248;
                        c = (char)iValue;
                        // ## replace again for FullWidth char
                        if (!isQuery)
                            c = charFilter.Execute(c.ToString())[0];       // replace special character
                    }

                    // deal with english letter
                    //if (Char.IsLetter(c) || ((c == '_') || (c == '+') || (c == '#')))
                    if (Char.IsLetter(c) || reservedFilter.Lookup(c.ToString()))
                    {
                        // A pending Chinese run ends here — flush it first.
                        if ((tokenType.Equals("CN")) && (wordBuffer.Length > 0))
                        {
                            // NOTE(review): end offset is `offset` here but
                            // `offset - 1` in the other flush sites — confirm
                            // which convention is intended.
                            tokens.Add(new Token(wordBuffer.ToString(), offset - wordBuffer.Length, offset, tokenType));
                            wordBuffer.Remove(0, wordBuffer.Length);
                        }
                        tokenType = EN_WORD;      // register token type.
                        // Track the original spelling once the filter has
                        // altered any character of this word.
                        if (orig_c != null)
                        {
                            if (sbOrigWord.Length == 0)
                                sbOrigWord.Append(wordBuffer.ToString());
                            sbOrigWord.Append(Char.ToLower(orig_c.Value));
                        }
                        else if (sbOrigWord.Length != 0)
                            sbOrigWord.Append(Char.ToLower(c));
                        wordBuffer.Append(Char.ToLower(c));     // convert to lowercase.
                    }
                    // deal with digital letter
                    else if (Char.IsDigit(c))
                    {
                        // A pending Chinese run ends here — flush it first.
                        if ((tokenType.Equals("CN")) && (wordBuffer.Length > 0))
                        {
                            tokens.Add(new Token(wordBuffer.ToString(), offset - wordBuffer.Length, offset, tokenType));
                            wordBuffer.Remove(0, wordBuffer.Length);
                        }
                        tokenType = DIGIT;      // register token type.
                        if (sbOrigWord.Length != 0)
                            sbOrigWord.Append(c);
                        wordBuffer.Append(c);     // digits are appended as-is.
                    }
                    else      // deal with punctuation filter
                    {
                        // Flush the word in progress; its type string carries
                        // the original spelling when the filter changed it.
                        if (wordBuffer.Length > 0)
                        {
                            string info = string.Format("{0}{1}", tokenType, sbOrigWord.Length == 0 ? string.Empty : MMTokenizer.ORIG_SPLITER + sbOrigWord.ToString());
                            tokens.Add(new Token(wordBuffer.ToString(), offset - wordBuffer.Length, offset - 1, info));
                            wordBuffer.Remove(0, wordBuffer.Length);
                            sbOrigWord.Remove(0, sbOrigWord.Length);
                        }

                        if (punctuationFilter.Lookup(c.ToString()))
                        {
                            // do not execute punctuation replacement here;
                            // only when either side of the punctuation is a
                            // CN-Token is the replacement executed (below).
                            tokens.Add(new Token(c.ToString(), offset, offset, PUNC));
                        }
                        else
                        {
                            if (!isQuery)
                                continue;      // skip over other characters.
                            else
                                tokens.Add(new Token(c.ToString(), offset, offset, string.Empty));
                        }
                    }
                }
                else
                {
                    if (Char.IsLetter(c))     //Chinese Character
                    {
                        // A non-CN word in progress ends here — flush it.
                        if ((!tokenType.Equals("CN")) && (wordBuffer.Length > 0))
                        {
                            string info = string.Format("{0}{1}", tokenType, sbOrigWord.Length == 0 ? string.Empty : MMTokenizer.ORIG_SPLITER + sbOrigWord.ToString());
                            tokens.Add(new Token(wordBuffer.ToString(), offset - wordBuffer.Length, offset - 1, info));
                            wordBuffer.Remove(0, wordBuffer.Length);
                            sbOrigWord.Remove(0, sbOrigWord.Length);
                        }
                        tokenType = "CN";
                        wordBuffer.Append(c);
                    }
                    else      // skip over other characters
                    {
                        // Flush any word in progress before handling the
                        // non-letter character.
                        if (wordBuffer.Length > 0)
                        {
                            string info = string.Format("{0}{1}", tokenType, sbOrigWord.Length == 0 ? string.Empty : MMTokenizer.ORIG_SPLITER + sbOrigWord.ToString());
                            tokens.Add(new Token(wordBuffer.ToString(), offset - wordBuffer.Length, offset - 1, info));
                            wordBuffer.Remove(0, wordBuffer.Length);
                            sbOrigWord.Remove(0, sbOrigWord.Length);
                        }

                        if (punctuationFilter.Lookup(c.ToString()))
                        {
                            // do not execute punctuation replacement here;
                            // only when either side of the punctuation is a
                            // CN-Token is the replacement executed (below).
                            tokens.Add(new Token(c.ToString(), offset, offset, PUNC));
                        }
                        else
                        {
                            if (!isQuery)
                                continue;      // skip over other characters.
                            else
                                tokens.Add(new Token(c.ToString(), offset, offset, string.Empty));
                        }
                    }
                }

            }     // while end

            // deal with the content left in buffer.
            if (wordBuffer.Length > 0)
            {
                string info = string.Format("{0}{1}", tokenType, sbOrigWord.Length == 0 ? string.Empty : MMTokenizer.ORIG_SPLITER + sbOrigWord.ToString());
                tokens.Add(new Token(wordBuffer.ToString(), (offset + 1) - wordBuffer.Length, offset, info));
                wordBuffer.Remove(0, wordBuffer.Length);
                sbOrigWord.Remove(0, sbOrigWord.Length);
            }

            #endregion

            // Second pass: resolve punctuation (and, in query mode, spaces)
            // against neighbouring tokens, and copy everything else through.
            for (int i = 0; i < tokens.Count; ++i)
            {
                Token token = (Token)tokens[i];
                if (token.Type().Equals(PUNC) || (isQuery && token.TermText().Equals(" ")))
                {
                    // skip over the starting punctuation and ending punctuation.
                    if (i > 0 && i < tokens.Count - 1)
                    {
                        // Keep punctuation only when a CN token is adjacent,
                        // applying the configured replacement; a query-mode
                        // space next to CN becomes ",".
                        if (((Token)tokens[i - 1]).Type().Equals("CN") || ((Token)tokens[i + 1]).Type().Equals("CN"))
                        {
                            // Execute punctuation replacement.
                            string text;
                            if (token.Type().Equals(PUNC))
                                text = punctuationFilter.Execute(token.TermText());
                            else
                                text = ",";
                            _tokens.Add(new Token(text, token.StartOffset(), token.EndOffset(), token.Type()));
                        }
                        else if (isQuery)
                        {
                            // In query mode, punctuation between non-CN tokens
                            // is replaced by a Chinese full stop.
                            _tokens.Add(new Token("。", token.StartOffset(), token.EndOffset(), token.Type()));
                        }
                    }
                }
                else
                {
                    // filter overlong Tokens (terms longer than 100 chars are dropped).
                    if (token.TermText().Length <= 100) _tokens.Add(token);
                }
            }

        }

    }
}
